Clone of chromium aad1ce808763f59c7a3753e08f1500a104ecc6fd refs/remotes/origin/HEAD
diff --git a/cc/resources/bitmap_content_layer_updater.cc b/cc/resources/bitmap_content_layer_updater.cc
new file mode 100644
index 0000000..63ba336
--- /dev/null
+++ b/cc/resources/bitmap_content_layer_updater.cc
@@ -0,0 +1,115 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/bitmap_content_layer_updater.h"
+
+#include "cc/debug/devtools_instrumentation.h"
+#include "cc/debug/rendering_stats_instrumentation.h"
+#include "cc/resources/layer_painter.h"
+#include "cc/resources/prioritized_resource.h"
+#include "cc/resources/resource_update.h"
+#include "cc/resources/resource_update_queue.h"
+#include "skia/ext/platform_canvas.h"
+
+namespace cc {
+
+BitmapContentLayerUpdater::Resource::Resource(
+ BitmapContentLayerUpdater* updater,
+ scoped_ptr<PrioritizedResource> texture)
+ : LayerUpdater::Resource(texture.Pass()), updater_(updater) {}
+
+BitmapContentLayerUpdater::Resource::~Resource() {}
+
+void BitmapContentLayerUpdater::Resource::Update(
+ ResourceUpdateQueue* queue,
+ const gfx::Rect& source_rect,
+ const gfx::Vector2d& dest_offset,
+ bool partial_update) {
+ updater_->UpdateTexture(
+ queue, texture(), source_rect, dest_offset, partial_update);
+}
+
+scoped_refptr<BitmapContentLayerUpdater> BitmapContentLayerUpdater::Create(
+ scoped_ptr<LayerPainter> painter,
+ RenderingStatsInstrumentation* stats_instrumentation,
+ int layer_id) {
+ return make_scoped_refptr(
+ new BitmapContentLayerUpdater(painter.Pass(),
+ stats_instrumentation,
+ layer_id));
+}
+
+BitmapContentLayerUpdater::BitmapContentLayerUpdater(
+ scoped_ptr<LayerPainter> painter,
+ RenderingStatsInstrumentation* stats_instrumentation,
+ int layer_id)
+ : ContentLayerUpdater(painter.Pass(), stats_instrumentation, layer_id) {}
+
+BitmapContentLayerUpdater::~BitmapContentLayerUpdater() {}
+
+scoped_ptr<LayerUpdater::Resource> BitmapContentLayerUpdater::CreateResource(
+ PrioritizedResourceManager* manager) {
+ return make_scoped_ptr(
+ new Resource(this, PrioritizedResource::Create(manager)));
+}
+
+void BitmapContentLayerUpdater::PrepareToUpdate(const gfx::Size& content_size,
+ const gfx::Rect& paint_rect,
+ const gfx::Size& tile_size,
+ float contents_width_scale,
+ float contents_height_scale) {
+ if (canvas_size_ != paint_rect.size()) {
+ devtools_instrumentation::ScopedLayerTask paint_setup(
+ devtools_instrumentation::kPaintSetup, layer_id_);
+ canvas_size_ = paint_rect.size();
+ bitmap_backing_.allocN32Pixels(
+ canvas_size_.width(), canvas_size_.height(), layer_is_opaque_);
+ // TODO(danak): Remove when skia does the check for us: crbug.com/360384
+ canvas_ = skia::AdoptRef(new SkCanvas(bitmap_backing_));
+ DCHECK_EQ(paint_rect.width(), canvas_->getBaseLayerSize().width());
+ DCHECK_EQ(paint_rect.height(), canvas_->getBaseLayerSize().height());
+ }
+
+ base::TimeTicks start_time =
+ rendering_stats_instrumentation_->StartRecording();
+ PaintContents(canvas_.get(),
+ content_size,
+ paint_rect,
+ contents_width_scale,
+ contents_height_scale);
+ base::TimeDelta duration =
+ rendering_stats_instrumentation_->EndRecording(start_time);
+ rendering_stats_instrumentation_->AddPaint(
+ duration, paint_rect.width() * paint_rect.height());
+}
+
+void BitmapContentLayerUpdater::UpdateTexture(ResourceUpdateQueue* queue,
+ PrioritizedResource* texture,
+ const gfx::Rect& source_rect,
+ const gfx::Vector2d& dest_offset,
+ bool partial_update) {
+ CHECK(canvas_);
+ ResourceUpdate upload = ResourceUpdate::Create(
+ texture, &bitmap_backing_, paint_rect(), source_rect, dest_offset);
+ if (partial_update)
+ queue->AppendPartialUpload(upload);
+ else
+ queue->AppendFullUpload(upload);
+}
+
+void BitmapContentLayerUpdater::ReduceMemoryUsage() {
+ canvas_.clear();
+ canvas_size_ = gfx::Size();
+}
+
+void BitmapContentLayerUpdater::SetOpaque(bool opaque) {
+ if (opaque != layer_is_opaque_) {
+ canvas_.clear();
+ canvas_size_ = gfx::Size();
+ }
+
+ ContentLayerUpdater::SetOpaque(opaque);
+}
+
+} // namespace cc
diff --git a/cc/resources/bitmap_content_layer_updater.h b/cc/resources/bitmap_content_layer_updater.h
new file mode 100644
index 0000000..ca3f445
--- /dev/null
+++ b/cc/resources/bitmap_content_layer_updater.h
@@ -0,0 +1,80 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_BITMAP_CONTENT_LAYER_UPDATER_H_
+#define CC_RESOURCES_BITMAP_CONTENT_LAYER_UPDATER_H_
+
+#include "cc/base/cc_export.h"
+#include "cc/resources/content_layer_updater.h"
+#include "skia/ext/refptr.h"
+#include "third_party/skia/include/core/SkBitmap.h"
+
+class SkCanvas;
+
+namespace cc {
+
+class LayerPainter;
+class RenderingStatsInstrumentation;
+
+// This class rasterizes the content_rect into a skia bitmap canvas. It then
+// creates a ResourceUpdate with this bitmap canvas and inserts the
+// ResourceUpdate into the provided ResourceUpdateQueue. Actual texture
+// uploading is done by ResourceUpdateController.
+class CC_EXPORT BitmapContentLayerUpdater : public ContentLayerUpdater {
+ public:
+ class Resource : public LayerUpdater::Resource {
+ public:
+ Resource(BitmapContentLayerUpdater* updater,
+ scoped_ptr<PrioritizedResource> resource);
+ virtual ~Resource();
+
+ virtual void Update(ResourceUpdateQueue* queue,
+ const gfx::Rect& source_rect,
+ const gfx::Vector2d& dest_offset,
+ bool partial_update) OVERRIDE;
+
+ private:
+ BitmapContentLayerUpdater* updater_;
+
+ DISALLOW_COPY_AND_ASSIGN(Resource);
+ };
+
+ static scoped_refptr<BitmapContentLayerUpdater> Create(
+ scoped_ptr<LayerPainter> painter,
+      RenderingStatsInstrumentation* stats_instrumentation,
+ int layer_id);
+
+ virtual scoped_ptr<LayerUpdater::Resource> CreateResource(
+ PrioritizedResourceManager* manager) OVERRIDE;
+ virtual void PrepareToUpdate(const gfx::Size& content_size,
+ const gfx::Rect& paint_rect,
+ const gfx::Size& tile_size,
+ float contents_width_scale,
+ float contents_height_scale) OVERRIDE;
+ void UpdateTexture(ResourceUpdateQueue* queue,
+ PrioritizedResource* resource,
+ const gfx::Rect& source_rect,
+ const gfx::Vector2d& dest_offset,
+ bool partial_update);
+ virtual void SetOpaque(bool opaque) OVERRIDE;
+ virtual void ReduceMemoryUsage() OVERRIDE;
+
+ protected:
+ BitmapContentLayerUpdater(
+ scoped_ptr<LayerPainter> painter,
+      RenderingStatsInstrumentation* stats_instrumentation,
+ int layer_id);
+ virtual ~BitmapContentLayerUpdater();
+
+ SkBitmap bitmap_backing_;
+ skia::RefPtr<SkCanvas> canvas_;
+ gfx::Size canvas_size_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BitmapContentLayerUpdater);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_BITMAP_CONTENT_LAYER_UPDATER_H_
diff --git a/cc/resources/bitmap_raster_worker_pool.cc b/cc/resources/bitmap_raster_worker_pool.cc
new file mode 100644
index 0000000..959a0f3
--- /dev/null
+++ b/cc/resources/bitmap_raster_worker_pool.cc
@@ -0,0 +1,201 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/bitmap_raster_worker_pool.h"
+
+#include <algorithm>
+
+#include "base/debug/trace_event.h"
+#include "base/debug/trace_event_argument.h"
+#include "base/strings/stringprintf.h"
+#include "cc/debug/traced_value.h"
+#include "cc/resources/raster_buffer.h"
+#include "cc/resources/resource.h"
+
+namespace cc {
+namespace {
+
+class RasterBufferImpl : public RasterBuffer {
+ public:
+ RasterBufferImpl(ResourceProvider* resource_provider,
+ const Resource* resource)
+ : lock_(resource_provider, resource->id()) {}
+
+ // Overridden from RasterBuffer:
+ virtual skia::RefPtr<SkCanvas> AcquireSkCanvas() OVERRIDE {
+ return skia::SharePtr(lock_.sk_canvas());
+ }
+ virtual void ReleaseSkCanvas(const skia::RefPtr<SkCanvas>& canvas) OVERRIDE {}
+
+ private:
+ ResourceProvider::ScopedWriteLockSoftware lock_;
+
+ DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
+};
+
+} // namespace
+
+// static
+scoped_ptr<RasterWorkerPool> BitmapRasterWorkerPool::Create(
+ base::SequencedTaskRunner* task_runner,
+ TaskGraphRunner* task_graph_runner,
+ ResourceProvider* resource_provider) {
+ return make_scoped_ptr<RasterWorkerPool>(new BitmapRasterWorkerPool(
+ task_runner, task_graph_runner, resource_provider));
+}
+
+BitmapRasterWorkerPool::BitmapRasterWorkerPool(
+ base::SequencedTaskRunner* task_runner,
+ TaskGraphRunner* task_graph_runner,
+ ResourceProvider* resource_provider)
+ : task_runner_(task_runner),
+ task_graph_runner_(task_graph_runner),
+ namespace_token_(task_graph_runner->GetNamespaceToken()),
+ resource_provider_(resource_provider),
+ raster_finished_weak_ptr_factory_(this) {
+}
+
+BitmapRasterWorkerPool::~BitmapRasterWorkerPool() {
+}
+
+Rasterizer* BitmapRasterWorkerPool::AsRasterizer() {
+ return this;
+}
+
+void BitmapRasterWorkerPool::SetClient(RasterizerClient* client) {
+ client_ = client;
+}
+
+void BitmapRasterWorkerPool::Shutdown() {
+ TRACE_EVENT0("cc", "BitmapRasterWorkerPool::Shutdown");
+
+ TaskGraph empty;
+ task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
+ task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
+}
+
+void BitmapRasterWorkerPool::ScheduleTasks(RasterTaskQueue* queue) {
+ TRACE_EVENT0("cc", "BitmapRasterWorkerPool::ScheduleTasks");
+
+ if (raster_pending_.none())
+ TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);
+
+ // Mark all task sets as pending.
+ raster_pending_.set();
+
+ unsigned priority = kRasterTaskPriorityBase;
+
+ graph_.Reset();
+
+ // Cancel existing OnRasterFinished callbacks.
+ raster_finished_weak_ptr_factory_.InvalidateWeakPtrs();
+
+ scoped_refptr<RasterizerTask> new_raster_finished_tasks[kNumberOfTaskSets];
+
+ size_t task_count[kNumberOfTaskSets] = {0};
+
+ for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
+ new_raster_finished_tasks[task_set] = CreateRasterFinishedTask(
+ task_runner_.get(),
+ base::Bind(&BitmapRasterWorkerPool::OnRasterFinished,
+ raster_finished_weak_ptr_factory_.GetWeakPtr(),
+ task_set));
+ }
+
+ for (RasterTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
+ it != queue->items.end();
+ ++it) {
+ const RasterTaskQueue::Item& item = *it;
+ RasterTask* task = item.task;
+ DCHECK(!task->HasCompleted());
+
+ for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
+ if (!item.task_sets[task_set])
+ continue;
+
+ ++task_count[task_set];
+
+ graph_.edges.push_back(
+ TaskGraph::Edge(task, new_raster_finished_tasks[task_set].get()));
+ }
+
+ InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
+ }
+
+ for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
+ InsertNodeForTask(&graph_,
+ new_raster_finished_tasks[task_set].get(),
+ kRasterFinishedTaskPriority,
+ task_count[task_set]);
+ }
+
+ ScheduleTasksOnOriginThread(this, &graph_);
+ task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);
+
+ std::copy(new_raster_finished_tasks,
+ new_raster_finished_tasks + kNumberOfTaskSets,
+ raster_finished_tasks_);
+
+ TRACE_EVENT_ASYNC_STEP_INTO1(
+ "cc", "ScheduledTasks", this, "rasterizing", "state", StateAsValue());
+}
+
+void BitmapRasterWorkerPool::CheckForCompletedTasks() {
+ TRACE_EVENT0("cc", "BitmapRasterWorkerPool::CheckForCompletedTasks");
+
+ task_graph_runner_->CollectCompletedTasks(namespace_token_,
+ &completed_tasks_);
+ for (Task::Vector::const_iterator it = completed_tasks_.begin();
+ it != completed_tasks_.end();
+ ++it) {
+ RasterizerTask* task = static_cast<RasterizerTask*>(it->get());
+
+ task->WillComplete();
+ task->CompleteOnOriginThread(this);
+ task->DidComplete();
+
+ task->RunReplyOnOriginThread();
+ }
+ completed_tasks_.clear();
+}
+
+scoped_ptr<RasterBuffer> BitmapRasterWorkerPool::AcquireBufferForRaster(
+ const Resource* resource) {
+ return make_scoped_ptr<RasterBuffer>(
+ new RasterBufferImpl(resource_provider_, resource));
+}
+
+void BitmapRasterWorkerPool::ReleaseBufferForRaster(
+ scoped_ptr<RasterBuffer> buffer) {
+ // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
+}
+
+void BitmapRasterWorkerPool::OnRasterFinished(TaskSet task_set) {
+ TRACE_EVENT1(
+ "cc", "BitmapRasterWorkerPool::OnRasterFinished", "task_set", task_set);
+
+ DCHECK(raster_pending_[task_set]);
+ raster_pending_[task_set] = false;
+ if (raster_pending_.any()) {
+ TRACE_EVENT_ASYNC_STEP_INTO1(
+ "cc", "ScheduledTasks", this, "rasterizing", "state", StateAsValue());
+ } else {
+ TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
+ }
+ client_->DidFinishRunningTasks(task_set);
+}
+
+scoped_refptr<base::debug::ConvertableToTraceFormat>
+BitmapRasterWorkerPool::StateAsValue() const {
+ scoped_refptr<base::debug::TracedValue> state =
+ new base::debug::TracedValue();
+
+ state->BeginArray("tasks_pending");
+ for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
+ state->AppendBoolean(raster_pending_[task_set]);
+ state->EndArray();
+ return state;
+}
+
+} // namespace cc
diff --git a/cc/resources/bitmap_raster_worker_pool.h b/cc/resources/bitmap_raster_worker_pool.h
new file mode 100644
index 0000000..39c4237
--- /dev/null
+++ b/cc/resources/bitmap_raster_worker_pool.h
@@ -0,0 +1,79 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_BITMAP_RASTER_WORKER_POOL_H_
+#define CC_RESOURCES_BITMAP_RASTER_WORKER_POOL_H_
+
+#include "base/memory/weak_ptr.h"
+#include "base/values.h"
+#include "cc/resources/raster_worker_pool.h"
+#include "cc/resources/rasterizer.h"
+
+namespace base {
+namespace debug {
+class ConvertableToTraceFormat;
+}
+}
+
+namespace cc {
+class ResourceProvider;
+
+class CC_EXPORT BitmapRasterWorkerPool : public RasterWorkerPool,
+ public Rasterizer,
+ public RasterizerTaskClient {
+ public:
+ virtual ~BitmapRasterWorkerPool();
+
+ static scoped_ptr<RasterWorkerPool> Create(
+ base::SequencedTaskRunner* task_runner,
+ TaskGraphRunner* task_graph_runner,
+ ResourceProvider* resource_provider);
+
+ // Overridden from RasterWorkerPool:
+ virtual Rasterizer* AsRasterizer() OVERRIDE;
+
+ // Overridden from Rasterizer:
+ virtual void SetClient(RasterizerClient* client) OVERRIDE;
+ virtual void Shutdown() OVERRIDE;
+ virtual void ScheduleTasks(RasterTaskQueue* queue) OVERRIDE;
+ virtual void CheckForCompletedTasks() OVERRIDE;
+
+ // Overridden from RasterizerTaskClient:
+ virtual scoped_ptr<RasterBuffer> AcquireBufferForRaster(
+ const Resource* resource) OVERRIDE;
+ virtual void ReleaseBufferForRaster(scoped_ptr<RasterBuffer> buffer) OVERRIDE;
+
+ protected:
+ BitmapRasterWorkerPool(base::SequencedTaskRunner* task_runner,
+ TaskGraphRunner* task_graph_runner,
+ ResourceProvider* resource_provider);
+
+ private:
+ void OnRasterFinished(TaskSet task_set);
+ scoped_refptr<base::debug::ConvertableToTraceFormat> StateAsValue() const;
+
+ scoped_refptr<base::SequencedTaskRunner> task_runner_;
+ TaskGraphRunner* task_graph_runner_;
+ const NamespaceToken namespace_token_;
+ RasterizerClient* client_;
+ ResourceProvider* resource_provider_;
+
+ TaskSetCollection raster_pending_;
+
+ scoped_refptr<RasterizerTask> raster_finished_tasks_[kNumberOfTaskSets];
+
+ // Task graph used when scheduling tasks and vector used to gather
+ // completed tasks.
+ TaskGraph graph_;
+ Task::Vector completed_tasks_;
+
+ base::WeakPtrFactory<BitmapRasterWorkerPool>
+ raster_finished_weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(BitmapRasterWorkerPool);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_BITMAP_RASTER_WORKER_POOL_H_
diff --git a/cc/resources/bitmap_skpicture_content_layer_updater.cc b/cc/resources/bitmap_skpicture_content_layer_updater.cc
new file mode 100644
index 0000000..5b4187d
--- /dev/null
+++ b/cc/resources/bitmap_skpicture_content_layer_updater.cc
@@ -0,0 +1,87 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/bitmap_skpicture_content_layer_updater.h"
+
+#include "base/time/time.h"
+#include "cc/debug/rendering_stats_instrumentation.h"
+#include "cc/resources/layer_painter.h"
+#include "cc/resources/prioritized_resource.h"
+#include "cc/resources/resource_update_queue.h"
+#include "third_party/skia/include/core/SkCanvas.h"
+
+namespace cc {
+
+BitmapSkPictureContentLayerUpdater::Resource::Resource(
+ BitmapSkPictureContentLayerUpdater* updater,
+ scoped_ptr<PrioritizedResource> texture)
+ : ContentLayerUpdater::Resource(texture.Pass()), updater_(updater) {}
+
+void BitmapSkPictureContentLayerUpdater::Resource::Update(
+ ResourceUpdateQueue* queue,
+ const gfx::Rect& source_rect,
+ const gfx::Vector2d& dest_offset,
+ bool partial_update) {
+ SkAlphaType at =
+ updater_->layer_is_opaque() ? kOpaque_SkAlphaType : kPremul_SkAlphaType;
+ bitmap_.allocPixels(SkImageInfo::Make(
+ source_rect.width(), source_rect.height(), kN32_SkColorType, at));
+ SkCanvas canvas(bitmap_);
+ updater_->PaintContentsRect(&canvas, source_rect);
+
+ ResourceUpdate upload = ResourceUpdate::Create(
+ texture(), &bitmap_, source_rect, source_rect, dest_offset);
+ if (partial_update)
+ queue->AppendPartialUpload(upload);
+ else
+ queue->AppendFullUpload(upload);
+}
+
+scoped_refptr<BitmapSkPictureContentLayerUpdater>
+BitmapSkPictureContentLayerUpdater::Create(
+ scoped_ptr<LayerPainter> painter,
+ RenderingStatsInstrumentation* stats_instrumentation,
+ int layer_id) {
+ return make_scoped_refptr(
+ new BitmapSkPictureContentLayerUpdater(painter.Pass(),
+ stats_instrumentation,
+ layer_id));
+}
+
+BitmapSkPictureContentLayerUpdater::BitmapSkPictureContentLayerUpdater(
+ scoped_ptr<LayerPainter> painter,
+ RenderingStatsInstrumentation* stats_instrumentation,
+ int layer_id)
+ : SkPictureContentLayerUpdater(painter.Pass(),
+ stats_instrumentation,
+ layer_id) {}
+
+BitmapSkPictureContentLayerUpdater::~BitmapSkPictureContentLayerUpdater() {}
+
+scoped_ptr<LayerUpdater::Resource>
+BitmapSkPictureContentLayerUpdater::CreateResource(
+ PrioritizedResourceManager* manager) {
+ return make_scoped_ptr(
+ new Resource(this, PrioritizedResource::Create(manager)));
+}
+
+void BitmapSkPictureContentLayerUpdater::PaintContentsRect(
+ SkCanvas* canvas,
+ const gfx::Rect& source_rect) {
+ if (!canvas)
+ return;
+ // Translate the origin of content_rect to that of source_rect.
+ canvas->translate(paint_rect().x() - source_rect.x(),
+ paint_rect().y() - source_rect.y());
+ base::TimeTicks start_time =
+ rendering_stats_instrumentation_->StartRecording();
+ DrawPicture(canvas);
+ base::TimeDelta duration =
+ rendering_stats_instrumentation_->EndRecording(start_time);
+ rendering_stats_instrumentation_->AddRaster(
+ duration,
+ source_rect.width() * source_rect.height());
+}
+
+} // namespace cc
diff --git a/cc/resources/bitmap_skpicture_content_layer_updater.h b/cc/resources/bitmap_skpicture_content_layer_updater.h
new file mode 100644
index 0000000..e20e3c3
--- /dev/null
+++ b/cc/resources/bitmap_skpicture_content_layer_updater.h
@@ -0,0 +1,57 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_BITMAP_SKPICTURE_CONTENT_LAYER_UPDATER_H_
+#define CC_RESOURCES_BITMAP_SKPICTURE_CONTENT_LAYER_UPDATER_H_
+
+#include "cc/resources/skpicture_content_layer_updater.h"
+#include "third_party/skia/include/core/SkBitmap.h"
+
+namespace cc {
+
+// This class records the content_rect into an SkPicture, then software
+// rasterizes the SkPicture into bitmaps for each tile. This implements
+// LayerTreeSettings::per_tile_painting_enabled.
+class BitmapSkPictureContentLayerUpdater : public SkPictureContentLayerUpdater {
+ public:
+ class Resource : public ContentLayerUpdater::Resource {
+ public:
+ Resource(BitmapSkPictureContentLayerUpdater* updater,
+ scoped_ptr<PrioritizedResource> texture);
+
+ virtual void Update(ResourceUpdateQueue* queue,
+ const gfx::Rect& source_rect,
+ const gfx::Vector2d& dest_offset,
+ bool partial_update) OVERRIDE;
+
+ private:
+ SkBitmap bitmap_;
+ BitmapSkPictureContentLayerUpdater* updater_;
+
+ DISALLOW_COPY_AND_ASSIGN(Resource);
+ };
+
+ static scoped_refptr<BitmapSkPictureContentLayerUpdater> Create(
+ scoped_ptr<LayerPainter> painter,
+ RenderingStatsInstrumentation* stats_instrumentation,
+ int layer_id);
+
+ virtual scoped_ptr<LayerUpdater::Resource> CreateResource(
+ PrioritizedResourceManager* manager) OVERRIDE;
+ void PaintContentsRect(SkCanvas* canvas,
+ const gfx::Rect& source_rect);
+
+ private:
+ BitmapSkPictureContentLayerUpdater(
+ scoped_ptr<LayerPainter> painter,
+ RenderingStatsInstrumentation* stats_instrumentation,
+ int layer_id);
+ virtual ~BitmapSkPictureContentLayerUpdater();
+
+ DISALLOW_COPY_AND_ASSIGN(BitmapSkPictureContentLayerUpdater);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_BITMAP_SKPICTURE_CONTENT_LAYER_UPDATER_H_
diff --git a/cc/resources/content_layer_updater.cc b/cc/resources/content_layer_updater.cc
new file mode 100644
index 0000000..f6469ff
--- /dev/null
+++ b/cc/resources/content_layer_updater.cc
@@ -0,0 +1,129 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/content_layer_updater.h"
+
+#include "base/debug/trace_event.h"
+#include "base/time/time.h"
+#include "cc/debug/rendering_stats_instrumentation.h"
+#include "cc/resources/layer_painter.h"
+#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkPaint.h"
+#include "third_party/skia/include/core/SkRect.h"
+#include "third_party/skia/include/core/SkScalar.h"
+#include "ui/gfx/rect_conversions.h"
+#include "ui/gfx/rect_f.h"
+#include "ui/gfx/skia_util.h"
+
+namespace cc {
+
+ContentLayerUpdater::ContentLayerUpdater(
+ scoped_ptr<LayerPainter> painter,
+ RenderingStatsInstrumentation* stats_instrumentation,
+ int layer_id)
+ : rendering_stats_instrumentation_(stats_instrumentation),
+ layer_id_(layer_id),
+ layer_is_opaque_(false),
+ layer_fills_bounds_completely_(false),
+ painter_(painter.Pass()),
+ background_color_(SK_ColorTRANSPARENT) {
+}
+
+ContentLayerUpdater::~ContentLayerUpdater() {}
+
+void ContentLayerUpdater::set_rendering_stats_instrumentation(
+ RenderingStatsInstrumentation* rsi) {
+ rendering_stats_instrumentation_ = rsi;
+}
+
+void ContentLayerUpdater::PaintContents(SkCanvas* canvas,
+ const gfx::Size& layer_content_size,
+ const gfx::Rect& paint_rect,
+ float contents_width_scale,
+ float contents_height_scale) {
+ TRACE_EVENT0("cc", "ContentLayerUpdater::PaintContents");
+ if (!canvas)
+ return;
+ canvas->save();
+ canvas->translate(SkIntToScalar(-paint_rect.x()),
+ SkIntToScalar(-paint_rect.y()));
+
+ // The |canvas| backing should be sized to hold the |paint_rect|.
+ DCHECK_EQ(paint_rect.width(), canvas->getBaseLayerSize().width());
+ DCHECK_EQ(paint_rect.height(), canvas->getBaseLayerSize().height());
+
+ const bool is_scaled =
+ contents_width_scale != 1.f || contents_height_scale != 1.f;
+
+ if (is_scaled && (layer_is_opaque_ || layer_fills_bounds_completely_)) {
+ // Even if completely covered, for rasterizations that touch the edge of the
+ // layer, we also need to raster the background color underneath the last
+ // texel (since the paint won't cover it).
+ //
+ // The final texel of content may only be partially covered by a
+ // rasterization; this rect represents the content rect that is fully
+ // covered by content.
+ const gfx::Rect layer_content_rect = gfx::Rect(layer_content_size);
+ gfx::Rect deflated_layer_content_rect = layer_content_rect;
+ deflated_layer_content_rect.Inset(0, 0, 1, 1);
+
+ if (!layer_content_rect.Contains(deflated_layer_content_rect)) {
+ // Drawing at most 1 x 1 x (canvas width + canvas height) texels is 2-3X
+ // faster than clearing, so special case this.
+ DCHECK_LE(paint_rect.right(), layer_content_rect.right());
+ DCHECK_LE(paint_rect.bottom(), layer_content_rect.bottom());
+ canvas->save();
+ canvas->clipRect(gfx::RectToSkRect(layer_content_rect),
+ SkRegion::kReplace_Op);
+ canvas->clipRect(gfx::RectToSkRect(deflated_layer_content_rect),
+ SkRegion::kDifference_Op);
+ canvas->drawColor(background_color_, SkXfermode::kSrc_Mode);
+ canvas->restore();
+ }
+ }
+
+ gfx::Rect layer_rect;
+ if (is_scaled) {
+ canvas->scale(SkFloatToScalar(contents_width_scale),
+ SkFloatToScalar(contents_height_scale));
+
+ // NOTE: this may go beyond the bounds of the layer, but that shouldn't
+ // cause problems (anything beyond the layer is clipped out).
+ layer_rect = gfx::ScaleToEnclosingRect(
+ paint_rect, 1.f / contents_width_scale, 1.f / contents_height_scale);
+ } else {
+ layer_rect = paint_rect;
+ }
+
+ SkRect layer_sk_rect = SkRect::MakeXYWH(
+ layer_rect.x(), layer_rect.y(), layer_rect.width(), layer_rect.height());
+
+ canvas->clipRect(layer_sk_rect);
+
+ // If the layer has opaque contents or will fill the bounds completely there
+ // is no need to clear the canvas before painting.
+ if (!layer_is_opaque_ && !layer_fills_bounds_completely_) {
+ TRACE_EVENT0("cc", "Clear");
+ canvas->drawColor(SK_ColorTRANSPARENT, SkXfermode::kSrc_Mode);
+ }
+
+ painter_->Paint(canvas, layer_rect);
+ canvas->restore();
+
+ paint_rect_ = paint_rect;
+}
+
+void ContentLayerUpdater::SetOpaque(bool opaque) {
+ layer_is_opaque_ = opaque;
+}
+
+void ContentLayerUpdater::SetFillsBoundsCompletely(bool fills_bounds) {
+ layer_fills_bounds_completely_ = fills_bounds;
+}
+
+void ContentLayerUpdater::SetBackgroundColor(SkColor background_color) {
+ background_color_ = background_color;
+}
+
+} // namespace cc
diff --git a/cc/resources/content_layer_updater.h b/cc/resources/content_layer_updater.h
new file mode 100644
index 0000000..b11281e
--- /dev/null
+++ b/cc/resources/content_layer_updater.h
@@ -0,0 +1,70 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_CONTENT_LAYER_UPDATER_H_
+#define CC_RESOURCES_CONTENT_LAYER_UPDATER_H_
+
+#include "cc/base/cc_export.h"
+#include "cc/resources/layer_updater.h"
+#include "ui/gfx/rect.h"
+
+class SkCanvas;
+
+namespace cc {
+
+class LayerPainter;
+class RenderingStatsInstrumentation;
+
+// Base class for BitmapContentLayerUpdater and
+// SkPictureContentLayerUpdater that reduces code duplication between
+// their respective PaintContents implementations.
+class CC_EXPORT ContentLayerUpdater : public LayerUpdater {
+ public:
+ void set_rendering_stats_instrumentation(RenderingStatsInstrumentation* rsi);
+ virtual void SetOpaque(bool opaque) OVERRIDE;
+ virtual void SetFillsBoundsCompletely(bool fills_bounds) OVERRIDE;
+ virtual void SetBackgroundColor(SkColor background_color) OVERRIDE;
+
+ protected:
+ ContentLayerUpdater(scoped_ptr<LayerPainter> painter,
+ RenderingStatsInstrumentation* stats_instrumentation,
+ int layer_id);
+ virtual ~ContentLayerUpdater();
+
+ // Paints the contents. |content_size| size of the underlying layer in
+ // layer's content space. |paint_rect| bounds to paint in content space of the
+ // layer. Both |content_size| and |paint_rect| are in pixels.
+ void PaintContents(SkCanvas* canvas,
+ const gfx::Size& layer_content_size,
+ const gfx::Rect& paint_rect,
+ float contents_width_scale,
+ float contents_height_scale);
+ gfx::Rect paint_rect() const { return paint_rect_; }
+
+ bool layer_is_opaque() const { return layer_is_opaque_; }
+ bool layer_fills_bounds_completely() const {
+ return layer_fills_bounds_completely_;
+ }
+
+ SkColor background_color() const { return background_color_; }
+
+ RenderingStatsInstrumentation* rendering_stats_instrumentation_;
+ int layer_id_;
+
+ // True when it is known that all output pixels will be opaque.
+ bool layer_is_opaque_;
+ // True when it is known that all output pixels will be filled.
+ bool layer_fills_bounds_completely_;
+
+ private:
+ gfx::Rect paint_rect_;
+ scoped_ptr<LayerPainter> painter_;
+ SkColor background_color_;
+
+ DISALLOW_COPY_AND_ASSIGN(ContentLayerUpdater);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_CONTENT_LAYER_UPDATER_H_
diff --git a/cc/resources/eviction_tile_priority_queue.cc b/cc/resources/eviction_tile_priority_queue.cc
new file mode 100644
index 0000000..9f05e81
--- /dev/null
+++ b/cc/resources/eviction_tile_priority_queue.cc
@@ -0,0 +1,221 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/eviction_tile_priority_queue.h"
+
+namespace cc {
+
+namespace {
+
+// Heap comparator used by EvictionTilePriorityQueue. Orders paired queues so
+// that the queue whose next tile is *least* important (i.e. the best eviction
+// candidate, which sorts as "highest priority" here) surfaces at the front of
+// the heap.
+class EvictionOrderComparator {
+ public:
+  explicit EvictionOrderComparator(TreePriority tree_priority)
+      : tree_priority_(tree_priority) {}
+
+  bool operator()(
+      const EvictionTilePriorityQueue::PairedPictureLayerQueue* a,
+      const EvictionTilePriorityQueue::PairedPictureLayerQueue* b) const {
+    // Note that in this function, we have to return true if and only if
+    // b is strictly lower priority than a. Note that for the sake of
+    // completeness, empty queue is considered to have lowest priority.
+    if (a->IsEmpty() || b->IsEmpty())
+      // True only when b is empty and a is not (false < true).
+      return b->IsEmpty() < a->IsEmpty();
+
+    // Compare the next tile each paired queue would produce, taking it from
+    // whichever tree's iterator is up next for the given tree priority.
+    WhichTree a_tree = a->NextTileIteratorTree(tree_priority_);
+    const PictureLayerImpl::LayerEvictionTileIterator* a_iterator =
+        a_tree == ACTIVE_TREE ? &a->active_iterator : &a->pending_iterator;
+
+    WhichTree b_tree = b->NextTileIteratorTree(tree_priority_);
+    const PictureLayerImpl::LayerEvictionTileIterator* b_iterator =
+        b_tree == ACTIVE_TREE ? &b->active_iterator : &b->pending_iterator;
+
+    const Tile* a_tile = **a_iterator;
+    const Tile* b_tile = **b_iterator;
+
+    const TilePriority& a_priority =
+        a_tile->priority_for_tree_priority(tree_priority_);
+    const TilePriority& b_priority =
+        b_tile->priority_for_tree_priority(tree_priority_);
+    bool prioritize_low_res = tree_priority_ == SMOOTHNESS_TAKES_PRIORITY;
+
+    // If the priority bin differs, b is lower priority if it has the higher
+    // priority bin.
+    if (a_priority.priority_bin != b_priority.priority_bin)
+      return b_priority.priority_bin > a_priority.priority_bin;
+
+    // Otherwise if the resolution differs, then the order will be determined by
+    // whether we prioritize low res or not.
+    // TODO(vmpstr): Remove this when TilePriority is no longer a member of Tile
+    // class but instead produced by the iterators.
+    if (b_priority.resolution != a_priority.resolution) {
+      // Non ideal resolution should be sorted higher than other resolutions.
+      if (a_priority.resolution == NON_IDEAL_RESOLUTION)
+        return false;
+
+      if (b_priority.resolution == NON_IDEAL_RESOLUTION)
+        return true;
+
+      if (prioritize_low_res)
+        return a_priority.resolution == LOW_RESOLUTION;
+      return a_priority.resolution == HIGH_RESOLUTION;
+    }
+
+    // Otherwise if the occlusion differs, b is lower priority if it is
+    // occluded.
+    bool a_is_occluded = a_tile->is_occluded_for_tree_priority(tree_priority_);
+    bool b_is_occluded = b_tile->is_occluded_for_tree_priority(tree_priority_);
+    if (a_is_occluded != b_is_occluded)
+      return b_is_occluded;
+
+    // b is lower priority if it is farther from visible.
+    return b_priority.distance_to_visible > a_priority.distance_to_visible;
+  }
+
+ private:
+  TreePriority tree_priority_;
+};
+
+} // namespace
+
+// tree_priority_ is left unset here; it is assigned in Build() before any
+// queue operation uses it.
+EvictionTilePriorityQueue::EvictionTilePriorityQueue() {
+}
+
+EvictionTilePriorityQueue::~EvictionTilePriorityQueue() {
+}
+
+// Creates one PairedPictureLayerQueue per active/pending layer pair and
+// arranges them into a heap ordered by eviction priority.
+void EvictionTilePriorityQueue::Build(
+    const std::vector<PictureLayerImpl::Pair>& paired_layers,
+    TreePriority tree_priority) {
+  tree_priority_ = tree_priority;
+
+  for (size_t i = 0; i < paired_layers.size(); ++i) {
+    paired_queues_.push_back(make_scoped_ptr(
+        new PairedPictureLayerQueue(paired_layers[i], tree_priority_)));
+  }
+
+  paired_queues_.make_heap(EvictionOrderComparator(tree_priority_));
+}
+
+// Discards all per-layer queues; Build() must be called again before reuse.
+void EvictionTilePriorityQueue::Reset() {
+  paired_queues_.clear();
+}
+
+// Empty when there are no queues, or when the best candidate queue (heap
+// front) has itself run out of tiles.
+bool EvictionTilePriorityQueue::IsEmpty() const {
+  return paired_queues_.empty() || paired_queues_.front()->IsEmpty();
+}
+
+// Returns (without removing) the next tile to evict.
+Tile* EvictionTilePriorityQueue::Top() {
+  DCHECK(!IsEmpty());
+  return paired_queues_.front()->Top(tree_priority_);
+}
+
+// Advances past the current tile: pop the front queue off the heap, advance
+// it, then push it back so the heap re-orders around its new next tile.
+void EvictionTilePriorityQueue::Pop() {
+  DCHECK(!IsEmpty());
+
+  paired_queues_.pop_heap(EvictionOrderComparator(tree_priority_));
+  PairedPictureLayerQueue* paired_queue = paired_queues_.back();
+  paired_queue->Pop(tree_priority_);
+  paired_queues_.push_heap(EvictionOrderComparator(tree_priority_));
+}
+
+EvictionTilePriorityQueue::PairedPictureLayerQueue::PairedPictureLayerQueue() {
+}
+
+// Builds an eviction iterator per tree; a missing layer in the pair gets a
+// default-constructed (empty) iterator.
+EvictionTilePriorityQueue::PairedPictureLayerQueue::PairedPictureLayerQueue(
+    const PictureLayerImpl::Pair& layer_pair,
+    TreePriority tree_priority)
+    : active_iterator(
+          layer_pair.active
+              ? PictureLayerImpl::LayerEvictionTileIterator(layer_pair.active,
+                                                            tree_priority)
+              : PictureLayerImpl::LayerEvictionTileIterator()),
+      pending_iterator(
+          layer_pair.pending
+              ? PictureLayerImpl::LayerEvictionTileIterator(layer_pair.pending,
+                                                            tree_priority)
+              : PictureLayerImpl::LayerEvictionTileIterator()) {
+}
+
+EvictionTilePriorityQueue::PairedPictureLayerQueue::~PairedPictureLayerQueue() {
+}
+
+// Exhausted once both tree iterators are done.
+bool EvictionTilePriorityQueue::PairedPictureLayerQueue::IsEmpty() const {
+  return !active_iterator && !pending_iterator;
+}
+
+// Returns the next tile from whichever tree's iterator is up next. The DCHECK
+// asserts the invariant maintained by Pop(): a tile shared between trees is
+// never returned a second time.
+Tile* EvictionTilePriorityQueue::PairedPictureLayerQueue::Top(
+    TreePriority tree_priority) {
+  DCHECK(!IsEmpty());
+
+  WhichTree next_tree = NextTileIteratorTree(tree_priority);
+  PictureLayerImpl::LayerEvictionTileIterator* next_iterator =
+      next_tree == ACTIVE_TREE ? &active_iterator : &pending_iterator;
+  DCHECK(*next_iterator);
+
+  Tile* tile = **next_iterator;
+  DCHECK(std::find(returned_shared_tiles.begin(),
+                   returned_shared_tiles.end(),
+                   tile) == returned_shared_tiles.end());
+  return tile;
+}
+
+// Advances past the current tile, recording it so that the same Tile (which
+// can be shared between the active and pending trees) is not yielded again by
+// the other tree's iterator.
+void EvictionTilePriorityQueue::PairedPictureLayerQueue::Pop(
+    TreePriority tree_priority) {
+  DCHECK(!IsEmpty());
+
+  WhichTree next_tree = NextTileIteratorTree(tree_priority);
+  PictureLayerImpl::LayerEvictionTileIterator* next_iterator =
+      next_tree == ACTIVE_TREE ? &active_iterator : &pending_iterator;
+  DCHECK(*next_iterator);
+  returned_shared_tiles.push_back(**next_iterator);
+  ++(*next_iterator);
+
+  if (IsEmpty())
+    return;
+
+  // Skip any tiles that were already returned via the other tree's iterator.
+  // NextTileIteratorTree must be re-evaluated after every increment because
+  // advancing an iterator can change which tree is up next (or exhaust it).
+  next_tree = NextTileIteratorTree(tree_priority);
+  next_iterator =
+      next_tree == ACTIVE_TREE ? &active_iterator : &pending_iterator;
+  while (std::find(returned_shared_tiles.begin(),
+                   returned_shared_tiles.end(),
+                   **next_iterator) != returned_shared_tiles.end()) {
+    ++(*next_iterator);
+    if (IsEmpty())
+      break;
+    next_tree = NextTileIteratorTree(tree_priority);
+    next_iterator =
+        next_tree == ACTIVE_TREE ? &active_iterator : &pending_iterator;
+  }
+}
+
+// Decides which tree's iterator supplies the next eviction candidate. For
+// eviction, the *lower* priority tile should come out first, so when the
+// pending tile is higher priority we yield the active one, and vice versa.
+WhichTree
+EvictionTilePriorityQueue::PairedPictureLayerQueue::NextTileIteratorTree(
+    TreePriority tree_priority) const {
+  DCHECK(!IsEmpty());
+
+  // If we only have one iterator with tiles, return it.
+  if (!active_iterator)
+    return PENDING_TREE;
+  if (!pending_iterator)
+    return ACTIVE_TREE;
+
+  // Shared tile: arbitrarily prefer the active tree (Pop() handles the
+  // duplicate on the pending side).
+  const Tile* active_tile = *active_iterator;
+  const Tile* pending_tile = *pending_iterator;
+  if (active_tile == pending_tile)
+    return ACTIVE_TREE;
+
+  const TilePriority& active_priority =
+      active_tile->priority_for_tree_priority(tree_priority);
+  const TilePriority& pending_priority =
+      pending_tile->priority_for_tree_priority(tree_priority);
+
+  if (pending_priority.IsHigherPriorityThan(active_priority))
+    return ACTIVE_TREE;
+  return PENDING_TREE;
+}
+
+} // namespace cc
diff --git a/cc/resources/eviction_tile_priority_queue.h b/cc/resources/eviction_tile_priority_queue.h
new file mode 100644
index 0000000..e91f0d2
--- /dev/null
+++ b/cc/resources/eviction_tile_priority_queue.h
@@ -0,0 +1,61 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_EVICTION_TILE_PRIORITY_QUEUE_H_
+#define CC_RESOURCES_EVICTION_TILE_PRIORITY_QUEUE_H_
+
+#include <utility>
+#include <vector>
+
+#include "cc/base/cc_export.h"
+#include "cc/layers/picture_layer_impl.h"
+#include "cc/resources/tile_priority.h"
+
+namespace cc {
+
+// Produces tiles in eviction order (least important first) across a set of
+// paired active/pending picture layers, using a heap of per-pair queues.
+// NOTE(review): ScopedPtrVector is used below but its header does not appear
+// in this file's includes — presumably pulled in transitively; confirm IWYU.
+class CC_EXPORT EvictionTilePriorityQueue {
+ public:
+  // Merges the active- and pending-tree eviction iterators of one layer pair,
+  // de-duplicating tiles shared between the two trees.
+  struct PairedPictureLayerQueue {
+    PairedPictureLayerQueue();
+    PairedPictureLayerQueue(const PictureLayerImpl::Pair& layer_pair,
+                            TreePriority tree_priority);
+    ~PairedPictureLayerQueue();
+
+    bool IsEmpty() const;
+    Tile* Top(TreePriority tree_priority);
+    void Pop(TreePriority tree_priority);
+
+    WhichTree NextTileIteratorTree(TreePriority tree_priority) const;
+
+    PictureLayerImpl::LayerEvictionTileIterator active_iterator;
+    PictureLayerImpl::LayerEvictionTileIterator pending_iterator;
+
+    // TODO(vmpstr): Investigate removing this.
+    std::vector<Tile*> returned_shared_tiles;
+  };
+
+  EvictionTilePriorityQueue();
+  ~EvictionTilePriorityQueue();
+
+  // Must be called before IsEmpty/Top/Pop; captures |tree_priority| for the
+  // lifetime of this build.
+  void Build(const std::vector<PictureLayerImpl::Pair>& paired_layers,
+             TreePriority tree_priority);
+  void Reset();
+
+  bool IsEmpty() const;
+  Tile* Top();
+  void Pop();
+
+ private:
+  // TODO(vmpstr): This is potentially unnecessary if it becomes the case that
+  // PairedPictureLayerQueue is fast enough to copy. In that case, we can use
+  // objects directly (ie std::vector<PairedPictureLayerQueue>).
+  ScopedPtrVector<PairedPictureLayerQueue> paired_queues_;
+  TreePriority tree_priority_;
+
+  DISALLOW_COPY_AND_ASSIGN(EvictionTilePriorityQueue);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_EVICTION_TILE_PRIORITY_QUEUE_H_
diff --git a/cc/resources/gpu_raster_worker_pool.cc b/cc/resources/gpu_raster_worker_pool.cc
new file mode 100644
index 0000000..cee01fe
--- /dev/null
+++ b/cc/resources/gpu_raster_worker_pool.cc
@@ -0,0 +1,248 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/gpu_raster_worker_pool.h"
+
+#include <algorithm>
+
+#include "base/debug/trace_event.h"
+#include "cc/output/context_provider.h"
+#include "cc/resources/raster_buffer.h"
+#include "cc/resources/resource.h"
+#include "cc/resources/resource_provider.h"
+#include "cc/resources/scoped_gpu_raster.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "third_party/skia/include/core/SkMultiPictureDraw.h"
+#include "third_party/skia/include/core/SkPictureRecorder.h"
+#include "third_party/skia/include/core/SkSurface.h"
+#include "third_party/skia/include/gpu/GrContext.h"
+#include "third_party/skia/include/utils/SkNullCanvas.h"
+
+namespace cc {
+namespace {
+
+// RasterBuffer for GPU rasterization. Raster commands are recorded into an
+// SkPicture between Acquire/ReleaseSkCanvas and queued on the shared
+// SkMultiPictureDraw; actual GPU playback happens later when the pool calls
+// multi_picture_draw_.draw().
+class RasterBufferImpl : public RasterBuffer {
+ public:
+  RasterBufferImpl(ResourceProvider* resource_provider,
+                   const Resource* resource,
+                   SkMultiPictureDraw* multi_picture_draw)
+      : resource_provider_(resource_provider),
+        resource_(resource),
+        surface_(resource_provider->LockForWriteToSkSurface(resource->id())),
+        multi_picture_draw_(multi_picture_draw) {}
+  virtual ~RasterBufferImpl() {
+    resource_provider_->UnlockForWriteToSkSurface(resource_->id());
+  }
+
+  // Overridden from RasterBuffer:
+  virtual skia::RefPtr<SkCanvas> AcquireSkCanvas() OVERRIDE {
+    // No surface: hand out a null canvas so callers can draw harmlessly.
+    if (!surface_)
+      return skia::AdoptRef(SkCreateNullCanvas());
+
+    skia::RefPtr<SkCanvas> canvas = skia::SharePtr(recorder_.beginRecording(
+        resource_->size().width(), resource_->size().height()));
+
+    // Balanced with restore() call in ReleaseSkCanvas. save()/restore() calls
+    // are needed to ensure that canvas returns to its previous state after use.
+    canvas->save();
+    return canvas;
+  }
+  virtual void ReleaseSkCanvas(const skia::RefPtr<SkCanvas>& canvas) OVERRIDE {
+    if (!surface_)
+      return;
+
+    // Balanced with save() call in AcquireSkCanvas.
+    canvas->restore();
+
+    // Add the canvas and recorded picture to |multi_picture_draw_|.
+    skia::RefPtr<SkPicture> picture = skia::AdoptRef(recorder_.endRecording());
+    multi_picture_draw_->add(surface_->getCanvas(), picture.get());
+  }
+
+ private:
+  ResourceProvider* resource_provider_;
+  const Resource* resource_;
+  SkSurface* surface_;  // Not owned; unlocked in the destructor.
+  SkMultiPictureDraw* multi_picture_draw_;
+  SkPictureRecorder recorder_;
+
+  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
+};
+
+} // namespace
+
+// static
+// Factory; the constructor is private so this is the only way to make one.
+scoped_ptr<RasterWorkerPool> GpuRasterWorkerPool::Create(
+    base::SequencedTaskRunner* task_runner,
+    ContextProvider* context_provider,
+    ResourceProvider* resource_provider) {
+  return make_scoped_ptr<RasterWorkerPool>(new GpuRasterWorkerPool(
+      task_runner, context_provider, resource_provider));
+}
+
+// |task_runner|, |context_provider| and |resource_provider| must outlive this
+// pool; none are owned. |client_| is explicitly initialized to NULL — it was
+// previously left uninitialized, so OnRasterFinished() could call through a
+// garbage pointer if a task set finished before SetClient() was called.
+GpuRasterWorkerPool::GpuRasterWorkerPool(base::SequencedTaskRunner* task_runner,
+                                         ContextProvider* context_provider,
+                                         ResourceProvider* resource_provider)
+    : task_runner_(task_runner),
+      task_graph_runner_(new TaskGraphRunner),
+      namespace_token_(task_graph_runner_->GetNamespaceToken()),
+      client_(NULL),
+      context_provider_(context_provider),
+      resource_provider_(resource_provider),
+      run_tasks_on_origin_thread_pending_(false),
+      raster_finished_weak_ptr_factory_(this),
+      weak_ptr_factory_(this) {
+  DCHECK(context_provider_);
+}
+
+GpuRasterWorkerPool::~GpuRasterWorkerPool() {
+  // All completed tasks must have been collected via CheckForCompletedTasks.
+  DCHECK_EQ(0u, completed_tasks_.size());
+}
+
+// This class is both the pool and its rasterizer.
+Rasterizer* GpuRasterWorkerPool::AsRasterizer() {
+  return this;
+}
+
+void GpuRasterWorkerPool::SetClient(RasterizerClient* client) {
+  client_ = client;
+}
+
+// Cancels pending work by scheduling an empty graph, then blocks until any
+// in-flight tasks finish.
+void GpuRasterWorkerPool::Shutdown() {
+  TRACE_EVENT0("cc", "GpuRasterWorkerPool::Shutdown");
+
+  TaskGraph empty;
+  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
+  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
+}
+
+// Builds a task graph from |queue|: one "raster finished" notification task
+// per task set, with every raster task in that set as a dependency edge, then
+// hands the graph to the task graph runner and schedules a run on the origin
+// thread.
+void GpuRasterWorkerPool::ScheduleTasks(RasterTaskQueue* queue) {
+  TRACE_EVENT0("cc", "GpuRasterWorkerPool::ScheduleTasks");
+
+  // Mark all task sets as pending.
+  raster_pending_.set();
+
+  unsigned priority = kRasterTaskPriorityBase;
+
+  graph_.Reset();
+
+  // Cancel existing OnRasterFinished callbacks.
+  raster_finished_weak_ptr_factory_.InvalidateWeakPtrs();
+
+  scoped_refptr<RasterizerTask> new_raster_finished_tasks[kNumberOfTaskSets];
+
+  size_t task_count[kNumberOfTaskSets] = {0};
+
+  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
+    new_raster_finished_tasks[task_set] = CreateRasterFinishedTask(
+        task_runner_.get(),
+        base::Bind(&GpuRasterWorkerPool::OnRasterFinished,
+                   raster_finished_weak_ptr_factory_.GetWeakPtr(),
+                   task_set));
+  }
+
+  for (RasterTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
+       it != queue->items.end();
+       ++it) {
+    const RasterTaskQueue::Item& item = *it;
+    RasterTask* task = item.task;
+    DCHECK(!task->HasCompleted());
+
+    // Each raster task gates completion of every task set it belongs to.
+    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
+      if (!item.task_sets[task_set])
+        continue;
+
+      ++task_count[task_set];
+
+      graph_.edges.push_back(
+          TaskGraph::Edge(task, new_raster_finished_tasks[task_set].get()));
+    }
+
+    // Queue order determines raster priority: earlier items run first.
+    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
+  }
+
+  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
+    InsertNodeForTask(&graph_,
+                      new_raster_finished_tasks[task_set].get(),
+                      kRasterFinishedTaskPriority,
+                      task_count[task_set]);
+  }
+
+  ScheduleTasksOnOriginThread(this, &graph_);
+  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);
+
+  ScheduleRunTasksOnOriginThread();
+
+  // Keep the new finished-tasks alive, replacing (and releasing) the previous
+  // generation.
+  std::copy(new_raster_finished_tasks,
+            new_raster_finished_tasks + kNumberOfTaskSets,
+            raster_finished_tasks_);
+}
+
+// Collects tasks finished by the graph runner and runs their completion and
+// reply callbacks on the origin thread.
+void GpuRasterWorkerPool::CheckForCompletedTasks() {
+  TRACE_EVENT0("cc", "GpuRasterWorkerPool::CheckForCompletedTasks");
+
+  task_graph_runner_->CollectCompletedTasks(namespace_token_,
+                                            &completed_tasks_);
+  for (Task::Vector::const_iterator it = completed_tasks_.begin();
+       it != completed_tasks_.end();
+       ++it) {
+    RasterizerTask* task = static_cast<RasterizerTask*>(it->get());
+
+    task->WillComplete();
+    task->CompleteOnOriginThread(this);
+    task->DidComplete();
+
+    task->RunReplyOnOriginThread();
+  }
+  completed_tasks_.clear();
+}
+
+scoped_ptr<RasterBuffer> GpuRasterWorkerPool::AcquireBufferForRaster(
+    const Resource* resource) {
+  // RasterBuffer implementation depends on a SkSurface having been acquired for
+  // the resource.
+  resource_provider_->AcquireSkSurface(resource->id());
+
+  return make_scoped_ptr<RasterBuffer>(
+      new RasterBufferImpl(resource_provider_, resource, &multi_picture_draw_));
+}
+
+void GpuRasterWorkerPool::ReleaseBufferForRaster(
+    scoped_ptr<RasterBuffer> buffer) {
+  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
+}
+
+// Invoked (via weak pointer) when all raster tasks in |task_set| have run.
+void GpuRasterWorkerPool::OnRasterFinished(TaskSet task_set) {
+  TRACE_EVENT1(
+      "cc", "GpuRasterWorkerPool::OnRasterFinished", "task_set", task_set);
+
+  DCHECK(raster_pending_[task_set]);
+  raster_pending_[task_set] = false;
+  client_->DidFinishRunningTasks(task_set);
+}
+
+// Posts RunTasksOnOriginThread at most once; |run_tasks_on_origin_thread_
+// pending_| collapses repeated requests into a single post.
+void GpuRasterWorkerPool::ScheduleRunTasksOnOriginThread() {
+  if (run_tasks_on_origin_thread_pending_)
+    return;
+
+  task_runner_->PostTask(
+      FROM_HERE,
+      base::Bind(&GpuRasterWorkerPool::RunTasksOnOriginThread,
+                 weak_ptr_factory_.GetWeakPtr()));
+  run_tasks_on_origin_thread_pending_ = true;
+}
+
+void GpuRasterWorkerPool::RunTasksOnOriginThread() {
+  TRACE_EVENT0("cc", "GpuRasterWorkerPool::RunTasksOnOriginThread");
+
+  DCHECK(run_tasks_on_origin_thread_pending_);
+  run_tasks_on_origin_thread_pending_ = false;
+
+  // ScopedGpuRaster sets up/tears down GPU raster state for the duration of
+  // the task run.
+  ScopedGpuRaster gpu_raster(context_provider_);
+  task_graph_runner_->RunUntilIdle();
+
+  // Draw all of the pictures that were collected. This will also clear
+  // the pictures and canvases added to |multi_picture_draw_|.
+  multi_picture_draw_.draw();
+}
+
+} // namespace cc
diff --git a/cc/resources/gpu_raster_worker_pool.h b/cc/resources/gpu_raster_worker_pool.h
new file mode 100644
index 0000000..2149400
--- /dev/null
+++ b/cc/resources/gpu_raster_worker_pool.h
@@ -0,0 +1,80 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_GPU_RASTER_WORKER_POOL_H_
+#define CC_RESOURCES_GPU_RASTER_WORKER_POOL_H_
+
+#include "base/memory/weak_ptr.h"
+#include "cc/resources/raster_worker_pool.h"
+#include "cc/resources/rasterizer.h"
+#include "third_party/skia/include/core/SkMultiPictureDraw.h"
+
+namespace cc {
+class ContextProvider;
+class ResourceProvider;
+
+// RasterWorkerPool that rasterizes on the GPU: raster tasks record SkPictures
+// which are played back in a batch (SkMultiPictureDraw) on the origin thread.
+class CC_EXPORT GpuRasterWorkerPool : public RasterWorkerPool,
+                                      public Rasterizer,
+                                      public RasterizerTaskClient {
+ public:
+  virtual ~GpuRasterWorkerPool();
+
+  static scoped_ptr<RasterWorkerPool> Create(
+      base::SequencedTaskRunner* task_runner,
+      ContextProvider* context_provider,
+      ResourceProvider* resource_provider);
+
+  // Overridden from RasterWorkerPool:
+  virtual Rasterizer* AsRasterizer() OVERRIDE;
+
+  // Overridden from Rasterizer:
+  virtual void SetClient(RasterizerClient* client) OVERRIDE;
+  virtual void Shutdown() OVERRIDE;
+  virtual void ScheduleTasks(RasterTaskQueue* queue) OVERRIDE;
+  virtual void CheckForCompletedTasks() OVERRIDE;
+
+  // Overridden from RasterizerTaskClient:
+  virtual scoped_ptr<RasterBuffer> AcquireBufferForRaster(
+      const Resource* resource) OVERRIDE;
+  virtual void ReleaseBufferForRaster(scoped_ptr<RasterBuffer> buffer) OVERRIDE;
+
+ private:
+  GpuRasterWorkerPool(base::SequencedTaskRunner* task_runner,
+                      ContextProvider* context_provider,
+                      ResourceProvider* resource_provider);
+
+  void OnRasterFinished(TaskSet task_set);
+  void ScheduleRunTasksOnOriginThread();
+  void RunTasksOnOriginThread();
+  // NOTE(review): declared but no definition is visible in the matching .cc —
+  // confirm this is implemented or remove the declaration.
+  void RunTaskOnOriginThread(RasterizerTask* task);
+
+  scoped_refptr<base::SequencedTaskRunner> task_runner_;
+  scoped_ptr<TaskGraphRunner> task_graph_runner_;
+  const NamespaceToken namespace_token_;
+  RasterizerClient* client_;  // Not owned; set via SetClient().
+  ContextProvider* context_provider_;
+  ResourceProvider* resource_provider_;
+  SkMultiPictureDraw multi_picture_draw_;
+
+  bool run_tasks_on_origin_thread_pending_;
+
+  TaskSetCollection raster_pending_;
+
+  scoped_refptr<RasterizerTask> raster_finished_tasks_[kNumberOfTaskSets];
+
+  // Task graph used when scheduling tasks and vector used to gather
+  // completed tasks.
+  TaskGraph graph_;
+  Task::Vector completed_tasks_;
+
+  // Invalidated each ScheduleTasks() to cancel stale OnRasterFinished posts.
+  base::WeakPtrFactory<GpuRasterWorkerPool> raster_finished_weak_ptr_factory_;
+
+  base::WeakPtrFactory<GpuRasterWorkerPool> weak_ptr_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(GpuRasterWorkerPool);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_GPU_RASTER_WORKER_POOL_H_
diff --git a/cc/resources/image_layer_updater.cc b/cc/resources/image_layer_updater.cc
new file mode 100644
index 0000000..0538d96
--- /dev/null
+++ b/cc/resources/image_layer_updater.cc
@@ -0,0 +1,68 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/image_layer_updater.h"
+#include "cc/resources/prioritized_resource.h"
+#include "cc/resources/resource_update_queue.h"
+
+namespace cc {
+
+ImageLayerUpdater::Resource::Resource(ImageLayerUpdater* updater,
+                                      scoped_ptr<PrioritizedResource> texture)
+    : LayerUpdater::Resource(texture.Pass()), updater_(updater) {}
+
+ImageLayerUpdater::Resource::~Resource() {}
+
+// Delegates the texture upload to the owning updater.
+void ImageLayerUpdater::Resource::Update(ResourceUpdateQueue* queue,
+                                         const gfx::Rect& source_rect,
+                                         const gfx::Vector2d& dest_offset,
+                                         bool partial_update) {
+  updater_->UpdateTexture(
+      queue, texture(), source_rect, dest_offset, partial_update);
+}
+
+// static
+// static
+scoped_refptr<ImageLayerUpdater> ImageLayerUpdater::Create() {
+  return make_scoped_refptr(new ImageLayerUpdater());
+}
+
+// Creates a Resource backed by a new PrioritizedResource from |manager|.
+scoped_ptr<LayerUpdater::Resource> ImageLayerUpdater::CreateResource(
+    PrioritizedResourceManager* manager) {
+  return make_scoped_ptr(
+      new Resource(this, PrioritizedResource::Create(manager)));
+}
+
+// Queues an upload of |bitmap_| pixels into |texture|, clipping the request
+// to the bitmap bounds and shifting the destination by the same amount.
+void ImageLayerUpdater::UpdateTexture(ResourceUpdateQueue* queue,
+                                      PrioritizedResource* texture,
+                                      const gfx::Rect& source_rect,
+                                      const gfx::Vector2d& dest_offset,
+                                      bool partial_update) {
+  // Source rect should never go outside the image pixels, even if this
+  // is requested because the texture extends outside the image.
+  gfx::Rect clipped_source_rect = source_rect;
+  gfx::Rect image_rect = gfx::Rect(0, 0, bitmap_.width(), bitmap_.height());
+  clipped_source_rect.Intersect(image_rect);
+
+  // Shift the destination by however much clipping moved the source origin.
+  gfx::Vector2d clipped_dest_offset =
+      dest_offset +
+      gfx::Vector2d(clipped_source_rect.origin() - source_rect.origin());
+
+  ResourceUpdate upload = ResourceUpdate::Create(
+      texture, &bitmap_, image_rect, clipped_source_rect, clipped_dest_offset);
+  if (partial_update)
+    queue->AppendPartialUpload(upload);
+  else
+    queue->AppendFullUpload(upload);
+}
+
+// |bitmap| must have pixels; SkBitmap assignment shares the pixel ref.
+void ImageLayerUpdater::SetBitmap(const SkBitmap& bitmap) {
+  DCHECK(bitmap.pixelRef());
+  bitmap_ = bitmap;
+}
+
+// True if |bitmap| shares the same underlying pixels as the current bitmap.
+bool ImageLayerUpdater::UsingBitmap(const SkBitmap& bitmap) const {
+  return bitmap.pixelRef() == bitmap_.pixelRef();
+}
+
+} // namespace cc
diff --git a/cc/resources/image_layer_updater.h b/cc/resources/image_layer_updater.h
new file mode 100644
index 0000000..b2235b1
--- /dev/null
+++ b/cc/resources/image_layer_updater.h
@@ -0,0 +1,60 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_IMAGE_LAYER_UPDATER_H_
+#define CC_RESOURCES_IMAGE_LAYER_UPDATER_H_
+
+#include "cc/base/cc_export.h"
+#include "cc/resources/layer_updater.h"
+#include "third_party/skia/include/core/SkBitmap.h"
+
+namespace cc {
+
+class ResourceUpdateQueue;
+
+// LayerUpdater that uploads a fixed SkBitmap (set via SetBitmap) into layer
+// textures.
+class CC_EXPORT ImageLayerUpdater : public LayerUpdater {
+ public:
+  class Resource : public LayerUpdater::Resource {
+   public:
+    Resource(ImageLayerUpdater* updater,
+             scoped_ptr<PrioritizedResource> texture);
+    virtual ~Resource();
+
+    virtual void Update(ResourceUpdateQueue* queue,
+                        const gfx::Rect& source_rect,
+                        const gfx::Vector2d& dest_offset,
+                        bool partial_update) OVERRIDE;
+
+   private:
+    ImageLayerUpdater* updater_;  // Not owned; outlives this resource.
+
+    DISALLOW_COPY_AND_ASSIGN(Resource);
+  };
+
+  static scoped_refptr<ImageLayerUpdater> Create();
+
+  virtual scoped_ptr<LayerUpdater::Resource> CreateResource(
+      PrioritizedResourceManager*) OVERRIDE;
+
+  // Queues an upload of the current bitmap; |source_rect| is clipped to the
+  // bitmap bounds.
+  void UpdateTexture(ResourceUpdateQueue* queue,
+                     PrioritizedResource* texture,
+                     const gfx::Rect& source_rect,
+                     const gfx::Vector2d& dest_offset,
+                     bool partial_update);
+
+  void SetBitmap(const SkBitmap& bitmap);
+  // True if |bitmap| shares pixels with the bitmap set via SetBitmap.
+  bool UsingBitmap(const SkBitmap& bitmap) const;
+
+ private:
+  ImageLayerUpdater() {}
+  virtual ~ImageLayerUpdater() {}
+
+  SkBitmap bitmap_;
+
+  DISALLOW_COPY_AND_ASSIGN(ImageLayerUpdater);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_IMAGE_LAYER_UPDATER_H_
diff --git a/cc/resources/layer_painter.h b/cc/resources/layer_painter.h
new file mode 100644
index 0000000..54dc434
--- /dev/null
+++ b/cc/resources/layer_painter.h
@@ -0,0 +1,27 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_LAYER_PAINTER_H_
+#define CC_RESOURCES_LAYER_PAINTER_H_
+
+#include "cc/base/cc_export.h"
+
+class SkCanvas;
+
+namespace gfx {
+class Rect;
+class RectF;
+}
+
+namespace cc {
+
+// Interface for painting a layer's content into an SkCanvas.
+class CC_EXPORT LayerPainter {
+ public:
+  virtual ~LayerPainter() {}
+  // Paints |content_rect| (in content space) into |canvas|.
+  virtual void Paint(SkCanvas* canvas, const gfx::Rect& content_rect) = 0;
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_LAYER_PAINTER_H_
diff --git a/cc/resources/layer_quad.cc b/cc/resources/layer_quad.cc
new file mode 100644
index 0000000..fdd6866
--- /dev/null
+++ b/cc/resources/layer_quad.cc
@@ -0,0 +1,67 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/layer_quad.h"
+
+#include "base/logging.h"
+#include "ui/gfx/quad_f.h"
+
+namespace cc {
+
+// Builds the line through p and q as the equation x_*X + y_*Y + z_ = 0:
+// (x_, y_) is the normal (the segment's tangent rotated 90 degrees) and z_
+// is the 2D cross product p x q. The whole equation is normalized so that
+// (x_, y_) is unit length, making z_ the signed distance from the origin.
+LayerQuad::Edge::Edge(const gfx::PointF& p, const gfx::PointF& q) {
+  DCHECK(p != q);
+
+  gfx::Vector2dF tangent(p.y() - q.y(), q.x() - p.x());
+  float cross2 = p.x() * q.y() - q.x() * p.y();
+
+  set(tangent.x(), tangent.y(), cross2);
+  scale(1.0f / tangent.Length());
+}
+
+// Converts a quad into four edge equations. The sign flip for
+// counter-clockwise quads keeps all edge normals pointing consistently
+// (so Inflate moves edges outward regardless of winding).
+LayerQuad::LayerQuad(const gfx::QuadF& quad) {
+  // Create edges.
+  left_ = Edge(quad.p4(), quad.p1());
+  right_ = Edge(quad.p2(), quad.p3());
+  top_ = Edge(quad.p1(), quad.p2());
+  bottom_ = Edge(quad.p3(), quad.p4());
+
+  float sign = quad.IsCounterClockwise() ? -1 : 1;
+  left_.scale(sign);
+  right_.scale(sign);
+  top_.scale(sign);
+  bottom_.scale(sign);
+}
+
+LayerQuad::LayerQuad(const Edge& left,
+                     const Edge& top,
+                     const Edge& right,
+                     const Edge& bottom)
+    : left_(left),
+      top_(top),
+      right_(right),
+      bottom_(bottom) {}
+
+// Reconstructs corner points by intersecting adjacent edge lines, in
+// p1..p4 (top-left, top-right, bottom-right, bottom-left) order.
+gfx::QuadF LayerQuad::ToQuadF() const {
+  return gfx::QuadF(left_.Intersect(top_),
+                    top_.Intersect(right_),
+                    right_.Intersect(bottom_),
+                    bottom_.Intersect(left_));
+}
+
+// Packs the four edge equations into |flattened| as three floats (x, y, z)
+// per edge, in left, top, right, bottom order.
+void LayerQuad::ToFloatArray(float flattened[12]) const {
+  const Edge* edges[] = {&left_, &top_, &right_, &bottom_};
+  for (int i = 0; i < 4; ++i) {
+    flattened[3 * i + 0] = edges[i]->x();
+    flattened[3 * i + 1] = edges[i]->y();
+    flattened[3 * i + 2] = edges[i]->z();
+  }
+}
+
+} // namespace cc
diff --git a/cc/resources/layer_quad.h b/cc/resources/layer_quad.h
new file mode 100644
index 0000000..1d71193
--- /dev/null
+++ b/cc/resources/layer_quad.h
@@ -0,0 +1,114 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#ifndef CC_RESOURCES_LAYER_QUAD_H_
+#define CC_RESOURCES_LAYER_QUAD_H_
+
+#include "base/basictypes.h"
+#include "cc/base/cc_export.h"
+#include "ui/gfx/point_f.h"
+
+namespace gfx {
+class QuadF;
+}
+
+static const float kAntiAliasingInflateDistance = 0.5f;
+
+namespace cc {
+
+// A quad represented as four oriented edge lines (used for anti-aliased quad
+// drawing). Each Edge stores a line equation x*X + y*Y + z = 0.
+class CC_EXPORT LayerQuad {
+ public:
+  class Edge {
+   public:
+    Edge() : x_(0), y_(0), z_(0) {}
+    // Line through p and q, normalized so (x_, y_) is a unit normal.
+    Edge(const gfx::PointF& p, const gfx::PointF& q);
+
+    float x() const { return x_; }
+    float y() const { return y_; }
+    float z() const { return z_; }
+
+    void set_x(float x) { x_ = x; }
+    void set_y(float y) { y_ = y; }
+    void set_z(float z) { z_ = z; }
+    void set(float x, float y, float z) {
+      x_ = x;
+      y_ = y;
+      z_ = z;
+    }
+
+    void move_x(float dx) { x_ += dx; }
+    void move_y(float dy) { y_ += dy; }
+    void move_z(float dz) { z_ += dz; }
+    void move(float dx, float dy, float dz) {
+      x_ += dx;
+      y_ += dy;
+      z_ += dz;
+    }
+
+    void scale_x(float sx) { x_ *= sx; }
+    void scale_y(float sy) { y_ *= sy; }
+    void scale_z(float sz) { z_ *= sz; }
+    void scale(float sx, float sy, float sz) {
+      x_ *= sx;
+      y_ *= sy;
+      z_ *= sz;
+    }
+    void scale(float s) { scale(s, s, s); }
+
+    // Intersection point of this line with line |e| (Cramer's rule).
+    gfx::PointF Intersect(const Edge& e) const {
+      return gfx::PointF(
+          (y() * e.z() - e.y() * z()) / (x() * e.y() - e.x() * y()),
+          (x() * e.z() - e.x() * z()) / (e.x() * y() - x() * e.y()));
+    }
+
+   private:
+    float x_;
+    float y_;
+    float z_;
+  };
+
+  LayerQuad(const Edge& left,
+            const Edge& top,
+            const Edge& right,
+            const Edge& bottom);
+  explicit LayerQuad(const gfx::QuadF& quad);
+
+  Edge left() const { return left_; }
+  Edge top() const { return top_; }
+  Edge right() const { return right_; }
+  Edge bottom() const { return bottom_; }
+
+  // Moving an edge's z term shifts the line along its normal, expanding the
+  // quad outward by |dx|/|dy|.
+  void InflateX(float dx) {
+    left_.move_z(dx);
+    right_.move_z(dx);
+  }
+  void InflateY(float dy) {
+    top_.move_z(dy);
+    bottom_.move_z(dy);
+  }
+  void Inflate(float d) {
+    InflateX(d);
+    InflateY(d);
+  }
+  void InflateAntiAliasingDistance() {
+    Inflate(kAntiAliasingInflateDistance);
+  }
+
+  gfx::QuadF ToQuadF() const;
+
+  // Writes x, y, z of each edge (left, top, right, bottom) into |flattened|.
+  void ToFloatArray(float flattened[12]) const;
+
+ private:
+  Edge left_;
+  Edge top_;
+  Edge right_;
+  Edge bottom_;
+
+  DISALLOW_COPY_AND_ASSIGN(LayerQuad);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_LAYER_QUAD_H_
diff --git a/cc/resources/layer_quad_unittest.cc b/cc/resources/layer_quad_unittest.cc
new file mode 100644
index 0000000..8d3909c
--- /dev/null
+++ b/cc/resources/layer_quad_unittest.cc
@@ -0,0 +1,42 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/layer_quad.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gfx/quad_f.h"
+
+namespace cc {
+namespace {
+
+// QuadF -> LayerQuad -> QuadF must round-trip for both windings.
+TEST(LayerQuadTest, QuadFConversion) {
+  gfx::PointF p1(-0.5f, -0.5f);
+  gfx::PointF p2(0.5f, -0.5f);
+  gfx::PointF p3(0.5f, 0.5f);
+  gfx::PointF p4(-0.5f, 0.5f);
+
+  gfx::QuadF quad_cw(p1, p2, p3, p4);
+  LayerQuad layer_quad_cw(quad_cw);
+  EXPECT_EQ(layer_quad_cw.ToQuadF(), quad_cw);
+
+  gfx::QuadF quad_ccw(p1, p4, p3, p2);
+  LayerQuad layer_quad_ccw(quad_ccw);
+  EXPECT_EQ(layer_quad_ccw.ToQuadF(), quad_ccw);
+}
+
+// Inflating the unit-centered quad by 0.5 on every side equals scaling it
+// by 2 about the origin.
+TEST(LayerQuadTest, Inflate) {
+  gfx::PointF p1(-0.5f, -0.5f);
+  gfx::PointF p2(0.5f, -0.5f);
+  gfx::PointF p3(0.5f, 0.5f);
+  gfx::PointF p4(-0.5f, 0.5f);
+
+  gfx::QuadF quad(p1, p2, p3, p4);
+  LayerQuad layer_quad(quad);
+  quad.Scale(2.f, 2.f);
+  layer_quad.Inflate(0.5f);
+  EXPECT_EQ(layer_quad.ToQuadF(), quad);
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/layer_tiling_data.cc b/cc/resources/layer_tiling_data.cc
new file mode 100644
index 0000000..63b0036
--- /dev/null
+++ b/cc/resources/layer_tiling_data.cc
@@ -0,0 +1,116 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/layer_tiling_data.h"
+
+#include <vector>
+
+#include "base/logging.h"
+#include "cc/base/region.h"
+#include "cc/base/simple_enclosed_region.h"
+
+namespace cc {
+
+// Factory wrapping the protected constructor in a scoped_ptr.
+scoped_ptr<LayerTilingData> LayerTilingData::Create(const gfx::Size& tile_size,
+                                                    BorderTexelOption border) {
+  return make_scoped_ptr(new LayerTilingData(tile_size, border));
+}
+
+// Starts with an empty tiling size. The member initializer already records
+// |tile_size|, so the SetTileSize() call below early-outs; it is kept so
+// construction and later size changes share one code path.
+LayerTilingData::LayerTilingData(const gfx::Size& tile_size,
+                                 BorderTexelOption border)
+    : tiling_data_(tile_size, gfx::Size(), border == HAS_BORDER_TEXELS) {
+  SetTileSize(tile_size);
+}
+
+LayerTilingData::~LayerTilingData() {}
+
+// Changes the per-tile (max texture) size. All existing tiles are dropped,
+// since their contents were produced for the old dimensions.
+void LayerTilingData::SetTileSize(const gfx::Size& size) {
+  if (tile_size() == size)
+    return;
+
+  reset();
+
+  tiling_data_.SetMaxTextureSize(size);
+}
+
+// The tile size is stored as the tiling's max texture size.
+gfx::Size LayerTilingData::tile_size() const {
+  return tiling_data_.max_texture_size();
+}
+
+// Toggles border texels. As with SetTileSize(), flipping this setting
+// invalidates every existing tile, so they are all dropped first.
+void LayerTilingData::SetBorderTexelOption(
+    BorderTexelOption border_texel_option) {
+  bool border_texels = border_texel_option == HAS_BORDER_TEXELS;
+  if (has_border_texels() == border_texels)
+    return;
+
+  reset();
+  tiling_data_.SetHasBorderTexels(border_texels);
+}
+
+// Copies only the tiling geometry. Tiles are scoped_ptr-owned and are
+// deliberately not copied; the destination keeps its own tile map.
+const LayerTilingData& LayerTilingData::operator=
+    (const LayerTilingData & tiler) {
+  tiling_data_ = tiler.tiling_data_;
+
+  return *this;
+}
+
+// Takes ownership of |tile| and registers it at grid position (i, j).
+// A tile must not already exist at that position.
+void LayerTilingData::AddTile(scoped_ptr<Tile> tile, int i, int j) {
+  DCHECK(!TileAt(i, j));
+  tile->move_to(i, j);
+  tiles_.add(std::make_pair(i, j), tile.Pass());
+}
+
+// Removes the tile at (i, j), passing ownership to the caller.
+scoped_ptr<LayerTilingData::Tile> LayerTilingData::TakeTile(int i, int j) {
+  return tiles_.take_and_erase(std::make_pair(i, j));
+}
+
+// Returns the tile at (i, j) without transferring ownership.
+LayerTilingData::Tile* LayerTilingData::TileAt(int i, int j) const {
+  return tiles_.get(std::make_pair(i, j));
+}
+
+// Maps |content_rect| (content space) to the inclusive range of tile
+// indices [*left, *right] x [*top, *bottom] that intersect it.
+void LayerTilingData::ContentRectToTileIndices(const gfx::Rect& content_rect,
+                                               int* left,
+                                               int* top,
+                                               int* right,
+                                               int* bottom) const {
+  // An empty rect doesn't result in an empty set of tiles, so don't pass an
+  // empty rect.
+  // TODO(enne): Possibly we should fill a vector of tiles instead, since the
+  // normal use of this function is to enumerate some tiles.
+  DCHECK(!content_rect.IsEmpty());
+
+  *left = tiling_data_.TileXIndexFromSrcCoord(content_rect.x());
+  *top = tiling_data_.TileYIndexFromSrcCoord(content_rect.y());
+  // right()/bottom() are exclusive, so step back one pixel to get the last
+  // covered coordinate before converting it to a tile index.
+  *right = tiling_data_.TileXIndexFromSrcCoord(content_rect.right() - 1);
+  *bottom = tiling_data_.TileYIndexFromSrcCoord(content_rect.bottom() - 1);
+}
+
+// Returns |tile|'s rect including border texels, with the size forced to the
+// uniform tile size.
+// NOTE(review): set_size() presumably widens edge tiles (whose bounds may be
+// smaller) to match their full backing texture -- confirm against TilingData.
+gfx::Rect LayerTilingData::TileRect(const Tile* tile) const {
+  gfx::Rect tile_rect = tiling_data_.TileBoundsWithBorder(tile->i(), tile->j());
+  tile_rect.set_size(tile_size());
+  return tile_rect;
+}
+
+// Resizes the tiled area. Tiles whose indices fall entirely outside the new
+// bounds are destroyed; tiles still inside (even partially) are kept.
+void LayerTilingData::SetTilingSize(const gfx::Size& tiling_size) {
+  tiling_data_.SetTilingSize(tiling_size);
+  if (tiling_size.IsEmpty()) {
+    tiles_.clear();
+    return;
+  }
+
+  // Any tiles completely outside our new bounds are invalid and should be
+  // dropped.
+  int left, top, right, bottom;
+  ContentRectToTileIndices(
+      gfx::Rect(tiling_size), &left, &top, &right, &bottom);
+  std::vector<TileMapKey> invalid_tile_keys;
+  for (TileMap::const_iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
+    // Only the upper bounds matter: the rect starts at the origin, so |left|
+    // and |top| are the minimum valid indices already.
+    if (it->first.first > right || it->first.second > bottom)
+      invalid_tile_keys.push_back(it->first);
+  }
+  // Erase after iteration so the loop over |tiles_| stays valid.
+  for (size_t i = 0; i < invalid_tile_keys.size(); ++i)
+    tiles_.erase(invalid_tile_keys[i]);
+}
+
+} // namespace cc
diff --git a/cc/resources/layer_tiling_data.h b/cc/resources/layer_tiling_data.h
new file mode 100644
index 0000000..b369d61
--- /dev/null
+++ b/cc/resources/layer_tiling_data.h
@@ -0,0 +1,102 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_LAYER_TILING_DATA_H_
+#define CC_RESOURCES_LAYER_TILING_DATA_H_
+
+#include <utility>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/containers/scoped_ptr_hash_map.h"
+#include "base/memory/scoped_ptr.h"
+#include "cc/base/cc_export.h"
+#include "cc/base/simple_enclosed_region.h"
+#include "cc/base/tiling_data.h"
+#include "ui/gfx/rect.h"
+
+namespace cc {
+
+// Manages a grid of tiles covering a layer: owns the tiles and maps between
+// content-space rects and tile indices. Geometry (tile size, borders,
+// bounds) is delegated to the embedded TilingData.
+class CC_EXPORT LayerTilingData {
+ public:
+  enum BorderTexelOption {
+    HAS_BORDER_TEXELS,
+    NO_BORDER_TEXELS
+  };
+
+  ~LayerTilingData();
+
+  static scoped_ptr<LayerTilingData> Create(const gfx::Size& tile_size,
+                                            BorderTexelOption option);
+
+  bool has_empty_bounds() const { return tiling_data_.has_empty_bounds(); }
+  int num_tiles_x() const { return tiling_data_.num_tiles_x(); }
+  int num_tiles_y() const { return tiling_data_.num_tiles_y(); }
+  gfx::Rect tile_bounds(int i, int j) const {
+    return tiling_data_.TileBounds(i, j);
+  }
+  gfx::Vector2d texture_offset(int x_index, int y_index) const {
+    return tiling_data_.TextureOffset(x_index, y_index);
+  }
+
+  // Change the tile size. This may invalidate all the existing tiles.
+  void SetTileSize(const gfx::Size& size);
+  gfx::Size tile_size() const;
+  // Change the border texel setting. This may invalidate all existing tiles.
+  void SetBorderTexelOption(BorderTexelOption option);
+  bool has_border_texels() const { return !!tiling_data_.border_texels(); }
+
+  bool is_empty() const { return has_empty_bounds() || !tiles().size(); }
+
+  // Copies only the tiling geometry; tiles (scoped_ptr-owned) do not follow.
+  const LayerTilingData& operator=(const LayerTilingData&);
+
+  // A single tile in the grid; tracks only its own (i, j) position.
+  class Tile {
+   public:
+    Tile() : i_(-1), j_(-1) {}
+    virtual ~Tile() {}
+
+    int i() const { return i_; }
+    int j() const { return j_; }
+    void move_to(int i, int j) {
+      i_ = i;
+      j_ = j;
+    }
+
+   private:
+    int i_;  // -1 until positioned via move_to().
+    int j_;
+    DISALLOW_COPY_AND_ASSIGN(Tile);
+  };
+  typedef std::pair<int, int> TileMapKey;
+  typedef base::ScopedPtrHashMap<TileMapKey, Tile> TileMap;
+
+  void AddTile(scoped_ptr<Tile> tile, int i, int j);
+  scoped_ptr<Tile> TakeTile(int i, int j);
+  Tile* TileAt(int i, int j) const;
+  const TileMap& tiles() const { return tiles_; }
+
+  void SetTilingSize(const gfx::Size& tiling_size);
+  gfx::Size tiling_size() const { return tiling_data_.tiling_size(); }
+
+  void ContentRectToTileIndices(const gfx::Rect& rect,
+                                int* left,
+                                int* top,
+                                int* right,
+                                int* bottom) const;
+  gfx::Rect TileRect(const Tile* tile) const;
+
+  // Drops all tiles without touching the tiling geometry.
+  void reset() { tiles_.clear(); }
+
+ protected:
+  LayerTilingData(const gfx::Size& tile_size, BorderTexelOption option);
+
+  TileMap tiles_;
+  TilingData tiling_data_;
+
+  DISALLOW_COPY(LayerTilingData);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_LAYER_TILING_DATA_H_
diff --git a/cc/resources/layer_updater.cc b/cc/resources/layer_updater.cc
new file mode 100644
index 0000000..aa7d1a4
--- /dev/null
+++ b/cc/resources/layer_updater.cc
@@ -0,0 +1,16 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/layer_updater.h"
+
+#include "cc/resources/prioritized_resource.h"
+
+namespace cc {
+
+// Takes ownership of the backing texture for this per-resource state.
+LayerUpdater::Resource::Resource(scoped_ptr<PrioritizedResource> texture)
+    : texture_(texture.Pass()) {}
+
+LayerUpdater::Resource::~Resource() {}
+
+} // namespace cc
diff --git a/cc/resources/layer_updater.h b/cc/resources/layer_updater.h
new file mode 100644
index 0000000..4614c5a
--- /dev/null
+++ b/cc/resources/layer_updater.h
@@ -0,0 +1,76 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_LAYER_UPDATER_H_
+#define CC_RESOURCES_LAYER_UPDATER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "cc/base/cc_export.h"
+#include "third_party/skia/include/core/SkColor.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/vector2d.h"
+
+namespace cc {
+
+class PrioritizedResource;
+class PrioritizedResourceManager;
+class ResourceUpdateQueue;
+class TextureManager;
+
+// Interface for objects that update the contents of a layer's textures.
+// Concrete updaters create Resource objects whose Update() pushes texture
+// changes onto a ResourceUpdateQueue.
+class CC_EXPORT LayerUpdater : public base::RefCounted<LayerUpdater> {
+ public:
+  // Allows updaters to store per-resource update properties.
+  class CC_EXPORT Resource {
+   public:
+    virtual ~Resource();
+
+    // Backing texture; owned by this Resource.
+    PrioritizedResource* texture() { return texture_.get(); }
+    // TODO(reveman): partial_update should be a property of this class
+    // instead of an argument passed to Update().
+    virtual void Update(ResourceUpdateQueue* queue,
+                        const gfx::Rect& source_rect,
+                        const gfx::Vector2d& dest_offset,
+                        bool partial_update) = 0;
+
+   protected:
+    explicit Resource(scoped_ptr<PrioritizedResource> texture);
+
+   private:
+    scoped_ptr<PrioritizedResource> texture_;
+
+    DISALLOW_COPY_AND_ASSIGN(Resource);
+  };
+
+  LayerUpdater() {}
+
+  virtual scoped_ptr<Resource> CreateResource(
+      PrioritizedResourceManager* manager) = 0;
+  // The following hooks default to no-ops; subclasses override what they
+  // need.
+  virtual void PrepareToUpdate(const gfx::Size& content_size,
+                               const gfx::Rect& paint_rect,
+                               const gfx::Size& tile_size,
+                               float contents_width_scale,
+                               float contents_height_scale) {}
+  virtual void ReduceMemoryUsage() {}
+
+  // Set true by the layer when it is known that the entire output is going to
+  // be opaque.
+  virtual void SetOpaque(bool opaque) {}
+  // Set true by the layer when it is known that the entire output bounds will
+  // be rasterized.
+  virtual void SetFillsBoundsCompletely(bool fills_bounds) {}
+  virtual void SetBackgroundColor(SkColor background_color) {}
+
+ protected:
+  // Protected: lifetime is managed by base::RefCounted.
+  virtual ~LayerUpdater() {}
+
+ private:
+  friend class base::RefCounted<LayerUpdater>;
+
+  DISALLOW_COPY_AND_ASSIGN(LayerUpdater);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_LAYER_UPDATER_H_
diff --git a/cc/resources/managed_tile_state.cc b/cc/resources/managed_tile_state.cc
new file mode 100644
index 0000000..2841711
--- /dev/null
+++ b/cc/resources/managed_tile_state.cc
@@ -0,0 +1,94 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/managed_tile_state.h"
+
+#include <limits>
+#include <string>
+
+#include "base/debug/trace_event_argument.h"
+#include "cc/base/math_util.h"
+
+namespace cc {
+
+// Debug/tracing helper: human-readable name for |bin|. The switch has no
+// default case so the compiler can warn when a new bin is left unhandled.
+std::string ManagedTileBinToString(ManagedTileBin bin) {
+  switch (bin) {
+    case NOW_AND_READY_TO_DRAW_BIN:
+      return "NOW_AND_READY_TO_DRAW_BIN";
+    case NOW_BIN:
+      return "NOW_BIN";
+    case SOON_BIN:
+      return "SOON_BIN";
+    case EVENTUALLY_AND_ACTIVE_BIN:
+      return "EVENTUALLY_AND_ACTIVE_BIN";
+    case EVENTUALLY_BIN:
+      return "EVENTUALLY_BIN";
+    case AT_LAST_AND_ACTIVE_BIN:
+      return "AT_LAST_AND_ACTIVE_BIN";
+    case AT_LAST_BIN:
+      return "AT_LAST_BIN";
+    case NEVER_BIN:
+      return "NEVER_BIN";
+    case NUM_BINS:
+      NOTREACHED();
+      return "Invalid Bin (NUM_BINS)";
+  }
+  // Unreachable for valid enum values; covers out-of-range input.
+  return "Invalid Bin (UNKNOWN)";
+}
+
+// Tiles start out unscheduled: NEVER_BIN, non-ideal resolution, lowest
+// priority, and "infinitely far" from the visible area.
+ManagedTileState::ManagedTileState()
+    : bin(NEVER_BIN),
+      resolution(NON_IDEAL_RESOLUTION),
+      required_for_activation(false),
+      priority_bin(TilePriority::EVENTUALLY),
+      distance_to_visible(std::numeric_limits<float>::infinity()),
+      visible_and_ready_to_draw(false),
+      scheduled_priority(0) {
+}
+
+// Default draw mode is RESOURCE_MODE; the solid color is only meaningful
+// once set_solid_color() switches the mode.
+ManagedTileState::DrawInfo::DrawInfo()
+    : mode_(RESOURCE_MODE), solid_color_(SK_ColorWHITE) {
+}
+
+ManagedTileState::DrawInfo::~DrawInfo() {
+  // The resource must have been released/returned before destruction.
+  DCHECK(!resource_);
+}
+
+// A tile can be drawn when it has a live resource (RESOURCE_MODE) or needs
+// none at draw time (solid color / rasterize-on-demand).
+bool ManagedTileState::DrawInfo::IsReadyToDraw() const {
+  switch (mode_) {
+    case RESOURCE_MODE:
+      return !!resource_;
+    case SOLID_COLOR_MODE:
+    case PICTURE_PILE_MODE:
+      return true;
+  }
+  NOTREACHED();
+  return false;
+}
+
+ManagedTileState::~ManagedTileState() {}
+
+// Serializes this tile's scheduling/draw state into |state| for tracing.
+void ManagedTileState::AsValueInto(base::debug::TracedValue* state) const {
+  bool has_resource = (draw_info.resource_.get() != 0);
+  bool has_active_task = (raster_task.get() != 0);
+
+  // Either a live resource or a pending raster task counts as using GPU
+  // memory for reporting purposes.
+  bool is_using_gpu_memory = has_resource || has_active_task;
+
+  state->SetBoolean("has_resource", has_resource);
+  state->SetBoolean("is_using_gpu_memory", is_using_gpu_memory);
+  state->SetString("bin", ManagedTileBinToString(bin));
+  state->SetString("resolution", TileResolutionToString(resolution));
+  state->SetString("priority_bin", TilePriorityBinToString(priority_bin));
+  state->SetDouble("distance_to_visible",
+                   MathUtil::AsFloatSafely(distance_to_visible));
+  state->SetBoolean("required_for_activation", required_for_activation);
+  state->SetBoolean("is_solid_color",
+                    draw_info.mode_ == DrawInfo::SOLID_COLOR_MODE);
+  // Fully transparent solid-color tiles (alpha == 0) need no drawing at all.
+  state->SetBoolean("is_transparent",
+                    draw_info.mode_ == DrawInfo::SOLID_COLOR_MODE &&
+                        !SkColorGetA(draw_info.solid_color_));
+  state->SetInteger("scheduled_priority", scheduled_priority);
+}
+
+} // namespace cc
diff --git a/cc/resources/managed_tile_state.h b/cc/resources/managed_tile_state.h
new file mode 100644
index 0000000..58b77c6
--- /dev/null
+++ b/cc/resources/managed_tile_state.h
@@ -0,0 +1,124 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_MANAGED_TILE_STATE_H_
+#define CC_RESOURCES_MANAGED_TILE_STATE_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "cc/resources/platform_color.h"
+#include "cc/resources/rasterizer.h"
+#include "cc/resources/resource_pool.h"
+#include "cc/resources/resource_provider.h"
+#include "cc/resources/scoped_resource.h"
+#include "cc/resources/tile_priority.h"
+
+namespace cc {
+
+class TileManager;
+
+// Tile manager classifying tiles into a few basic bins:
+enum ManagedTileBin {
+  NOW_AND_READY_TO_DRAW_BIN = 0,  // Ready to draw and within viewport.
+  NOW_BIN = 1,                    // Needed ASAP.
+  SOON_BIN = 2,                   // Impl-side version of prepainting.
+  EVENTUALLY_AND_ACTIVE_BIN = 3,  // Nice to have, and has a task or resource.
+  EVENTUALLY_BIN = 4,             // Nice to have, if we've got memory and time.
+  AT_LAST_AND_ACTIVE_BIN = 5,     // Only do this after all other bins.
+  AT_LAST_BIN = 6,                // Only do this after all other bins.
+  NEVER_BIN = 7,                  // Don't bother.
+  NUM_BINS = 8
+  // NOTE: Be sure to update ManagedTileBinAsValue and kBinPolicyMap when adding
+  // or reordering fields.
+};
+// NOTE(review): no matching definition appears in managed_tile_state.cc,
+// which defines ManagedTileBinToString() instead -- confirm whether this
+// declaration is stale.
+scoped_ptr<base::Value> ManagedTileBinAsValue(ManagedTileBin bin);
+
+// This is state that is specific to a tile that is
+// managed by the TileManager.
+class CC_EXPORT ManagedTileState {
+ public:
+  // This class holds all the state relevant to drawing a tile.
+  class CC_EXPORT DrawInfo {
+   public:
+    // How the tile is drawn: from a rastered resource, as a single solid
+    // color, or rastered on demand from the picture pile.
+    enum Mode { RESOURCE_MODE, SOLID_COLOR_MODE, PICTURE_PILE_MODE };
+
+    DrawInfo();
+    ~DrawInfo();
+
+    Mode mode() const { return mode_; }
+
+    bool IsReadyToDraw() const;
+
+    // Valid only in RESOURCE_MODE with a live resource (DCHECKed).
+    ResourceProvider::ResourceId get_resource_id() const {
+      DCHECK(mode_ == RESOURCE_MODE);
+      DCHECK(resource_);
+
+      return resource_->id();
+    }
+
+    // Valid only in SOLID_COLOR_MODE (DCHECKed).
+    SkColor get_solid_color() const {
+      DCHECK(mode_ == SOLID_COLOR_MODE);
+
+      return solid_color_;
+    }
+
+    // True when the resource's component order differs from the platform's.
+    bool contents_swizzled() const {
+      DCHECK(resource_);
+      return !PlatformColor::SameComponentOrder(resource_->format());
+    }
+
+    bool requires_resource() const {
+      return mode_ == RESOURCE_MODE || mode_ == PICTURE_PILE_MODE;
+    }
+
+    inline bool has_resource() const { return !!resource_; }
+
+    void SetSolidColorForTesting(SkColor color) { set_solid_color(color); }
+    void SetResourceForTesting(scoped_ptr<ScopedResource> resource) {
+      resource_ = resource.Pass();
+    }
+
+   private:
+    friend class TileManager;
+    friend class PrioritizedTileSet;
+    friend class Tile;
+    friend class ManagedTileState;
+
+    void set_use_resource() { mode_ = RESOURCE_MODE; }
+
+    void set_solid_color(const SkColor& color) {
+      mode_ = SOLID_COLOR_MODE;
+      solid_color_ = color;
+    }
+
+    void set_rasterize_on_demand() { mode_ = PICTURE_PILE_MODE; }
+
+    Mode mode_;
+    SkColor solid_color_;
+    scoped_ptr<ScopedResource> resource_;
+  };
+
+  ManagedTileState();
+  ~ManagedTileState();
+
+  void AsValueInto(base::debug::TracedValue* dict) const;
+
+  // Persisted state: valid all the time.
+  DrawInfo draw_info;
+  scoped_refptr<RasterTask> raster_task;
+
+  ManagedTileBin bin;
+
+  TileResolution resolution;
+  bool required_for_activation;
+  TilePriority::PriorityBin priority_bin;
+  float distance_to_visible;
+  bool visible_and_ready_to_draw;
+
+  // Priority for this state from the last time we assigned memory.
+  unsigned scheduled_priority;
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_MANAGED_TILE_STATE_H_
diff --git a/cc/resources/memory_history.cc b/cc/resources/memory_history.cc
new file mode 100644
index 0000000..a2f8b6e
--- /dev/null
+++ b/cc/resources/memory_history.cc
@@ -0,0 +1,39 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/memory_history.h"
+
+#include <limits>
+
+namespace cc {
+
+// static
+scoped_ptr<MemoryHistory> MemoryHistory::Create() {
+  return make_scoped_ptr(new MemoryHistory());
+}
+
+MemoryHistory::MemoryHistory() {}
+
+// Appends one frame's memory statistics to the ring buffer.
+void MemoryHistory::SaveEntry(const MemoryHistory::Entry& entry) {
+  ring_buffer_.SaveToBuffer(entry);
+}
+
+// Scans the recorded history and reports the smallest and largest
+// bytes_total() values seen. With an empty history both outputs are 0.
+void MemoryHistory::GetMinAndMax(size_t* min, size_t* max) const {
+  size_t lowest = std::numeric_limits<size_t>::max();
+  size_t highest = 0;
+
+  for (RingBufferType::Iterator it = ring_buffer_.Begin(); it; ++it) {
+    const size_t bytes_total = it->bytes_total();
+    if (bytes_total < lowest)
+      lowest = bytes_total;
+    if (bytes_total > highest)
+      highest = bytes_total;
+  }
+
+  // No entries recorded: collapse the sentinel minimum down to the maximum.
+  if (lowest > highest)
+    lowest = highest;
+
+  *min = lowest;
+  *max = highest;
+}
+
+} // namespace cc
diff --git a/cc/resources/memory_history.h b/cc/resources/memory_history.h
new file mode 100644
index 0000000..daca10f
--- /dev/null
+++ b/cc/resources/memory_history.h
@@ -0,0 +1,55 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_MEMORY_HISTORY_H_
+#define CC_RESOURCES_MEMORY_HISTORY_H_
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "cc/debug/ring_buffer.h"
+
+namespace cc {
+
+// Maintains a history of memory for each frame.
+class MemoryHistory {
+ public:
+  static scoped_ptr<MemoryHistory> Create();
+
+  size_t HistorySize() const { return ring_buffer_.BufferSize(); }
+
+  // One frame's worth of memory statistics.
+  struct Entry {
+    Entry()
+        : total_budget_in_bytes(0),
+          bytes_allocated(0),
+          bytes_unreleasable(0),
+          bytes_over(0) {}
+
+    size_t total_budget_in_bytes;
+    size_t bytes_allocated;
+    size_t bytes_unreleasable;
+    size_t bytes_over;
+    // Sum of all tracked allocation categories.
+    size_t bytes_total() const {
+      return bytes_allocated + bytes_unreleasable + bytes_over;
+    }
+  };
+
+  void SaveEntry(const Entry& entry);
+  // Smallest and largest bytes_total() in the history (both 0 when empty).
+  void GetMinAndMax(size_t* min, size_t* max) const;
+
+  // The last 80 entries are retained.
+  typedef RingBuffer<Entry, 80> RingBufferType;
+  RingBufferType::Iterator Begin() const { return ring_buffer_.Begin(); }
+  RingBufferType::Iterator End() const { return ring_buffer_.End(); }
+
+ private:
+  MemoryHistory();
+
+  RingBufferType ring_buffer_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryHistory);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_MEMORY_HISTORY_H_
diff --git a/cc/resources/one_copy_raster_worker_pool.cc b/cc/resources/one_copy_raster_worker_pool.cc
new file mode 100644
index 0000000..a86d498
--- /dev/null
+++ b/cc/resources/one_copy_raster_worker_pool.cc
@@ -0,0 +1,283 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/one_copy_raster_worker_pool.h"
+
+#include <algorithm>
+
+#include "base/debug/trace_event.h"
+#include "base/debug/trace_event_argument.h"
+#include "base/strings/stringprintf.h"
+#include "cc/debug/traced_value.h"
+#include "cc/resources/raster_buffer.h"
+#include "cc/resources/resource_pool.h"
+#include "cc/resources/scoped_resource.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "third_party/skia/include/utils/SkNullCanvas.h"
+
+namespace cc {
+namespace {
+
+// RasterBuffer implementation that paints into a mapped staging resource
+// acquired from |resource_pool| and, on destruction, copies the staging
+// contents into the target |resource| -- hence "one copy".
+class RasterBufferImpl : public RasterBuffer {
+ public:
+  RasterBufferImpl(ResourceProvider* resource_provider,
+                   ResourcePool* resource_pool,
+                   const Resource* resource)
+      : resource_provider_(resource_provider),
+        resource_pool_(resource_pool),
+        resource_(resource),
+        raster_resource_(resource_pool->AcquireResource(resource->size())),
+        buffer_(NULL),
+        stride_(0) {
+    // Acquire and map image for raster resource.
+    resource_provider_->AcquireImage(raster_resource_->id());
+    buffer_ = resource_provider_->MapImage(raster_resource_->id(), &stride_);
+  }
+
+  virtual ~RasterBufferImpl() {
+    // First unmap image for raster resource.
+    resource_provider_->UnmapImage(raster_resource_->id());
+
+    // Copy contents of raster resource to |resource_|.
+    resource_provider_->CopyResource(raster_resource_->id(), resource_->id());
+
+    // This RasterBuffer implementation provides direct access to the memory
+    // used by the GPU. Read lock fences are required to ensure that we're not
+    // trying to map a resource that is currently in-use by the GPU.
+    resource_provider_->EnableReadLockFences(raster_resource_->id());
+
+    // Return raster resource to pool so it can be used by another RasterBuffer
+    // instance.
+    resource_pool_->ReleaseResource(raster_resource_.Pass());
+  }
+
+  // Overridden from RasterBuffer:
+  virtual skia::RefPtr<SkCanvas> AcquireSkCanvas() OVERRIDE {
+    // Mapping can fail; a null canvas silently discards the raster work.
+    if (!buffer_)
+      return skia::AdoptRef(SkCreateNullCanvas());
+
+    RasterWorkerPool::AcquireBitmapForBuffer(
+        &bitmap_, buffer_, resource_->format(), resource_->size(), stride_);
+    return skia::AdoptRef(new SkCanvas(bitmap_));
+  }
+  virtual void ReleaseSkCanvas(const skia::RefPtr<SkCanvas>& canvas) OVERRIDE {
+    if (!buffer_)
+      return;
+
+    RasterWorkerPool::ReleaseBitmapForBuffer(
+        &bitmap_, buffer_, resource_->format());
+  }
+
+ private:
+  ResourceProvider* resource_provider_;
+  ResourcePool* resource_pool_;
+  const Resource* resource_;  // Final destination of the raster result.
+  scoped_ptr<ScopedResource> raster_resource_;  // Staging resource.
+  uint8_t* buffer_;  // Mapped staging memory; may be NULL if mapping failed.
+  int stride_;
+  SkBitmap bitmap_;
+
+  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
+};
+
+} // namespace
+
+// static
+// The raw pointer arguments are stored unowned and must outlive the pool.
+scoped_ptr<RasterWorkerPool> OneCopyRasterWorkerPool::Create(
+    base::SequencedTaskRunner* task_runner,
+    TaskGraphRunner* task_graph_runner,
+    ContextProvider* context_provider,
+    ResourceProvider* resource_provider,
+    ResourcePool* resource_pool) {
+  return make_scoped_ptr<RasterWorkerPool>(
+      new OneCopyRasterWorkerPool(task_runner,
+                                  task_graph_runner,
+                                  context_provider,
+                                  resource_provider,
+                                  resource_pool));
+}
+
+// The |client_| member was previously left uninitialized; OnRasterFinished()
+// dereferences it, so initialize it to NULL here to make pre-SetClient()
+// state deterministic. All raw pointers are unowned.
+OneCopyRasterWorkerPool::OneCopyRasterWorkerPool(
+    base::SequencedTaskRunner* task_runner,
+    TaskGraphRunner* task_graph_runner,
+    ContextProvider* context_provider,
+    ResourceProvider* resource_provider,
+    ResourcePool* resource_pool)
+    : task_runner_(task_runner),
+      task_graph_runner_(task_graph_runner),
+      namespace_token_(task_graph_runner->GetNamespaceToken()),
+      client_(NULL),
+      context_provider_(context_provider),
+      resource_provider_(resource_provider),
+      resource_pool_(resource_pool),
+      raster_finished_weak_ptr_factory_(this) {
+  // A context is required for the flush in CheckForCompletedTasks().
+  DCHECK(context_provider_);
+}
+
+OneCopyRasterWorkerPool::~OneCopyRasterWorkerPool() {
+}
+
+Rasterizer* OneCopyRasterWorkerPool::AsRasterizer() {
+  return this;
+}
+
+void OneCopyRasterWorkerPool::SetClient(RasterizerClient* client) {
+  client_ = client;
+}
+
+// Drains outstanding work: schedules an empty graph in our namespace (so no
+// further tasks run) and blocks until already-running tasks finish.
+void OneCopyRasterWorkerPool::Shutdown() {
+  TRACE_EVENT0("cc", "OneCopyRasterWorkerPool::Shutdown");
+
+  TaskGraph empty;
+  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
+  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
+}
+
+// Builds a new task graph from |queue| and hands it to the task graph
+// runner, replacing whatever was previously scheduled in our namespace.
+void OneCopyRasterWorkerPool::ScheduleTasks(RasterTaskQueue* queue) {
+  TRACE_EVENT0("cc", "OneCopyRasterWorkerPool::ScheduleTasks");
+
+  if (raster_pending_.none())
+    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);
+
+  // Mark all task sets as pending.
+  raster_pending_.set();
+
+  unsigned priority = kRasterTaskPriorityBase;
+
+  graph_.Reset();
+
+  // Cancel existing OnRasterFinished callbacks.
+  raster_finished_weak_ptr_factory_.InvalidateWeakPtrs();
+
+  // One "finished" sentinel task per task set; each depends on every raster
+  // task in its set and triggers OnRasterFinished when they are all done.
+  scoped_refptr<RasterizerTask> new_raster_finished_tasks[kNumberOfTaskSets];
+
+  size_t task_count[kNumberOfTaskSets] = {0};
+
+  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
+    new_raster_finished_tasks[task_set] = CreateRasterFinishedTask(
+        task_runner_.get(),
+        base::Bind(&OneCopyRasterWorkerPool::OnRasterFinished,
+                   raster_finished_weak_ptr_factory_.GetWeakPtr(),
+                   task_set));
+  }
+
+  resource_pool_->CheckBusyResources();
+
+  for (RasterTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
+       it != queue->items.end();
+       ++it) {
+    const RasterTaskQueue::Item& item = *it;
+    RasterTask* task = item.task;
+    DCHECK(!task->HasCompleted());
+
+    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
+      if (!item.task_sets[task_set])
+        continue;
+
+      ++task_count[task_set];
+
+      // Make this set's sentinel depend on |task|.
+      graph_.edges.push_back(
+          TaskGraph::Edge(task, new_raster_finished_tasks[task_set].get()));
+    }
+
+    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
+  }
+
+  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
+    InsertNodeForTask(&graph_,
+                      new_raster_finished_tasks[task_set].get(),
+                      kRasterFinishedTaskPriority,
+                      task_count[task_set]);
+  }
+
+  ScheduleTasksOnOriginThread(this, &graph_);
+  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);
+
+  // Keep the new sentinels alive until the next ScheduleTasks() call.
+  std::copy(new_raster_finished_tasks,
+            new_raster_finished_tasks + kNumberOfTaskSets,
+            raster_finished_tasks_);
+
+  resource_pool_->ReduceResourceUsage();
+
+  TRACE_EVENT_ASYNC_STEP_INTO1(
+      "cc", "ScheduledTasks", this, "rasterizing", "state", StateAsValue());
+}
+
+// Collects tasks that finished on worker threads and runs their completion
+// and reply callbacks here on the origin thread.
+void OneCopyRasterWorkerPool::CheckForCompletedTasks() {
+  TRACE_EVENT0("cc", "OneCopyRasterWorkerPool::CheckForCompletedTasks");
+
+  task_graph_runner_->CollectCompletedTasks(namespace_token_,
+                                            &completed_tasks_);
+  for (Task::Vector::const_iterator it = completed_tasks_.begin();
+       it != completed_tasks_.end();
+       ++it) {
+    RasterizerTask* task = static_cast<RasterizerTask*>(it->get());
+
+    task->WillComplete();
+    task->CompleteOnOriginThread(this);
+    task->DidComplete();
+
+    task->RunReplyOnOriginThread();
+  }
+  completed_tasks_.clear();
+
+  // Flush the GL context after processing the batch of completions.
+  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
+}
+
+// Hands out a RasterBufferImpl that stages into a pool resource and copies
+// into |resource| when released.
+scoped_ptr<RasterBuffer> OneCopyRasterWorkerPool::AcquireBufferForRaster(
+    const Resource* resource) {
+  DCHECK_EQ(resource->format(), resource_pool_->resource_format());
+  return make_scoped_ptr<RasterBuffer>(
+      new RasterBufferImpl(resource_provider_, resource_pool_, resource));
+}
+
+void OneCopyRasterWorkerPool::ReleaseBufferForRaster(
+    scoped_ptr<RasterBuffer> buffer) {
+  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
+}
+
+// Invoked via the per-set "finished" sentinel task once every raster task in
+// |task_set| has completed. Clears the pending bit and notifies the client.
+void OneCopyRasterWorkerPool::OnRasterFinished(TaskSet task_set) {
+  TRACE_EVENT1(
+      "cc", "OneCopyRasterWorkerPool::OnRasterFinished", "task_set", task_set);
+
+  DCHECK(raster_pending_[task_set]);
+  raster_pending_[task_set] = false;
+  if (raster_pending_.any()) {
+    TRACE_EVENT_ASYNC_STEP_INTO1(
+        "cc", "ScheduledTasks", this, "rasterizing", "state", StateAsValue());
+  } else {
+    // Last pending set: close out the async trace span.
+    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
+  }
+  client_->DidFinishRunningTasks(task_set);
+}
+
+// Builds the tracing snapshot of pending task sets and staging state.
+scoped_refptr<base::debug::ConvertableToTraceFormat>
+OneCopyRasterWorkerPool::StateAsValue() const {
+  scoped_refptr<base::debug::TracedValue> state =
+      new base::debug::TracedValue();
+
+  state->BeginArray("tasks_pending");
+  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
+    state->AppendBoolean(raster_pending_[task_set]);
+  state->EndArray();
+  state->BeginDictionary("staging_state");
+  StagingStateAsValueInto(state.get());
+  state->EndDictionary();
+
+  return state;
+}
+
+// Reports resource-pool usage; "pending copy" is computed as total minus
+// currently-acquired pool resources (and likewise for bytes).
+void OneCopyRasterWorkerPool::StagingStateAsValueInto(
+    base::debug::TracedValue* staging_state) const {
+  staging_state->SetInteger("staging_resource_count",
+                            resource_pool_->total_resource_count());
+  staging_state->SetInteger("bytes_used_for_staging_resources",
+                            resource_pool_->total_memory_usage_bytes());
+  staging_state->SetInteger("pending_copy_count",
+                            resource_pool_->total_resource_count() -
+                                resource_pool_->acquired_resource_count());
+  staging_state->SetInteger("bytes_pending_copy",
+                            resource_pool_->total_memory_usage_bytes() -
+                                resource_pool_->acquired_memory_usage_bytes());
+}
+
+} // namespace cc
diff --git a/cc/resources/one_copy_raster_worker_pool.h b/cc/resources/one_copy_raster_worker_pool.h
new file mode 100644
index 0000000..1943868
--- /dev/null
+++ b/cc/resources/one_copy_raster_worker_pool.h
@@ -0,0 +1,88 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_ONE_COPY_RASTER_WORKER_POOL_H_
+#define CC_RESOURCES_ONE_COPY_RASTER_WORKER_POOL_H_
+
+#include "base/memory/weak_ptr.h"
+#include "base/values.h"
+#include "cc/output/context_provider.h"
+#include "cc/resources/raster_worker_pool.h"
+#include "cc/resources/rasterizer.h"
+
+namespace base {
+namespace debug {
+class ConvertableToTraceFormat;
+class TracedValue;
+}
+}
+
+namespace cc {
+class ResourcePool;
+class ResourceProvider;
+class ScopedResource;
+
+// RasterWorkerPool implementation that rasterizes into intermediate staging
+// resources acquired from a ResourcePool and copies results into the
+// destination resources -- the "one copy" strategy.
+class CC_EXPORT OneCopyRasterWorkerPool : public RasterWorkerPool,
+                                          public Rasterizer,
+                                          public RasterizerTaskClient {
+ public:
+  virtual ~OneCopyRasterWorkerPool();
+
+  // The raw pointer arguments are stored unowned and must outlive the pool.
+  static scoped_ptr<RasterWorkerPool> Create(
+      base::SequencedTaskRunner* task_runner,
+      TaskGraphRunner* task_graph_runner,
+      ContextProvider* context_provider,
+      ResourceProvider* resource_provider,
+      ResourcePool* resource_pool);
+
+  // Overridden from RasterWorkerPool:
+  virtual Rasterizer* AsRasterizer() OVERRIDE;
+
+  // Overridden from Rasterizer:
+  virtual void SetClient(RasterizerClient* client) OVERRIDE;
+  virtual void Shutdown() OVERRIDE;
+  virtual void ScheduleTasks(RasterTaskQueue* queue) OVERRIDE;
+  virtual void CheckForCompletedTasks() OVERRIDE;
+
+  // Overridden from RasterizerTaskClient:
+  virtual scoped_ptr<RasterBuffer> AcquireBufferForRaster(
+      const Resource* resource) OVERRIDE;
+  virtual void ReleaseBufferForRaster(scoped_ptr<RasterBuffer> buffer) OVERRIDE;
+
+ protected:
+  OneCopyRasterWorkerPool(base::SequencedTaskRunner* task_runner,
+                          TaskGraphRunner* task_graph_runner,
+                          ContextProvider* context_provider,
+                          ResourceProvider* resource_provider,
+                          ResourcePool* resource_pool);
+
+ private:
+  // Called when all raster tasks in |task_set| have completed.
+  void OnRasterFinished(TaskSet task_set);
+  scoped_refptr<base::debug::ConvertableToTraceFormat> StateAsValue() const;
+  void StagingStateAsValueInto(base::debug::TracedValue* staging_state) const;
+
+  scoped_refptr<base::SequencedTaskRunner> task_runner_;
+  TaskGraphRunner* task_graph_runner_;
+  const NamespaceToken namespace_token_;
+  RasterizerClient* client_;  // Unowned; assigned via SetClient().
+  ContextProvider* context_provider_;
+  ResourceProvider* resource_provider_;
+  ResourcePool* resource_pool_;
+  // One bit per task set; set while that set has unfinished raster work.
+  TaskSetCollection raster_pending_;
+  // Keeps the per-set "finished" sentinel tasks alive between schedules.
+  scoped_refptr<RasterizerTask> raster_finished_tasks_[kNumberOfTaskSets];
+
+  // Task graph used when scheduling tasks and vector used to gather
+  // completed tasks.
+  TaskGraph graph_;
+  Task::Vector completed_tasks_;
+
+  base::WeakPtrFactory<OneCopyRasterWorkerPool>
+      raster_finished_weak_ptr_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(OneCopyRasterWorkerPool);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_ONE_COPY_RASTER_WORKER_POOL_H_
diff --git a/cc/resources/picture.cc b/cc/resources/picture.cc
new file mode 100644
index 0000000..6fa5abc
--- /dev/null
+++ b/cc/resources/picture.cc
@@ -0,0 +1,556 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/picture.h"
+
+#include <algorithm>
+#include <limits>
+#include <set>
+
+#include "base/base64.h"
+#include "base/debug/trace_event.h"
+#include "base/debug/trace_event_argument.h"
+#include "base/values.h"
+#include "cc/base/math_util.h"
+#include "cc/base/util.h"
+#include "cc/debug/traced_picture.h"
+#include "cc/debug/traced_value.h"
+#include "cc/layers/content_layer_client.h"
+#include "skia/ext/pixel_ref_utils.h"
+#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkData.h"
+#include "third_party/skia/include/core/SkDrawFilter.h"
+#include "third_party/skia/include/core/SkPaint.h"
+#include "third_party/skia/include/core/SkPictureRecorder.h"
+#include "third_party/skia/include/core/SkStream.h"
+#include "third_party/skia/include/utils/SkNullCanvas.h"
+#include "third_party/skia/include/utils/SkPictureUtils.h"
+#include "ui/gfx/codec/jpeg_codec.h"
+#include "ui/gfx/codec/png_codec.h"
+#include "ui/gfx/rect_conversions.h"
+#include "ui/gfx/skia_util.h"
+
+namespace cc {
+
+namespace {
+
+SkData* EncodeBitmap(size_t* offset, const SkBitmap& bm) {
+ const int kJpegQuality = 80;
+ std::vector<unsigned char> data;
+
+ // If bitmap is opaque, encode as JPEG.
+ // Otherwise encode as PNG.
+ bool encoding_succeeded = false;
+ if (bm.isOpaque()) {
+ SkAutoLockPixels lock_bitmap(bm);
+ if (bm.empty())
+ return NULL;
+
+ encoding_succeeded = gfx::JPEGCodec::Encode(
+ reinterpret_cast<unsigned char*>(bm.getAddr32(0, 0)),
+ gfx::JPEGCodec::FORMAT_SkBitmap,
+ bm.width(),
+ bm.height(),
+ bm.rowBytes(),
+ kJpegQuality,
+ &data);
+ } else {
+ encoding_succeeded = gfx::PNGCodec::EncodeBGRASkBitmap(bm, false, &data);
+ }
+
+ if (encoding_succeeded) {
+ *offset = 0;
+ return SkData::NewWithCopy(&data.front(), data.size());
+ }
+ return NULL;
+}
+
+bool DecodeBitmap(const void* buffer, size_t size, SkBitmap* bm) {
+ const unsigned char* data = static_cast<const unsigned char *>(buffer);
+
+ // Try PNG first.
+ if (gfx::PNGCodec::Decode(data, size, bm))
+ return true;
+
+ // Try JPEG.
+ scoped_ptr<SkBitmap> decoded_jpeg(gfx::JPEGCodec::Decode(data, size));
+ if (decoded_jpeg) {
+ *bm = *decoded_jpeg;
+ return true;
+ }
+ return false;
+}
+
+} // namespace
+
+scoped_refptr<Picture> Picture::Create(
+ const gfx::Rect& layer_rect,
+ ContentLayerClient* client,
+ const SkTileGridFactory::TileGridInfo& tile_grid_info,
+ bool gather_pixel_refs,
+ RecordingMode recording_mode) {
+ scoped_refptr<Picture> picture = make_scoped_refptr(new Picture(layer_rect));
+
+ picture->Record(client, tile_grid_info, recording_mode);
+ if (gather_pixel_refs)
+ picture->GatherPixelRefs(tile_grid_info);
+
+ return picture;
+}
+
+Picture::Picture(const gfx::Rect& layer_rect)
+ : layer_rect_(layer_rect),
+ cell_size_(layer_rect.size()) {
+ // Instead of recording a trace event for object creation here, we wait for
+ // the picture to be recorded in Picture::Record.
+}
+
+scoped_refptr<Picture> Picture::CreateFromSkpValue(const base::Value* value) {
+ // Decode the picture from base64.
+ std::string encoded;
+ if (!value->GetAsString(&encoded))
+ return NULL;
+
+ std::string decoded;
+ base::Base64Decode(encoded, &decoded);
+ SkMemoryStream stream(decoded.data(), decoded.size());
+
+ // Read the picture. This creates an empty picture on failure.
+ SkPicture* skpicture = SkPicture::CreateFromStream(&stream, &DecodeBitmap);
+ if (skpicture == NULL)
+ return NULL;
+
+ gfx::Rect layer_rect(skpicture->width(), skpicture->height());
+ return make_scoped_refptr(new Picture(skpicture, layer_rect));
+}
+
+scoped_refptr<Picture> Picture::CreateFromValue(const base::Value* raw_value) {
+ const base::DictionaryValue* value = NULL;
+ if (!raw_value->GetAsDictionary(&value))
+ return NULL;
+
+ // Decode the picture from base64.
+ std::string encoded;
+ if (!value->GetString("skp64", &encoded))
+ return NULL;
+
+ std::string decoded;
+ base::Base64Decode(encoded, &decoded);
+ SkMemoryStream stream(decoded.data(), decoded.size());
+
+ const base::Value* layer_rect_value = NULL;
+ if (!value->Get("params.layer_rect", &layer_rect_value))
+ return NULL;
+
+ gfx::Rect layer_rect;
+ if (!MathUtil::FromValue(layer_rect_value, &layer_rect))
+ return NULL;
+
+ // Read the picture. This creates an empty picture on failure.
+ SkPicture* skpicture = SkPicture::CreateFromStream(&stream, &DecodeBitmap);
+ if (skpicture == NULL)
+ return NULL;
+
+ return make_scoped_refptr(new Picture(skpicture, layer_rect));
+}
+
+Picture::Picture(SkPicture* picture, const gfx::Rect& layer_rect)
+ : layer_rect_(layer_rect),
+ picture_(skia::AdoptRef(picture)),
+ cell_size_(layer_rect.size()) {
+}
+
+Picture::Picture(const skia::RefPtr<SkPicture>& picture,
+ const gfx::Rect& layer_rect,
+ const PixelRefMap& pixel_refs) :
+ layer_rect_(layer_rect),
+ picture_(picture),
+ pixel_refs_(pixel_refs),
+ cell_size_(layer_rect.size()) {
+}
+
+Picture::~Picture() {
+ TRACE_EVENT_OBJECT_DELETED_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("cc.debug"), "cc::Picture", this);
+}
+
+bool Picture::IsSuitableForGpuRasterization() const {
+ DCHECK(picture_);
+
+ // TODO(alokp): SkPicture::suitableForGpuRasterization needs a GrContext.
+ // Ideally this GrContext should be the same as that for rasterizing this
+ // picture. But we are on the main thread while the rasterization context
+ // may be on the compositor or raster thread.
+ // SkPicture::suitableForGpuRasterization is not implemented yet.
+ // Pass a NULL context for now and discuss with skia folks if the context
+ // is really needed.
+ return picture_->suitableForGpuRasterization(NULL);
+}
+
+int Picture::ApproximateOpCount() const {
+ DCHECK(picture_);
+ return picture_->approximateOpCount();
+}
+
+bool Picture::HasText() const {
+ DCHECK(picture_);
+ return picture_->hasText();
+}
+
+void Picture::Record(ContentLayerClient* painter,
+ const SkTileGridFactory::TileGridInfo& tile_grid_info,
+ RecordingMode recording_mode) {
+ TRACE_EVENT2("cc",
+ "Picture::Record",
+ "data",
+ AsTraceableRecordData(),
+ "recording_mode",
+ recording_mode);
+
+ DCHECK(!picture_);
+ DCHECK(!tile_grid_info.fTileInterval.isEmpty());
+
+ SkTileGridFactory factory(tile_grid_info);
+ SkPictureRecorder recorder;
+
+ scoped_ptr<EXPERIMENTAL::SkRecording> recording;
+
+ skia::RefPtr<SkCanvas> canvas;
+ canvas = skia::SharePtr(recorder.beginRecording(
+ layer_rect_.width(), layer_rect_.height(), &factory));
+
+ ContentLayerClient::GraphicsContextStatus graphics_context_status =
+ ContentLayerClient::GRAPHICS_CONTEXT_ENABLED;
+
+ switch (recording_mode) {
+ case RECORD_NORMALLY:
+ // Already setup for normal recording.
+ break;
+ case RECORD_WITH_SK_NULL_CANVAS:
+ canvas = skia::AdoptRef(SkCreateNullCanvas());
+ break;
+ case RECORD_WITH_PAINTING_DISABLED:
+      // We pass a disable flag through the paint calls when performance
+      // testing (the only time this case should ever arise) because we want
+      // to prevent the Blink GraphicsContext object from consuming any
+      // compute time.
+ canvas = skia::AdoptRef(SkCreateNullCanvas());
+ graphics_context_status = ContentLayerClient::GRAPHICS_CONTEXT_DISABLED;
+ break;
+ case RECORD_WITH_SKRECORD:
+ recording.reset(new EXPERIMENTAL::SkRecording(layer_rect_.width(),
+ layer_rect_.height()));
+ canvas = skia::SharePtr(recording->canvas());
+ break;
+ default:
+ NOTREACHED();
+ }
+
+ canvas->save();
+ canvas->translate(SkFloatToScalar(-layer_rect_.x()),
+ SkFloatToScalar(-layer_rect_.y()));
+
+ SkRect layer_skrect = SkRect::MakeXYWH(layer_rect_.x(),
+ layer_rect_.y(),
+ layer_rect_.width(),
+ layer_rect_.height());
+ canvas->clipRect(layer_skrect);
+
+ painter->PaintContents(canvas.get(), layer_rect_, graphics_context_status);
+
+ canvas->restore();
+ picture_ = skia::AdoptRef(recorder.endRecording());
+ DCHECK(picture_);
+
+ if (recording) {
+    // SkRecording requires that it be the only object holding the canvas
+    // before releasePlayback() is called. (This helps enforce thread-safety.)
+ canvas.clear();
+ playback_.reset(recording->releasePlayback());
+ }
+
+ EmitTraceSnapshot();
+}
+
+void Picture::GatherPixelRefs(
+ const SkTileGridFactory::TileGridInfo& tile_grid_info) {
+ TRACE_EVENT2("cc", "Picture::GatherPixelRefs",
+ "width", layer_rect_.width(),
+ "height", layer_rect_.height());
+
+ DCHECK(picture_);
+ DCHECK(pixel_refs_.empty());
+ if (!WillPlayBackBitmaps())
+ return;
+ cell_size_ = gfx::Size(
+ tile_grid_info.fTileInterval.width() +
+ 2 * tile_grid_info.fMargin.width(),
+ tile_grid_info.fTileInterval.height() +
+ 2 * tile_grid_info.fMargin.height());
+ DCHECK_GT(cell_size_.width(), 0);
+ DCHECK_GT(cell_size_.height(), 0);
+
+ int min_x = std::numeric_limits<int>::max();
+ int min_y = std::numeric_limits<int>::max();
+ int max_x = 0;
+ int max_y = 0;
+
+ skia::DiscardablePixelRefList pixel_refs;
+ skia::PixelRefUtils::GatherDiscardablePixelRefs(picture_.get(), &pixel_refs);
+ for (skia::DiscardablePixelRefList::const_iterator it = pixel_refs.begin();
+ it != pixel_refs.end();
+ ++it) {
+ gfx::Point min(
+ RoundDown(static_cast<int>(it->pixel_ref_rect.x()),
+ cell_size_.width()),
+ RoundDown(static_cast<int>(it->pixel_ref_rect.y()),
+ cell_size_.height()));
+ gfx::Point max(
+ RoundDown(static_cast<int>(std::ceil(it->pixel_ref_rect.right())),
+ cell_size_.width()),
+ RoundDown(static_cast<int>(std::ceil(it->pixel_ref_rect.bottom())),
+ cell_size_.height()));
+
+ for (int y = min.y(); y <= max.y(); y += cell_size_.height()) {
+ for (int x = min.x(); x <= max.x(); x += cell_size_.width()) {
+ PixelRefMapKey key(x, y);
+ pixel_refs_[key].push_back(it->pixel_ref);
+ }
+ }
+
+ min_x = std::min(min_x, min.x());
+ min_y = std::min(min_y, min.y());
+ max_x = std::max(max_x, max.x());
+ max_y = std::max(max_y, max.y());
+ }
+
+ min_pixel_cell_ = gfx::Point(min_x, min_y);
+ max_pixel_cell_ = gfx::Point(max_x, max_y);
+}
+
+int Picture::Raster(SkCanvas* canvas,
+ SkDrawPictureCallback* callback,
+ const Region& negated_content_region,
+ float contents_scale) const {
+ TRACE_EVENT_BEGIN1(
+ "cc",
+ "Picture::Raster",
+ "data",
+ AsTraceableRasterData(contents_scale));
+
+ DCHECK(picture_);
+
+ canvas->save();
+
+ for (Region::Iterator it(negated_content_region); it.has_rect(); it.next())
+ canvas->clipRect(gfx::RectToSkRect(it.rect()), SkRegion::kDifference_Op);
+
+ canvas->scale(contents_scale, contents_scale);
+ canvas->translate(layer_rect_.x(), layer_rect_.y());
+ if (playback_) {
+ playback_->draw(canvas);
+ } else if (callback) {
+    // If we have a callback, we need to call |draw()|, since |drawPicture()|
+    // doesn't take a callback. This is used by |AnalysisCanvas| to early out.
+ picture_->draw(canvas, callback);
+ } else {
+ // Prefer to call |drawPicture()| on the canvas since it could place the
+ // entire picture on the canvas instead of parsing the skia operations.
+ canvas->drawPicture(picture_.get());
+ }
+ SkIRect bounds;
+ canvas->getClipDeviceBounds(&bounds);
+ canvas->restore();
+ TRACE_EVENT_END1(
+ "cc", "Picture::Raster",
+ "num_pixels_rasterized", bounds.width() * bounds.height());
+ return bounds.width() * bounds.height();
+}
+
+void Picture::Replay(SkCanvas* canvas) {
+ TRACE_EVENT_BEGIN0("cc", "Picture::Replay");
+ DCHECK(picture_);
+
+ if (playback_) {
+ playback_->draw(canvas);
+ } else {
+ picture_->draw(canvas);
+ }
+ SkIRect bounds;
+ canvas->getClipDeviceBounds(&bounds);
+ TRACE_EVENT_END1("cc", "Picture::Replay",
+ "num_pixels_replayed", bounds.width() * bounds.height());
+}
+
+scoped_ptr<base::Value> Picture::AsValue() const {
+ SkDynamicMemoryWStream stream;
+
+ if (playback_) {
+ // SkPlayback can't serialize itself, so re-record into an SkPicture.
+ SkPictureRecorder recorder;
+ skia::RefPtr<SkCanvas> canvas(skia::SharePtr(recorder.beginRecording(
+ layer_rect_.width(),
+ layer_rect_.height(),
+ NULL))); // Default (no) bounding-box hierarchy is fastest.
+ playback_->draw(canvas.get());
+ skia::RefPtr<SkPicture> picture(skia::AdoptRef(recorder.endRecording()));
+ picture->serialize(&stream, &EncodeBitmap);
+ } else {
+ // Serialize the picture.
+ picture_->serialize(&stream, &EncodeBitmap);
+ }
+
+ // Encode the picture as base64.
+ scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue());
+ res->Set("params.layer_rect", MathUtil::AsValue(layer_rect_).release());
+
+ size_t serialized_size = stream.bytesWritten();
+ scoped_ptr<char[]> serialized_picture(new char[serialized_size]);
+ stream.copyTo(serialized_picture.get());
+ std::string b64_picture;
+ base::Base64Encode(std::string(serialized_picture.get(), serialized_size),
+ &b64_picture);
+ res->SetString("skp64", b64_picture);
+ return res.Pass();
+}
+
+void Picture::EmitTraceSnapshot() const {
+ TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("cc.debug") "," TRACE_DISABLED_BY_DEFAULT(
+ "devtools.timeline.picture"),
+ "cc::Picture",
+ this,
+ TracedPicture::AsTraceablePicture(this));
+}
+
+void Picture::EmitTraceSnapshotAlias(Picture* original) const {
+ TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("cc.debug") "," TRACE_DISABLED_BY_DEFAULT(
+ "devtools.timeline.picture"),
+ "cc::Picture",
+ this,
+ TracedPicture::AsTraceablePictureAlias(original));
+}
+
+base::LazyInstance<Picture::PixelRefs>
+ Picture::PixelRefIterator::empty_pixel_refs_;
+
+Picture::PixelRefIterator::PixelRefIterator()
+ : picture_(NULL),
+ current_pixel_refs_(empty_pixel_refs_.Pointer()),
+ current_index_(0),
+ min_point_(-1, -1),
+ max_point_(-1, -1),
+ current_x_(0),
+ current_y_(0) {
+}
+
+Picture::PixelRefIterator::PixelRefIterator(
+ const gfx::Rect& rect,
+ const Picture* picture)
+ : picture_(picture),
+ current_pixel_refs_(empty_pixel_refs_.Pointer()),
+ current_index_(0) {
+ gfx::Rect layer_rect = picture->layer_rect_;
+ gfx::Size cell_size = picture->cell_size_;
+ DCHECK(!cell_size.IsEmpty());
+
+ gfx::Rect query_rect(rect);
+ // Early out if the query rect doesn't intersect this picture.
+ if (!query_rect.Intersects(layer_rect)) {
+ min_point_ = gfx::Point(0, 0);
+ max_point_ = gfx::Point(0, 0);
+ current_x_ = 1;
+ current_y_ = 1;
+ return;
+ }
+
+ // First, subtract the layer origin as cells are stored in layer space.
+ query_rect.Offset(-layer_rect.OffsetFromOrigin());
+
+ // We have to find a cell_size aligned point that corresponds to
+ // query_rect. Point is a multiple of cell_size.
+ min_point_ = gfx::Point(
+ RoundDown(query_rect.x(), cell_size.width()),
+ RoundDown(query_rect.y(), cell_size.height()));
+ max_point_ = gfx::Point(
+ RoundDown(query_rect.right() - 1, cell_size.width()),
+ RoundDown(query_rect.bottom() - 1, cell_size.height()));
+
+ // Limit the points to known pixel ref boundaries.
+ min_point_ = gfx::Point(
+ std::max(min_point_.x(), picture->min_pixel_cell_.x()),
+ std::max(min_point_.y(), picture->min_pixel_cell_.y()));
+ max_point_ = gfx::Point(
+ std::min(max_point_.x(), picture->max_pixel_cell_.x()),
+ std::min(max_point_.y(), picture->max_pixel_cell_.y()));
+
+ // Make the current x be cell_size.width() less than min point, so that
+ // the first increment will point at min_point_.
+ current_x_ = min_point_.x() - cell_size.width();
+ current_y_ = min_point_.y();
+ if (current_y_ <= max_point_.y())
+ ++(*this);
+}
+
+Picture::PixelRefIterator::~PixelRefIterator() {
+}
+
+Picture::PixelRefIterator& Picture::PixelRefIterator::operator++() {
+ ++current_index_;
+ // If we're not at the end of the list, then we have the next item.
+ if (current_index_ < current_pixel_refs_->size())
+ return *this;
+
+ DCHECK(current_y_ <= max_point_.y());
+ while (true) {
+ gfx::Size cell_size = picture_->cell_size_;
+
+ // Advance the current grid cell.
+ current_x_ += cell_size.width();
+ if (current_x_ > max_point_.x()) {
+ current_y_ += cell_size.height();
+ current_x_ = min_point_.x();
+ if (current_y_ > max_point_.y()) {
+ current_pixel_refs_ = empty_pixel_refs_.Pointer();
+ current_index_ = 0;
+ break;
+ }
+ }
+
+ // If there are no pixel refs at this grid cell, keep incrementing.
+ PixelRefMapKey key(current_x_, current_y_);
+ PixelRefMap::const_iterator iter = picture_->pixel_refs_.find(key);
+ if (iter == picture_->pixel_refs_.end())
+ continue;
+
+ // We found a non-empty list: store it and get the first pixel ref.
+ current_pixel_refs_ = &iter->second;
+ current_index_ = 0;
+ break;
+ }
+ return *this;
+}
+
+scoped_refptr<base::debug::ConvertableToTraceFormat>
+ Picture::AsTraceableRasterData(float scale) const {
+ scoped_refptr<base::debug::TracedValue> raster_data =
+ new base::debug::TracedValue();
+ TracedValue::SetIDRef(this, raster_data.get(), "picture_id");
+ raster_data->SetDouble("scale", scale);
+ return raster_data;
+}
+
+scoped_refptr<base::debug::ConvertableToTraceFormat>
+ Picture::AsTraceableRecordData() const {
+ scoped_refptr<base::debug::TracedValue> record_data =
+ new base::debug::TracedValue();
+ TracedValue::SetIDRef(this, record_data.get(), "picture_id");
+ record_data->BeginArray("layer_rect");
+ MathUtil::AddToTracedValue(layer_rect_, record_data.get());
+ record_data->EndArray();
+ return record_data;
+}
+
+} // namespace cc
diff --git a/cc/resources/picture.h b/cc/resources/picture.h
new file mode 100644
index 0000000..69e3293
--- /dev/null
+++ b/cc/resources/picture.h
@@ -0,0 +1,172 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_PICTURE_H_
+#define CC_RESOURCES_PICTURE_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/debug/trace_event.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "cc/base/cc_export.h"
+#include "cc/base/region.h"
+#include "skia/ext/refptr.h"
+#include "third_party/skia/include/core/SkBBHFactory.h"
+#include "third_party/skia/include/core/SkPicture.h"
+#include "third_party/skia/include/record/SkRecording.h"
+#include "ui/gfx/rect.h"
+
+class SkPixelRef;
+
+namespace base {
+class Value;
+}
+
+namespace skia {
+class AnalysisCanvas;
+}
+
+namespace cc {
+
+class ContentLayerClient;
+
+class CC_EXPORT Picture
+ : public base::RefCountedThreadSafe<Picture> {
+ public:
+ typedef std::pair<int, int> PixelRefMapKey;
+ typedef std::vector<SkPixelRef*> PixelRefs;
+ typedef base::hash_map<PixelRefMapKey, PixelRefs> PixelRefMap;
+
+ enum RecordingMode {
+ RECORD_NORMALLY,
+ RECORD_WITH_SK_NULL_CANVAS,
+ RECORD_WITH_PAINTING_DISABLED,
+ RECORD_WITH_SKRECORD,
+ RECORDING_MODE_COUNT, // Must be the last entry.
+ };
+
+ static scoped_refptr<Picture> Create(
+ const gfx::Rect& layer_rect,
+ ContentLayerClient* client,
+ const SkTileGridFactory::TileGridInfo& tile_grid_info,
+ bool gather_pixels_refs,
+ RecordingMode recording_mode);
+ static scoped_refptr<Picture> CreateFromValue(const base::Value* value);
+ static scoped_refptr<Picture> CreateFromSkpValue(const base::Value* value);
+
+ gfx::Rect LayerRect() const { return layer_rect_; }
+
+ // Has Record() been called yet?
+ bool HasRecording() const { return picture_.get() != NULL; }
+
+ bool IsSuitableForGpuRasterization() const;
+ int ApproximateOpCount() const;
+
+ bool HasText() const;
+
+ // Apply this scale and raster the negated region into the canvas.
+ // |negated_content_region| specifies the region to be clipped out of the
+ // raster operation, i.e., the parts of the canvas which will not get drawn
+ // to.
+ int Raster(SkCanvas* canvas,
+ SkDrawPictureCallback* callback,
+ const Region& negated_content_region,
+ float contents_scale) const;
+
+ // Draw the picture directly into the given canvas, without applying any
+ // clip/scale/layer transformations.
+ void Replay(SkCanvas* canvas);
+
+ scoped_ptr<base::Value> AsValue() const;
+
+ // This iterator imprecisely returns the set of pixel refs that are needed to
+ // raster this layer rect from this picture. Internally, pixel refs are
+ // clumped into tile grid buckets, so there may be false positives.
+ class CC_EXPORT PixelRefIterator {
+ public:
+ PixelRefIterator();
+ PixelRefIterator(const gfx::Rect& layer_rect, const Picture* picture);
+ ~PixelRefIterator();
+
+ SkPixelRef* operator->() const {
+ DCHECK_LT(current_index_, current_pixel_refs_->size());
+ return (*current_pixel_refs_)[current_index_];
+ }
+
+ SkPixelRef* operator*() const {
+ DCHECK_LT(current_index_, current_pixel_refs_->size());
+ return (*current_pixel_refs_)[current_index_];
+ }
+
+ PixelRefIterator& operator++();
+ operator bool() const {
+ return current_index_ < current_pixel_refs_->size();
+ }
+
+ private:
+ static base::LazyInstance<PixelRefs> empty_pixel_refs_;
+ const Picture* picture_;
+ const PixelRefs* current_pixel_refs_;
+ unsigned current_index_;
+
+ gfx::Point min_point_;
+ gfx::Point max_point_;
+ int current_x_;
+ int current_y_;
+ };
+
+ void EmitTraceSnapshot() const;
+ void EmitTraceSnapshotAlias(Picture* original) const;
+
+ bool WillPlayBackBitmaps() const { return picture_->willPlayBackBitmaps(); }
+
+ private:
+ explicit Picture(const gfx::Rect& layer_rect);
+  // This constructor shares ownership of an already ref-counted SkPicture:
+  // the RefPtr is copied, adding a reference (no ownership is transferred).
+ Picture(const skia::RefPtr<SkPicture>&,
+ const gfx::Rect& layer_rect,
+ const PixelRefMap& pixel_refs);
+ // This constructor will call AdoptRef on the SkPicture.
+ Picture(SkPicture*, const gfx::Rect& layer_rect);
+ ~Picture();
+
+ // Record a paint operation. To be able to safely use this SkPicture for
+ // playback on a different thread this can only be called once.
+ void Record(ContentLayerClient* client,
+ const SkTileGridFactory::TileGridInfo& tile_grid_info,
+ RecordingMode recording_mode);
+
+ // Gather pixel refs from recording.
+ void GatherPixelRefs(const SkTileGridFactory::TileGridInfo& tile_grid_info);
+
+ gfx::Rect layer_rect_;
+ skia::RefPtr<SkPicture> picture_;
+ scoped_ptr<const EXPERIMENTAL::SkPlayback> playback_;
+
+ PixelRefMap pixel_refs_;
+ gfx::Point min_pixel_cell_;
+ gfx::Point max_pixel_cell_;
+ gfx::Size cell_size_;
+
+ scoped_refptr<base::debug::ConvertableToTraceFormat>
+ AsTraceableRasterData(float scale) const;
+ scoped_refptr<base::debug::ConvertableToTraceFormat>
+ AsTraceableRecordData() const;
+
+ friend class base::RefCountedThreadSafe<Picture>;
+ friend class PixelRefIterator;
+ DISALLOW_COPY_AND_ASSIGN(Picture);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_PICTURE_H_
diff --git a/cc/resources/picture_layer_tiling.cc b/cc/resources/picture_layer_tiling.cc
new file mode 100644
index 0000000..ee17dc8
--- /dev/null
+++ b/cc/resources/picture_layer_tiling.cc
@@ -0,0 +1,1200 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/picture_layer_tiling.h"
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+#include <set>
+
+#include "base/debug/trace_event.h"
+#include "base/debug/trace_event_argument.h"
+#include "base/logging.h"
+#include "cc/base/math_util.h"
+#include "cc/resources/tile.h"
+#include "cc/resources/tile_priority.h"
+#include "ui/gfx/point_conversions.h"
+#include "ui/gfx/rect_conversions.h"
+#include "ui/gfx/safe_integer_conversions.h"
+#include "ui/gfx/size_conversions.h"
+
+namespace cc {
+namespace {
+
+const float kSoonBorderDistanceInScreenPixels = 312.f;
+
+class TileEvictionOrder {
+ public:
+ explicit TileEvictionOrder(TreePriority tree_priority)
+ : tree_priority_(tree_priority) {}
+ ~TileEvictionOrder() {}
+
+ bool operator()(const Tile* a, const Tile* b) {
+ const TilePriority& a_priority =
+ a->priority_for_tree_priority(tree_priority_);
+ const TilePriority& b_priority =
+ b->priority_for_tree_priority(tree_priority_);
+
+ DCHECK(a_priority.priority_bin == b_priority.priority_bin);
+ DCHECK(a->required_for_activation() == b->required_for_activation());
+
+ // Or if a is occluded and b is unoccluded.
+ bool a_is_occluded = a->is_occluded_for_tree_priority(tree_priority_);
+ bool b_is_occluded = b->is_occluded_for_tree_priority(tree_priority_);
+ if (a_is_occluded != b_is_occluded)
+ return a_is_occluded;
+
+ // Or if a is farther away from visible.
+ return a_priority.distance_to_visible > b_priority.distance_to_visible;
+ }
+
+ private:
+ TreePriority tree_priority_;
+};
+
+void ReleaseTile(Tile* tile, WhichTree tree) {
+ // Reset priority as tile is ref-counted and might still be used
+ // even though we no longer hold a reference to it here anymore.
+ tile->SetPriority(tree, TilePriority());
+ tile->set_shared(false);
+}
+
+} // namespace
+
+scoped_ptr<PictureLayerTiling> PictureLayerTiling::Create(
+ float contents_scale,
+ const gfx::Size& layer_bounds,
+ PictureLayerTilingClient* client) {
+ return make_scoped_ptr(new PictureLayerTiling(contents_scale,
+ layer_bounds,
+ client));
+}
+
+PictureLayerTiling::PictureLayerTiling(float contents_scale,
+ const gfx::Size& layer_bounds,
+ PictureLayerTilingClient* client)
+ : contents_scale_(contents_scale),
+ layer_bounds_(layer_bounds),
+ resolution_(NON_IDEAL_RESOLUTION),
+ client_(client),
+ tiling_data_(gfx::Size(), gfx::Size(), true),
+ last_impl_frame_time_in_seconds_(0.0),
+ has_visible_rect_tiles_(false),
+ has_skewport_rect_tiles_(false),
+ has_soon_border_rect_tiles_(false),
+ has_eventually_rect_tiles_(false),
+ eviction_tiles_cache_valid_(false),
+ eviction_cache_tree_priority_(SAME_PRIORITY_FOR_BOTH_TREES) {
+ gfx::Size content_bounds =
+ gfx::ToCeiledSize(gfx::ScaleSize(layer_bounds, contents_scale));
+ gfx::Size tile_size = client_->CalculateTileSize(content_bounds);
+ if (tile_size.IsEmpty()) {
+ layer_bounds_ = gfx::Size();
+ content_bounds = gfx::Size();
+ }
+
+ DCHECK(!gfx::ToFlooredSize(
+ gfx::ScaleSize(layer_bounds, contents_scale)).IsEmpty()) <<
+ "Tiling created with scale too small as contents become empty." <<
+ " Layer bounds: " << layer_bounds.ToString() <<
+ " Contents scale: " << contents_scale;
+
+ tiling_data_.SetTilingSize(content_bounds);
+ tiling_data_.SetMaxTextureSize(tile_size);
+}
+
+PictureLayerTiling::~PictureLayerTiling() {
+ for (TileMap::const_iterator it = tiles_.begin(); it != tiles_.end(); ++it)
+ ReleaseTile(it->second.get(), client_->GetTree());
+}
+
+void PictureLayerTiling::SetClient(PictureLayerTilingClient* client) {
+ client_ = client;
+}
+
+Tile* PictureLayerTiling::CreateTile(int i,
+ int j,
+ const PictureLayerTiling* twin_tiling) {
+ TileMapKey key(i, j);
+ DCHECK(tiles_.find(key) == tiles_.end());
+
+ gfx::Rect paint_rect = tiling_data_.TileBoundsWithBorder(i, j);
+ gfx::Rect tile_rect = paint_rect;
+ tile_rect.set_size(tiling_data_.max_texture_size());
+
+ // Check our twin for a valid tile.
+ if (twin_tiling &&
+ tiling_data_.max_texture_size() ==
+ twin_tiling->tiling_data_.max_texture_size()) {
+ if (Tile* candidate_tile = twin_tiling->TileAt(i, j)) {
+ gfx::Rect rect =
+ gfx::ScaleToEnclosingRect(paint_rect, 1.0f / contents_scale_);
+ if (!client_->GetInvalidation()->Intersects(rect)) {
+ DCHECK(!candidate_tile->is_shared());
+ candidate_tile->set_shared(true);
+ tiles_[key] = candidate_tile;
+ return candidate_tile;
+ }
+ }
+ }
+
+ // Create a new tile because our twin didn't have a valid one.
+ scoped_refptr<Tile> tile = client_->CreateTile(this, tile_rect);
+ if (tile.get()) {
+ DCHECK(!tile->is_shared());
+ tiles_[key] = tile;
+ }
+ return tile.get();
+}
+
+void PictureLayerTiling::CreateMissingTilesInLiveTilesRect() {
+ const PictureLayerTiling* twin_tiling = client_->GetTwinTiling(this);
+ bool include_borders = false;
+ for (TilingData::Iterator iter(
+ &tiling_data_, live_tiles_rect_, include_borders);
+ iter;
+ ++iter) {
+ TileMapKey key = iter.index();
+ TileMap::iterator find = tiles_.find(key);
+ if (find != tiles_.end())
+ continue;
+ CreateTile(key.first, key.second, twin_tiling);
+ }
+
+ VerifyLiveTilesRect();
+}
+
+void PictureLayerTiling::UpdateTilesToCurrentPile(
+ const Region& layer_invalidation,
+ const gfx::Size& new_layer_bounds) {
+ DCHECK(!new_layer_bounds.IsEmpty());
+
+ gfx::Size tile_size = tiling_data_.max_texture_size();
+
+ if (new_layer_bounds != layer_bounds_) {
+ gfx::Size content_bounds =
+ gfx::ToCeiledSize(gfx::ScaleSize(new_layer_bounds, contents_scale_));
+
+ tile_size = client_->CalculateTileSize(content_bounds);
+ if (tile_size.IsEmpty()) {
+ layer_bounds_ = gfx::Size();
+ content_bounds = gfx::Size();
+ } else {
+ layer_bounds_ = new_layer_bounds;
+ }
+
+ // The SetLiveTilesRect() method would drop tiles outside the new bounds,
+ // but may do so incorrectly if resizing the tiling causes the number of
+ // tiles in the tiling_data_ to change.
+ gfx::Rect content_rect(content_bounds);
+ int before_left = tiling_data_.TileXIndexFromSrcCoord(live_tiles_rect_.x());
+ int before_top = tiling_data_.TileYIndexFromSrcCoord(live_tiles_rect_.y());
+ int before_right =
+ tiling_data_.TileXIndexFromSrcCoord(live_tiles_rect_.right() - 1);
+ int before_bottom =
+ tiling_data_.TileYIndexFromSrcCoord(live_tiles_rect_.bottom() - 1);
+
+ // The live_tiles_rect_ is clamped to stay within the tiling size as we
+ // change it.
+ live_tiles_rect_.Intersect(content_rect);
+ tiling_data_.SetTilingSize(content_bounds);
+
+ int after_right = -1;
+ int after_bottom = -1;
+ if (!live_tiles_rect_.IsEmpty()) {
+ after_right =
+ tiling_data_.TileXIndexFromSrcCoord(live_tiles_rect_.right() - 1);
+ after_bottom =
+ tiling_data_.TileYIndexFromSrcCoord(live_tiles_rect_.bottom() - 1);
+ }
+
+ // There is no recycled twin since this is run on the pending tiling.
+ PictureLayerTiling* recycled_twin = NULL;
+ DCHECK_EQ(recycled_twin, client_->GetRecycledTwinTiling(this));
+ DCHECK_EQ(PENDING_TREE, client_->GetTree());
+
+ // Drop tiles outside the new layer bounds if the layer shrank.
+ for (int i = after_right + 1; i <= before_right; ++i) {
+ for (int j = before_top; j <= before_bottom; ++j)
+ RemoveTileAt(i, j, recycled_twin);
+ }
+ for (int i = before_left; i <= after_right; ++i) {
+ for (int j = after_bottom + 1; j <= before_bottom; ++j)
+ RemoveTileAt(i, j, recycled_twin);
+ }
+
+ // If the layer grew, the live_tiles_rect_ is not changed, but a new row
+ // and/or column of tiles may now exist inside the same live_tiles_rect_.
+ const PictureLayerTiling* twin_tiling = client_->GetTwinTiling(this);
+ if (after_right > before_right) {
+ DCHECK_EQ(after_right, before_right + 1);
+ for (int j = before_top; j <= after_bottom; ++j)
+ CreateTile(after_right, j, twin_tiling);
+ }
+ if (after_bottom > before_bottom) {
+ DCHECK_EQ(after_bottom, before_bottom + 1);
+ for (int i = before_left; i <= before_right; ++i)
+ CreateTile(i, after_bottom, twin_tiling);
+ }
+ }
+
+ if (tile_size != tiling_data_.max_texture_size()) {
+ tiling_data_.SetMaxTextureSize(tile_size);
+ // When the tile size changes, the TilingData positions no longer work
+ // as valid keys to the TileMap, so just drop all tiles.
+ Reset();
+ } else {
+ Invalidate(layer_invalidation);
+ }
+
+ PicturePileImpl* pile = client_->GetPile();
+ for (TileMap::const_iterator it = tiles_.begin(); it != tiles_.end(); ++it)
+ it->second->set_picture_pile(pile);
+ VerifyLiveTilesRect();
+}
+
+void PictureLayerTiling::RemoveTilesInRegion(const Region& layer_region) {
+ bool recreate_invalidated_tiles = false;
+ DoInvalidate(layer_region, recreate_invalidated_tiles);
+}
+
+void PictureLayerTiling::Invalidate(const Region& layer_region) {
+ bool recreate_invalidated_tiles = true;
+ DoInvalidate(layer_region, recreate_invalidated_tiles);
+}
+
+// Shared implementation for Invalidate() and RemoveTilesInRegion(). Removes
+// every tile whose bounds (including border texels) intersect |layer_region|,
+// restricted to the live tiles rect, and optionally recreates each removed
+// tile. Only valid on the pending tree (see DCHECKs below).
+void PictureLayerTiling::DoInvalidate(const Region& layer_region,
+                                      bool recreate_invalidated_tiles) {
+  std::vector<TileMapKey> new_tile_keys;
+  gfx::Rect expanded_live_tiles_rect =
+      tiling_data_.ExpandRectIgnoringBordersToTileBounds(live_tiles_rect_);
+  for (Region::Iterator iter(layer_region); iter.has_rect(); iter.next()) {
+    gfx::Rect layer_rect = iter.rect();
+    // Invalidation arrives in layer space; convert to content space.
+    gfx::Rect content_rect =
+        gfx::ScaleToEnclosingRect(layer_rect, contents_scale_);
+    // Consider tiles inside the live tiles rect even if only their border
+    // pixels intersect the invalidation. But don't consider tiles outside
+    // the live tiles rect with the same conditions, as they won't exist.
+    int border_pixels = tiling_data_.border_texels();
+    content_rect.Inset(-border_pixels, -border_pixels);
+    // Avoid needless work by not bothering to invalidate where there aren't
+    // tiles.
+    content_rect.Intersect(expanded_live_tiles_rect);
+    if (content_rect.IsEmpty())
+      continue;
+    // Since the content_rect includes border pixels already, don't include
+    // borders when iterating to avoid double counting them.
+    bool include_borders = false;
+    for (TilingData::Iterator iter(
+             &tiling_data_, content_rect, include_borders);
+         iter;
+         ++iter) {
+      // There is no recycled twin since this is run on the pending tiling.
+      PictureLayerTiling* recycled_twin = NULL;
+      DCHECK_EQ(recycled_twin, client_->GetRecycledTwinTiling(this));
+      DCHECK_EQ(PENDING_TREE, client_->GetTree());
+      // Record the removed key so the tile can be recreated below.
+      if (RemoveTileAt(iter.index_x(), iter.index_y(), recycled_twin))
+        new_tile_keys.push_back(iter.index());
+    }
+  }
+
+  if (recreate_invalidated_tiles && !new_tile_keys.empty()) {
+    for (size_t i = 0; i < new_tile_keys.size(); ++i) {
+      // Don't try to share a tile with the twin layer, it's been invalidated so
+      // we have to make our own tile here.
+      const PictureLayerTiling* twin_tiling = NULL;
+      CreateTile(new_tile_keys[i].first, new_tile_keys[i].second, twin_tiling);
+    }
+  }
+}
+
+// Default-constructed iterator covers nothing: with right_/bottom_ at -1 the
+// tile index range (left_..right_, top_..bottom_) is empty, so the iterator
+// immediately tests false.
+PictureLayerTiling::CoverageIterator::CoverageIterator()
+    : tiling_(NULL),
+      current_tile_(NULL),
+      tile_i_(0),
+      tile_j_(0),
+      left_(0),
+      top_(0),
+      right_(-1),
+      bottom_(-1) {
+}
+
+// Iterates over the tiles of |tiling| that cover |dest_rect|, where
+// |dest_rect| is expressed in a space scaled by |dest_scale| relative to
+// layer space.
+PictureLayerTiling::CoverageIterator::CoverageIterator(
+    const PictureLayerTiling* tiling,
+    float dest_scale,
+    const gfx::Rect& dest_rect)
+    : tiling_(tiling),
+      dest_rect_(dest_rect),
+      dest_to_content_scale_(0),
+      current_tile_(NULL),
+      tile_i_(0),
+      tile_j_(0),
+      left_(0),
+      top_(0),
+      right_(-1),
+      bottom_(-1) {
+  DCHECK(tiling_);
+  if (dest_rect_.IsEmpty())
+    return;
+
+  dest_to_content_scale_ = tiling_->contents_scale_ / dest_scale;
+
+  gfx::Rect content_rect =
+      gfx::ScaleToEnclosingRect(dest_rect_,
+                                dest_to_content_scale_,
+                                dest_to_content_scale_);
+  // IndexFromSrcCoord clamps to valid tile ranges, so it's necessary to
+  // check for non-intersection first.
+  content_rect.Intersect(gfx::Rect(tiling_->tiling_size()));
+  if (content_rect.IsEmpty())
+    return;
+
+  left_ = tiling_->tiling_data_.TileXIndexFromSrcCoord(content_rect.x());
+  top_ = tiling_->tiling_data_.TileYIndexFromSrcCoord(content_rect.y());
+  right_ = tiling_->tiling_data_.TileXIndexFromSrcCoord(
+      content_rect.right() - 1);
+  bottom_ = tiling_->tiling_data_.TileYIndexFromSrcCoord(
+      content_rect.bottom() - 1);
+
+  // Start one column before the first tile so the pre-increment below lands
+  // on (left_, top_) and computes its geometry rect.
+  tile_i_ = left_ - 1;
+  tile_j_ = top_;
+  ++(*this);
+}
+
+PictureLayerTiling::CoverageIterator::~CoverageIterator() {
+}
+
+// Advances to the next tile in raster order (left->right, top->bottom) and
+// computes its geometry rect in dest space, insetting it so that consecutive
+// geometry rects tile dest_rect_ exactly without overlap.
+PictureLayerTiling::CoverageIterator&
+PictureLayerTiling::CoverageIterator::operator++() {
+  // Already past the last row; iterator is exhausted.
+  if (tile_j_ > bottom_)
+    return *this;
+
+  // tile_i_ < left_ only holds on the very first increment from the ctor.
+  bool first_time = tile_i_ < left_;
+  bool new_row = false;
+  tile_i_++;
+  if (tile_i_ > right_) {
+    tile_i_ = left_;
+    tile_j_++;
+    new_row = true;
+    if (tile_j_ > bottom_) {
+      current_tile_ = NULL;
+      return *this;
+    }
+  }
+
+  // May be NULL if no tile exists at this index.
+  current_tile_ = tiling_->TileAt(tile_i_, tile_j_);
+
+  // Calculate the current geometry rect. Due to floating point rounding
+  // and ToEnclosingRect, tiles might overlap in destination space on the
+  // edges.
+  gfx::Rect last_geometry_rect = current_geometry_rect_;
+
+  gfx::Rect content_rect = tiling_->tiling_data_.TileBounds(tile_i_, tile_j_);
+
+  current_geometry_rect_ =
+      gfx::ScaleToEnclosingRect(content_rect,
+                                1 / dest_to_content_scale_,
+                                1 / dest_to_content_scale_);
+
+  current_geometry_rect_.Intersect(dest_rect_);
+
+  // No previous rect to de-overlap against on the first tile.
+  if (first_time)
+    return *this;
+
+  // Iteration happens left->right, top->bottom. Running off the bottom-right
+  // edge is handled by the intersection above with dest_rect_. Here we make
+  // sure that the new current geometry rect doesn't overlap with the last.
+  int min_left;
+  int min_top;
+  if (new_row) {
+    min_left = dest_rect_.x();
+    min_top = last_geometry_rect.bottom();
+  } else {
+    min_left = last_geometry_rect.right();
+    min_top = last_geometry_rect.y();
+  }
+
+  int inset_left = std::max(0, min_left - current_geometry_rect_.x());
+  int inset_top = std::max(0, min_top - current_geometry_rect_.y());
+  current_geometry_rect_.Inset(inset_left, inset_top, 0, 0);
+
+  // Within a row, rects must abut exactly with no gaps or overlap.
+  if (!new_row) {
+    DCHECK_EQ(last_geometry_rect.right(), current_geometry_rect_.x());
+    DCHECK_EQ(last_geometry_rect.bottom(), current_geometry_rect_.bottom());
+    DCHECK_EQ(last_geometry_rect.y(), current_geometry_rect_.y());
+  }
+
+  return *this;
+}
+
+// The portion of dest_rect_ covered by the current tile, in dest space.
+gfx::Rect PictureLayerTiling::CoverageIterator::geometry_rect() const {
+  return current_geometry_rect_;
+}
+
+// The current tile's full rect (origin of its bounds-with-border, sized to
+// the max texture size), in content space.
+gfx::Rect
+PictureLayerTiling::CoverageIterator::full_tile_geometry_rect() const {
+  gfx::Rect rect = tiling_->tiling_data_.TileBoundsWithBorder(tile_i_, tile_j_);
+  rect.set_size(tiling_->tiling_data_.max_texture_size());
+  return rect;
+}
+
+// Maps the current geometry rect into the current tile's texture space.
+gfx::RectF PictureLayerTiling::CoverageIterator::texture_rect() const {
+  gfx::PointF tex_origin =
+      tiling_->tiling_data_.TileBoundsWithBorder(tile_i_, tile_j_).origin();
+
+  // Convert from dest space => content space => texture space.
+  gfx::RectF texture_rect(current_geometry_rect_);
+  texture_rect.Scale(dest_to_content_scale_,
+                     dest_to_content_scale_);
+  texture_rect.Intersect(gfx::Rect(tiling_->tiling_size()));
+  if (texture_rect.IsEmpty())
+    return texture_rect;
+  texture_rect.Offset(-tex_origin.OffsetFromOrigin());
+
+  return texture_rect;
+}
+
+// Texture size is uniform for all tiles in the tiling.
+gfx::Size PictureLayerTiling::CoverageIterator::texture_size() const {
+  return tiling_->tiling_data_.max_texture_size();
+}
+
+// Removes and releases the tile at index (i, j), if one exists, and mirrors
+// the removal into |recycled_twin| when non-NULL. Returns true if a tile was
+// actually removed.
+bool PictureLayerTiling::RemoveTileAt(int i,
+                                      int j,
+                                      PictureLayerTiling* recycled_twin) {
+  TileMap::iterator found = tiles_.find(TileMapKey(i, j));
+  if (found == tiles_.end())
+    return false;
+  ReleaseTile(found->second.get(), client_->GetTree());
+  tiles_.erase(found);
+  if (recycled_twin) {
+    // Recycled twin does not also have a recycled twin, so pass NULL.
+    recycled_twin->RemoveTileAt(i, j, NULL);
+  }
+  return true;
+}
+
+// Drops every tile in the tiling (mirroring removal into the recycled twin,
+// if any) and empties the live tiles rect.
+void PictureLayerTiling::Reset() {
+  live_tiles_rect_ = gfx::Rect();
+  PictureLayerTiling* recycled_twin = client_->GetRecycledTwinTiling(this);
+  for (TileMap::const_iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
+    ReleaseTile(it->second.get(), client_->GetTree());
+    if (recycled_twin)
+      recycled_twin->RemoveTileAt(it->first.first, it->first.second, NULL);
+  }
+  tiles_.clear();
+}
+
+// Extrapolates the motion of the visible rect since the last frame forward
+// by the client's skewport target time, producing a rect skewed in the
+// direction of scrolling. The result is clamped to a per-side extrapolation
+// limit and always contains |visible_rect_in_content_space|.
+gfx::Rect PictureLayerTiling::ComputeSkewport(
+    double current_frame_time_in_seconds,
+    const gfx::Rect& visible_rect_in_content_space) const {
+  gfx::Rect skewport = visible_rect_in_content_space;
+  // No history yet (first frame): no extrapolation possible.
+  if (last_impl_frame_time_in_seconds_ == 0.0)
+    return skewport;
+
+  double time_delta =
+      current_frame_time_in_seconds - last_impl_frame_time_in_seconds_;
+  if (time_delta == 0.0)
+    return skewport;
+
+  float skewport_target_time_in_seconds =
+      client_->GetSkewportTargetTimeInSeconds();
+  // How many "frames worth" of movement to project forward.
+  double extrapolation_multiplier =
+      skewport_target_time_in_seconds / time_delta;
+
+  int old_x = last_visible_rect_in_content_space_.x();
+  int old_y = last_visible_rect_in_content_space_.y();
+  int old_right = last_visible_rect_in_content_space_.right();
+  int old_bottom = last_visible_rect_in_content_space_.bottom();
+
+  int new_x = visible_rect_in_content_space.x();
+  int new_y = visible_rect_in_content_space.y();
+  int new_right = visible_rect_in_content_space.right();
+  int new_bottom = visible_rect_in_content_space.bottom();
+
+  int skewport_limit = client_->GetSkewportExtrapolationLimitInContentPixels();
+
+  // Compute the maximum skewport based on |skewport_limit|.
+  gfx::Rect max_skewport = skewport;
+  max_skewport.Inset(
+      -skewport_limit, -skewport_limit, -skewport_limit, -skewport_limit);
+
+  // Inset the skewport by the needed adjustment. A negative inset (edge
+  // moving outward) expands the rect on the side being scrolled toward.
+  skewport.Inset(extrapolation_multiplier * (new_x - old_x),
+                 extrapolation_multiplier * (new_y - old_y),
+                 extrapolation_multiplier * (old_right - new_right),
+                 extrapolation_multiplier * (old_bottom - new_bottom));
+
+  // Clip the skewport to |max_skewport|.
+  skewport.Intersect(max_skewport);
+
+  // Finally, ensure that visible rect is contained in the skewport.
+  skewport.Union(visible_rect_in_content_space);
+  return skewport;
+}
+
+// Recomputes tile priorities for one frame: sizes the live tiles rect to the
+// "eventually" interest area, then assigns NOW priority to visible tiles,
+// SOON to skewport and near-border tiles, and EVENTUALLY to the rest of the
+// interest area. Also records the per-category rects used later by the
+// raster/eviction iterators, and caches per-tile occlusion for visible tiles.
+void PictureLayerTiling::UpdateTilePriorities(
+    WhichTree tree,
+    const gfx::Rect& viewport_in_layer_space,
+    float ideal_contents_scale,
+    double current_frame_time_in_seconds,
+    const Occlusion& occlusion_in_layer_space) {
+  if (!NeedsUpdateForFrameAtTime(current_frame_time_in_seconds)) {
+    // This should never be zero for the purposes of has_ever_been_updated().
+    DCHECK_NE(current_frame_time_in_seconds, 0.0);
+    return;
+  }
+
+  gfx::Rect visible_rect_in_content_space =
+      gfx::ScaleToEnclosingRect(viewport_in_layer_space, contents_scale_);
+
+  // Empty tiling: just record frame state so skewport history stays fresh.
+  if (tiling_size().IsEmpty()) {
+    last_impl_frame_time_in_seconds_ = current_frame_time_in_seconds;
+    last_visible_rect_in_content_space_ = visible_rect_in_content_space;
+    return;
+  }
+
+  size_t max_tiles_for_interest_area = client_->GetMaxTilesForInterestArea();
+
+  // Budget the interest area in pixels from the tile-count budget.
+  gfx::Size tile_size = tiling_data_.max_texture_size();
+  int64 eventually_rect_area =
+      max_tiles_for_interest_area * tile_size.width() * tile_size.height();
+
+  gfx::Rect skewport = ComputeSkewport(current_frame_time_in_seconds,
+                                       visible_rect_in_content_space);
+  DCHECK(skewport.Contains(visible_rect_in_content_space));
+
+  gfx::Rect eventually_rect =
+      ExpandRectEquallyToAreaBoundedBy(visible_rect_in_content_space,
+                                       eventually_rect_area,
+                                       gfx::Rect(tiling_size()),
+                                       &expansion_cache_);
+
+  DCHECK(eventually_rect.IsEmpty() ||
+         gfx::Rect(tiling_size()).Contains(eventually_rect))
+      << "tiling_size: " << tiling_size().ToString()
+      << " eventually_rect: " << eventually_rect.ToString();
+
+  // Create/destroy tiles to match the new interest area.
+  SetLiveTilesRect(eventually_rect);
+
+  last_impl_frame_time_in_seconds_ = current_frame_time_in_seconds;
+  last_visible_rect_in_content_space_ = visible_rect_in_content_space;
+
+  // Priorities changed, so the sorted eviction lists must be rebuilt.
+  eviction_tiles_cache_valid_ = false;
+
+  TilePriority now_priority(resolution_, TilePriority::NOW, 0);
+  float content_to_screen_scale = ideal_contents_scale / contents_scale_;
+
+  // Assign now priority to all visible tiles.
+  bool include_borders = false;
+  has_visible_rect_tiles_ = false;
+  for (TilingData::Iterator iter(
+           &tiling_data_, visible_rect_in_content_space, include_borders);
+       iter;
+       ++iter) {
+    TileMap::iterator find = tiles_.find(iter.index());
+    if (find == tiles_.end())
+      continue;
+    has_visible_rect_tiles_ = true;
+    Tile* tile = find->second.get();
+
+    tile->SetPriority(tree, now_priority);
+
+    // Set whether tile is occluded or not.
+    gfx::Rect tile_query_rect = ScaleToEnclosingRect(
+        IntersectRects(tile->content_rect(), visible_rect_in_content_space),
+        1.0f / contents_scale_);
+    bool is_occluded = occlusion_in_layer_space.IsOccluded(tile_query_rect);
+    tile->set_is_occluded(tree, is_occluded);
+  }
+
+  // Assign soon priority to skewport tiles.
+  has_skewport_rect_tiles_ = false;
+  for (TilingData::DifferenceIterator iter(
+           &tiling_data_, skewport, visible_rect_in_content_space);
+       iter;
+       ++iter) {
+    TileMap::iterator find = tiles_.find(iter.index());
+    if (find == tiles_.end())
+      continue;
+    has_skewport_rect_tiles_ = true;
+    Tile* tile = find->second.get();
+
+    gfx::Rect tile_bounds =
+        tiling_data_.TileBounds(iter.index_x(), iter.index_y());
+
+    // Distance is measured in screen pixels for comparability across tilings.
+    float distance_to_visible =
+        visible_rect_in_content_space.ManhattanInternalDistance(tile_bounds) *
+        content_to_screen_scale;
+
+    TilePriority priority(resolution_, TilePriority::SOON, distance_to_visible);
+    tile->SetPriority(tree, priority);
+  }
+
+  // Assign eventually priority to interest rect tiles.
+  has_eventually_rect_tiles_ = false;
+  for (TilingData::DifferenceIterator iter(
+           &tiling_data_, eventually_rect, skewport);
+       iter;
+       ++iter) {
+    TileMap::iterator find = tiles_.find(iter.index());
+    if (find == tiles_.end())
+      continue;
+    has_eventually_rect_tiles_ = true;
+    Tile* tile = find->second.get();
+
+    gfx::Rect tile_bounds =
+        tiling_data_.TileBounds(iter.index_x(), iter.index_y());
+
+    float distance_to_visible =
+        visible_rect_in_content_space.ManhattanInternalDistance(tile_bounds) *
+        content_to_screen_scale;
+    TilePriority priority(
+        resolution_, TilePriority::EVENTUALLY, distance_to_visible);
+    tile->SetPriority(tree, priority);
+  }
+
+  // Upgrade the priority on border tiles to be SOON.
+  gfx::Rect soon_border_rect = visible_rect_in_content_space;
+  float border = kSoonBorderDistanceInScreenPixels / content_to_screen_scale;
+  soon_border_rect.Inset(-border, -border, -border, -border);
+  has_soon_border_rect_tiles_ = false;
+  for (TilingData::DifferenceIterator iter(
+           &tiling_data_, soon_border_rect, skewport);
+       iter;
+       ++iter) {
+    TileMap::iterator find = tiles_.find(iter.index());
+    if (find == tiles_.end())
+      continue;
+    has_soon_border_rect_tiles_ = true;
+    Tile* tile = find->second.get();
+
+    // Keep the distance computed above; only raise the priority bin.
+    TilePriority priority(resolution_,
+                          TilePriority::SOON,
+                          tile->priority(tree).distance_to_visible);
+    tile->SetPriority(tree, priority);
+  }
+
+  // Update iteration rects.
+  current_visible_rect_ = visible_rect_in_content_space;
+  current_skewport_rect_ = skewport;
+  current_soon_border_rect_ = soon_border_rect;
+  current_eventually_rect_ = eventually_rect;
+}
+
+// Resizes the set of existing tiles to match |new_live_tiles_rect|: tiles
+// that fall outside the new rect are removed (mirrored into the recycled
+// twin), and tiles for newly exposed area are created, sharing with the twin
+// tiling where possible.
+void PictureLayerTiling::SetLiveTilesRect(
+    const gfx::Rect& new_live_tiles_rect) {
+  DCHECK(new_live_tiles_rect.IsEmpty() ||
+         gfx::Rect(tiling_size()).Contains(new_live_tiles_rect))
+      << "tiling_size: " << tiling_size().ToString()
+      << " new_live_tiles_rect: " << new_live_tiles_rect.ToString();
+  if (live_tiles_rect_ == new_live_tiles_rect)
+    return;
+
+  // Iterate to delete all tiles outside of our new live_tiles rect.
+  PictureLayerTiling* recycled_twin = client_->GetRecycledTwinTiling(this);
+  for (TilingData::DifferenceIterator iter(&tiling_data_,
+                                           live_tiles_rect_,
+                                           new_live_tiles_rect);
+       iter;
+       ++iter) {
+    RemoveTileAt(iter.index_x(), iter.index_y(), recycled_twin);
+  }
+
+  const PictureLayerTiling* twin_tiling = client_->GetTwinTiling(this);
+
+  // Iterate to allocate new tiles for all regions with newly exposed area.
+  for (TilingData::DifferenceIterator iter(&tiling_data_,
+                                           new_live_tiles_rect,
+                                           live_tiles_rect_);
+       iter;
+       ++iter) {
+    TileMapKey key(iter.index());
+    CreateTile(key.first, key.second, twin_tiling);
+  }
+
+  live_tiles_rect_ = new_live_tiles_rect;
+  VerifyLiveTilesRect();
+}
+
+// Debug-only consistency check: every tile's index must be within the
+// tiling's bounds and its tile bounds must intersect live_tiles_rect_.
+// Compiles to nothing when DCHECKs are disabled.
+void PictureLayerTiling::VerifyLiveTilesRect() {
+#if DCHECK_IS_ON
+  for (TileMap::iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
+    if (!it->second.get())
+      continue;
+    DCHECK(it->first.first < tiling_data_.num_tiles_x())
+        << this << " " << it->first.first << "," << it->first.second
+        << " num_tiles_x " << tiling_data_.num_tiles_x() << " live_tiles_rect "
+        << live_tiles_rect_.ToString();
+    DCHECK(it->first.second < tiling_data_.num_tiles_y())
+        << this << " " << it->first.first << "," << it->first.second
+        << " num_tiles_y " << tiling_data_.num_tiles_y() << " live_tiles_rect "
+        << live_tiles_rect_.ToString();
+    DCHECK(tiling_data_.TileBounds(it->first.first, it->first.second)
+               .Intersects(live_tiles_rect_))
+        << this << " " << it->first.first << "," << it->first.second
+        << " tile bounds "
+        << tiling_data_.TileBounds(it->first.first, it->first.second).ToString()
+        << " live_tiles_rect " << live_tiles_rect_.ToString();
+  }
+#endif
+}
+
+// Clears active-tree priorities on all tiles when this tiling is recycled.
+void PictureLayerTiling::DidBecomeRecycled() {
+  // DidBecomeActive below will set the active priority for tiles that are
+  // still in the tree. Calling this first on an active tiling that is becoming
+  // recycled takes care of tiles that are no longer in the active tree (eg.
+  // due to a pending invalidation).
+  for (TileMap::const_iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
+    it->second->SetPriority(ACTIVE_TREE, TilePriority());
+  }
+}
+
+// Promotes each tile's pending-tree priority to the active tree (resetting
+// the pending priority) and repoints tiles at the client's current pile.
+void PictureLayerTiling::DidBecomeActive() {
+  PicturePileImpl* active_pile = client_->GetPile();
+  for (TileMap::const_iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
+    it->second->SetPriority(ACTIVE_TREE, it->second->priority(PENDING_TREE));
+    it->second->SetPriority(PENDING_TREE, TilePriority());
+
+    // Tile holds a ref onto a picture pile. If the tile never gets invalidated
+    // and recreated, then that picture pile ref could exist indefinitely. To
+    // prevent this, ask the client to update the pile to its own ref. This
+    // will cause PicturePileImpls to get deleted once the corresponding
+    // PictureLayerImpl and any in flight raster jobs go out of scope.
+    it->second->set_picture_pile(active_pile);
+  }
+}
+
+// Adds every tile in this tiling to |tiles| for tracing/debugging output.
+void PictureLayerTiling::GetAllTilesForTracing(
+    std::set<const Tile*>* tiles) const {
+  for (TileMap::const_iterator it = tiles_.begin(); it != tiles_.end(); ++it)
+    tiles->insert(it->second.get());
+}
+
+// Serializes basic tiling stats (tile count, scale, size) into |state| for
+// chrome://tracing.
+void PictureLayerTiling::AsValueInto(base::debug::TracedValue* state) const {
+  state->SetInteger("num_tiles", tiles_.size());
+  state->SetDouble("content_scale", contents_scale_);
+  state->BeginDictionary("tiling_size");
+  MathUtil::AddToTracedValue(tiling_size(), state);
+  state->EndDictionary();
+}
+
+// Sums GPU memory usage across all tiles in this tiling.
+size_t PictureLayerTiling::GPUMemoryUsageInBytes() const {
+  size_t amount = 0;
+  for (TileMap::const_iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
+    const Tile* tile = it->second.get();
+    amount += tile->GPUMemoryUsageInBytes();
+  }
+  return amount;
+}
+
+// A zero target area guarantees the first real request misses the cache.
+PictureLayerTiling::RectExpansionCache::RectExpansionCache()
+  : previous_target(0) {
+}
+
+namespace {
+
+// This struct represents an event at which the expanding rect intersects
+// one of its boundaries. 4 intersection events will occur during expansion.
+struct EdgeEvent {
+  enum { BOTTOM, TOP, LEFT, RIGHT } edge;
+  int* num_edges;
+  int distance;
+};
+
+// Compute the delta to expand from edges to cover target_area.
+// With num_x_edges/num_y_edges free edges, expanding each by delta grows the
+// area to (width + num_x_edges * delta) * (height + num_y_edges * delta);
+// setting that equal to target_area gives the quadratic solved below.
+int ComputeExpansionDelta(int num_x_edges, int num_y_edges,
+                          int width, int height,
+                          int64 target_area) {
+  // Compute coefficients for the quadratic equation:
+  // a*x^2 + b*x + c = 0
+  int a = num_y_edges * num_x_edges;
+  int b = num_y_edges * width + num_x_edges * height;
+  int64 c = static_cast<int64>(width) * height - target_area;
+
+  // Compute the delta for our edges using the quadratic equation.
+  // When a == 0 (no free edges in one dimension) the equation is linear.
+  int delta =
+      (a == 0) ? -c / b : (-b + static_cast<int>(std::sqrt(
+                                    static_cast<int64>(b) * b - 4.0 * a * c))) /
+                              (2 * a);
+  // Never shrink below the current size.
+  return std::max(0, delta);
+}
+
+}  // namespace
+
+// Expands |starting_rect| outward as evenly as possible on all sides until
+// it covers approximately |target_area|, while staying inside
+// |bounding_rect|. When an edge hits the bounds it stops moving and the
+// remaining edges absorb its share of the expansion. Results are memoized in
+// |cache| (optional) keyed on all three inputs.
+gfx::Rect PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+    const gfx::Rect& starting_rect,
+    int64 target_area,
+    const gfx::Rect& bounding_rect,
+    RectExpansionCache* cache) {
+  if (starting_rect.IsEmpty())
+    return starting_rect;
+
+  // Cache hit: all inputs identical to the previous call.
+  if (cache &&
+      cache->previous_start == starting_rect &&
+      cache->previous_bounds == bounding_rect &&
+      cache->previous_target == target_area)
+    return cache->previous_result;
+
+  // Record inputs now; the result is filled in at each return point below.
+  if (cache) {
+    cache->previous_start = starting_rect;
+    cache->previous_bounds = bounding_rect;
+    cache->previous_target = target_area;
+  }
+
+  DCHECK(!bounding_rect.IsEmpty());
+  DCHECK_GT(target_area, 0);
+
+  // Expand the starting rect to cover target_area, if it is smaller than it.
+  int delta = ComputeExpansionDelta(
+      2, 2, starting_rect.width(), starting_rect.height(), target_area);
+  gfx::Rect expanded_starting_rect = starting_rect;
+  if (delta > 0)
+    expanded_starting_rect.Inset(-delta, -delta);
+
+  gfx::Rect rect = IntersectRects(expanded_starting_rect, bounding_rect);
+  if (rect.IsEmpty()) {
+    // The starting_rect and bounding_rect are far away.
+    if (cache)
+      cache->previous_result = rect;
+    return rect;
+  }
+  if (delta >= 0 && rect == expanded_starting_rect) {
+    // The starting rect already covers the entire bounding_rect and isn't too
+    // large for the target_area.
+    if (cache)
+      cache->previous_result = rect;
+    return rect;
+  }
+
+  // Continue to expand/shrink rect to let it cover target_area.
+
+  // These values will be updated by the loop and used as the output.
+  int origin_x = rect.x();
+  int origin_y = rect.y();
+  int width = rect.width();
+  int height = rect.height();
+
+  // In the beginning we will consider 2 edges in each dimension.
+  int num_y_edges = 2;
+  int num_x_edges = 2;
+
+  // Create an event list. Each event's distance is how far that edge can
+  // still travel before hitting the bounding rect.
+  EdgeEvent events[] = {
+    { EdgeEvent::BOTTOM, &num_y_edges, rect.y() - bounding_rect.y() },
+    { EdgeEvent::TOP, &num_y_edges, bounding_rect.bottom() - rect.bottom() },
+    { EdgeEvent::LEFT, &num_x_edges, rect.x() - bounding_rect.x() },
+    { EdgeEvent::RIGHT, &num_x_edges, bounding_rect.right() - rect.right() }
+  };
+
+  // Sort the events by distance (closest first). Fixed 5-comparison
+  // sorting network for 4 elements.
+  if (events[0].distance > events[1].distance) std::swap(events[0], events[1]);
+  if (events[2].distance > events[3].distance) std::swap(events[2], events[3]);
+  if (events[0].distance > events[2].distance) std::swap(events[0], events[2]);
+  if (events[1].distance > events[3].distance) std::swap(events[1], events[3]);
+  if (events[1].distance > events[2].distance) std::swap(events[1], events[2]);
+
+  for (int event_index = 0; event_index < 4; event_index++) {
+    const EdgeEvent& event = events[event_index];
+
+    int delta = ComputeExpansionDelta(
+        num_x_edges, num_y_edges, width, height, target_area);
+
+    // Clamp delta to our event distance.
+    if (delta > event.distance)
+      delta = event.distance;
+
+    // Adjust the edge count for this kind of edge.
+    --*event.num_edges;
+
+    // Apply the delta to the edges and edge events.
+    for (int i = event_index; i < 4; i++) {
+      switch (events[i].edge) {
+        case EdgeEvent::BOTTOM:
+          origin_y -= delta;
+          height += delta;
+          break;
+        case EdgeEvent::TOP:
+          height += delta;
+          break;
+        case EdgeEvent::LEFT:
+          origin_x -= delta;
+          width += delta;
+          break;
+        case EdgeEvent::RIGHT:
+          width += delta;
+          break;
+      }
+      events[i].distance -= delta;
+    }
+
+    // If our delta is less than our event distance, we're done.
+    if (delta < event.distance)
+      break;
+  }
+
+  gfx::Rect result(origin_x, origin_y, width, height);
+  if (cache)
+    cache->previous_result = result;
+  return result;
+}
+
+// Rebuilds the six per-category eviction lists (NOW/SOON/EVENTUALLY, each
+// split by required-for-activation) and sorts each in eviction order for
+// |tree_priority|. No-op if the cache is already valid for that priority.
+void PictureLayerTiling::UpdateEvictionCacheIfNeeded(
+    TreePriority tree_priority) {
+  if (eviction_tiles_cache_valid_ &&
+      eviction_cache_tree_priority_ == tree_priority)
+    return;
+
+  eviction_tiles_now_.clear();
+  eviction_tiles_now_and_required_for_activation_.clear();
+  eviction_tiles_soon_.clear();
+  eviction_tiles_soon_and_required_for_activation_.clear();
+  eviction_tiles_eventually_.clear();
+  eviction_tiles_eventually_and_required_for_activation_.clear();
+
+  // Bucket every tile by its priority bin under |tree_priority|.
+  for (TileMap::iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
+    // TODO(vmpstr): This should update the priority if UpdateTilePriorities
+    // changes not to do this.
+    Tile* tile = it->second.get();
+    const TilePriority& priority =
+        tile->priority_for_tree_priority(tree_priority);
+    switch (priority.priority_bin) {
+      case TilePriority::EVENTUALLY:
+        if (tile->required_for_activation())
+          eviction_tiles_eventually_and_required_for_activation_.push_back(
+              tile);
+        else
+          eviction_tiles_eventually_.push_back(tile);
+        break;
+      case TilePriority::SOON:
+        if (tile->required_for_activation())
+          eviction_tiles_soon_and_required_for_activation_.push_back(tile);
+        else
+          eviction_tiles_soon_.push_back(tile);
+        break;
+      case TilePriority::NOW:
+        if (tile->required_for_activation())
+          eviction_tiles_now_and_required_for_activation_.push_back(tile);
+        else
+          eviction_tiles_now_.push_back(tile);
+        break;
+    }
+  }
+
+  // TODO(vmpstr): Do this lazily. One option is to have a "sorted" flag that
+  // can be updated for each of the queues.
+  TileEvictionOrder sort_order(tree_priority);
+  std::sort(eviction_tiles_now_.begin(), eviction_tiles_now_.end(), sort_order);
+  std::sort(eviction_tiles_now_and_required_for_activation_.begin(),
+            eviction_tiles_now_and_required_for_activation_.end(),
+            sort_order);
+  std::sort(
+      eviction_tiles_soon_.begin(), eviction_tiles_soon_.end(), sort_order);
+  std::sort(eviction_tiles_soon_and_required_for_activation_.begin(),
+            eviction_tiles_soon_and_required_for_activation_.end(),
+            sort_order);
+  std::sort(eviction_tiles_eventually_.begin(),
+            eviction_tiles_eventually_.end(),
+            sort_order);
+  std::sort(eviction_tiles_eventually_and_required_for_activation_.begin(),
+            eviction_tiles_eventually_and_required_for_activation_.end(),
+            sort_order);
+
+  eviction_tiles_cache_valid_ = true;
+  eviction_cache_tree_priority_ = tree_priority;
+}
+
+// Returns the sorted eviction list for |category|, rebuilding the eviction
+// cache first if needed. Never returns NULL.
+const std::vector<Tile*>* PictureLayerTiling::GetEvictionTiles(
+    TreePriority tree_priority,
+    EvictionCategory category) {
+  UpdateEvictionCacheIfNeeded(tree_priority);
+  switch (category) {
+    case EVENTUALLY:
+      return &eviction_tiles_eventually_;
+    case EVENTUALLY_AND_REQUIRED_FOR_ACTIVATION:
+      return &eviction_tiles_eventually_and_required_for_activation_;
+    case SOON:
+      return &eviction_tiles_soon_;
+    case SOON_AND_REQUIRED_FOR_ACTIVATION:
+      return &eviction_tiles_soon_and_required_for_activation_;
+    case NOW:
+      return &eviction_tiles_now_;
+    case NOW_AND_REQUIRED_FOR_ACTIVATION:
+      return &eviction_tiles_now_and_required_for_activation_;
+  }
+  // Unreachable fallback to keep all control paths returning a valid list.
+  NOTREACHED();
+  return &eviction_tiles_eventually_;
+}
+
+// Default-constructed iterator is immediately invalid (no tiling, no tile).
+PictureLayerTiling::TilingRasterTileIterator::TilingRasterTileIterator()
+    : tiling_(NULL), current_tile_(NULL) {}
+
+// Iterates tiles needing raster in priority order, starting with the
+// visible rect phase and advancing through skewport, soon-border and
+// eventually phases as each is exhausted.
+PictureLayerTiling::TilingRasterTileIterator::TilingRasterTileIterator(
+    PictureLayerTiling* tiling,
+    WhichTree tree)
+    : tiling_(tiling), phase_(VISIBLE_RECT), tree_(tree), current_tile_(NULL) {
+  // Skip straight past the visible phase if it has no tiles at all.
+  if (!tiling_->has_visible_rect_tiles_) {
+    AdvancePhase();
+    return;
+  }
+
+  visible_iterator_ = TilingData::Iterator(&tiling_->tiling_data_,
+                                           tiling_->current_visible_rect_,
+                                           false /* include_borders */);
+  if (!visible_iterator_) {
+    AdvancePhase();
+    return;
+  }
+
+  // If the first visible tile is missing or doesn't need raster, advance
+  // to the first one that does.
+  current_tile_ =
+      tiling_->TileAt(visible_iterator_.index_x(), visible_iterator_.index_y());
+  if (!current_tile_ || !TileNeedsRaster(current_tile_))
+    ++(*this);
+}
+
+PictureLayerTiling::TilingRasterTileIterator::~TilingRasterTileIterator() {}
+
+// Moves to the next non-empty phase (SKEWPORT -> SOON_BORDER -> EVENTUALLY),
+// setting up a spiral iterator over that phase's rect minus the rects of
+// earlier phases, and positions it on the first tile needing raster. Sets
+// current_tile_ to NULL when all phases are exhausted.
+void PictureLayerTiling::TilingRasterTileIterator::AdvancePhase() {
+  DCHECK_LT(phase_, EVENTUALLY_RECT);
+
+  do {
+    phase_ = static_cast<Phase>(phase_ + 1);
+    switch (phase_) {
+      case VISIBLE_RECT:
+        // AdvancePhase is only called after the visible phase has started.
+        NOTREACHED();
+        return;
+      case SKEWPORT_RECT:
+        if (!tiling_->has_skewport_rect_tiles_)
+          continue;
+
+        spiral_iterator_ = TilingData::SpiralDifferenceIterator(
+            &tiling_->tiling_data_,
+            tiling_->current_skewport_rect_,
+            tiling_->current_visible_rect_,
+            tiling_->current_visible_rect_);
+        break;
+      case SOON_BORDER_RECT:
+        if (!tiling_->has_soon_border_rect_tiles_)
+          continue;
+
+        spiral_iterator_ = TilingData::SpiralDifferenceIterator(
+            &tiling_->tiling_data_,
+            tiling_->current_soon_border_rect_,
+            tiling_->current_skewport_rect_,
+            tiling_->current_visible_rect_);
+        break;
+      case EVENTUALLY_RECT:
+        if (!tiling_->has_eventually_rect_tiles_) {
+          current_tile_ = NULL;
+          return;
+        }
+
+        spiral_iterator_ = TilingData::SpiralDifferenceIterator(
+            &tiling_->tiling_data_,
+            tiling_->current_eventually_rect_,
+            tiling_->current_skewport_rect_,
+            tiling_->current_soon_border_rect_);
+        break;
+    }
+
+    // Scan forward to the first existing tile that needs raster.
+    while (spiral_iterator_) {
+      current_tile_ = tiling_->TileAt(spiral_iterator_.index_x(),
+                                      spiral_iterator_.index_y());
+      if (current_tile_ && TileNeedsRaster(current_tile_))
+        break;
+      ++spiral_iterator_;
+    }
+
+    // Ran out of tiles in the final phase: iteration is complete.
+    if (!spiral_iterator_ && phase_ == EVENTUALLY_RECT) {
+      current_tile_ = NULL;
+      break;
+    }
+  } while (!spiral_iterator_);
+}
+
+// Advances to the next existing tile that needs raster, crossing phase
+// boundaries via AdvancePhase() as the current phase's iterator runs out.
+PictureLayerTiling::TilingRasterTileIterator&
+PictureLayerTiling::TilingRasterTileIterator::
+operator++() {
+  current_tile_ = NULL;
+  while (!current_tile_ || !TileNeedsRaster(current_tile_)) {
+    std::pair<int, int> next_index;
+    switch (phase_) {
+      case VISIBLE_RECT:
+        ++visible_iterator_;
+        if (!visible_iterator_) {
+          // AdvancePhase positions us on a valid tile or ends iteration.
+          AdvancePhase();
+          return *this;
+        }
+        next_index = visible_iterator_.index();
+        break;
+      case SKEWPORT_RECT:
+      case SOON_BORDER_RECT:
+        ++spiral_iterator_;
+        if (!spiral_iterator_) {
+          AdvancePhase();
+          return *this;
+        }
+        next_index = spiral_iterator_.index();
+        break;
+      case EVENTUALLY_RECT:
+        ++spiral_iterator_;
+        if (!spiral_iterator_) {
+          // Last phase exhausted: iteration is complete.
+          current_tile_ = NULL;
+          return *this;
+        }
+        next_index = spiral_iterator_.index();
+        break;
+    }
+    current_tile_ = tiling_->TileAt(next_index.first, next_index.second);
+  }
+  return *this;
+}
+
+// Default-constructed iterator is invalid (no eviction list).
+PictureLayerTiling::TilingEvictionTileIterator::TilingEvictionTileIterator()
+    : eviction_tiles_(NULL), current_eviction_tiles_index_(0u) {
+}
+
+// Iterates the tiles of |tiling|'s eviction list for the given priority and
+// category, skipping tiles without resources (see operator++).
+PictureLayerTiling::TilingEvictionTileIterator::TilingEvictionTileIterator(
+    PictureLayerTiling* tiling,
+    TreePriority tree_priority,
+    EvictionCategory category)
+    : eviction_tiles_(tiling->GetEvictionTiles(tree_priority, category)),
+      // Note: initializing to "0 - 1" works as overflow is well defined for
+      // unsigned integers. The first ++(*this) below wraps it to index 0.
+      current_eviction_tiles_index_(static_cast<size_t>(0) - 1) {
+  DCHECK(eviction_tiles_);
+  ++(*this);
+}
+
+PictureLayerTiling::TilingEvictionTileIterator::~TilingEvictionTileIterator() {
+}
+
+// True while a list is attached and the index hasn't run off its end.
+PictureLayerTiling::TilingEvictionTileIterator::operator bool() const {
+  return eviction_tiles_ &&
+         current_eviction_tiles_index_ != eviction_tiles_->size();
+}
+
+// Dereference; only valid while the iterator tests true.
+Tile* PictureLayerTiling::TilingEvictionTileIterator::operator*() {
+  DCHECK(*this);
+  return (*eviction_tiles_)[current_eviction_tiles_index_];
+}
+
+const Tile* PictureLayerTiling::TilingEvictionTileIterator::operator*() const {
+  DCHECK(*this);
+  return (*eviction_tiles_)[current_eviction_tiles_index_];
+}
+
+// Advances to the next tile that still holds resources (only those are
+// worth evicting), or past the end of the list.
+PictureLayerTiling::TilingEvictionTileIterator&
+PictureLayerTiling::TilingEvictionTileIterator::
+operator++() {
+  DCHECK(*this);
+  do {
+    ++current_eviction_tiles_index_;
+  } while (current_eviction_tiles_index_ != eviction_tiles_->size() &&
+           !(*eviction_tiles_)[current_eviction_tiles_index_]->HasResources());
+
+  return *this;
+}
+
+} // namespace cc
diff --git a/cc/resources/picture_layer_tiling.h b/cc/resources/picture_layer_tiling.h
new file mode 100644
index 0000000..2d0ae37
--- /dev/null
+++ b/cc/resources/picture_layer_tiling.h
@@ -0,0 +1,365 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_PICTURE_LAYER_TILING_H_
+#define CC_RESOURCES_PICTURE_LAYER_TILING_H_
+
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/scoped_ptr.h"
+#include "cc/base/cc_export.h"
+#include "cc/base/region.h"
+#include "cc/base/tiling_data.h"
+#include "cc/resources/tile.h"
+#include "cc/resources/tile_priority.h"
+#include "cc/trees/occlusion.h"
+#include "ui/gfx/rect.h"
+
+namespace base {
+namespace debug {
+class TracedValue;
+}
+}
+
+namespace cc {
+
+class PictureLayerTiling;
+class PicturePileImpl;
+
+// Interface the tiling uses to talk back to the owning layer: tile
+// creation, invalidation info, twin (pending/active) tiling lookup and
+// various tuning parameters.
+class CC_EXPORT PictureLayerTilingClient {
+ public:
+ // Create a tile at the given content_rect (in the contents scale of the
+ // tiling) This might return null if the client cannot create such a tile.
+ virtual scoped_refptr<Tile> CreateTile(
+ PictureLayerTiling* tiling,
+ const gfx::Rect& content_rect) = 0;
+ // The picture pile new tiles should raster from.
+ virtual PicturePileImpl* GetPile() = 0;
+ // Desired tile size for a tiling covering |content_bounds|.
+ virtual gfx::Size CalculateTileSize(
+ const gfx::Size& content_bounds) const = 0;
+ // Region invalidated since the last sync, used to recreate/drop tiles.
+ virtual const Region* GetInvalidation() = 0;
+ // Corresponding tiling on the other (pending/active) tree, if any.
+ virtual const PictureLayerTiling* GetTwinTiling(
+ const PictureLayerTiling* tiling) const = 0;
+ virtual PictureLayerTiling* GetRecycledTwinTiling(
+ const PictureLayerTiling* tiling) = 0;
+ // Tuning knobs for interest-area and skewport computation.
+ virtual size_t GetMaxTilesForInterestArea() const = 0;
+ virtual float GetSkewportTargetTimeInSeconds() const = 0;
+ virtual int GetSkewportExtrapolationLimitInContentPixels() const = 0;
+ virtual WhichTree GetTree() const = 0;
+
+ protected:
+ virtual ~PictureLayerTilingClient() {}
+};
+
+// A tiling of one layer at a single contents scale. Owns the Tile objects
+// covering the layer, tracks per-frame priority rects (visible, skewport,
+// soon border, eventually) and exposes iterators used for rasterization,
+// eviction and coverage queries.
+class CC_EXPORT PictureLayerTiling {
+ public:
+ // Buckets for the per-tiling eviction lists; the
+ // "*_AND_REQUIRED_FOR_ACTIVATION" variants hold tiles the pending tree
+ // needs before activation.
+ enum EvictionCategory {
+ EVENTUALLY,
+ EVENTUALLY_AND_REQUIRED_FOR_ACTIVATION,
+ SOON,
+ SOON_AND_REQUIRED_FOR_ACTIVATION,
+ NOW,
+ NOW_AND_REQUIRED_FOR_ACTIVATION
+ };
+
+ // Iterates, in priority order (visible first, then skewport/soon border,
+ // then eventually), over tiles that still need raster work.
+ class CC_EXPORT TilingRasterTileIterator {
+ public:
+ TilingRasterTileIterator();
+ TilingRasterTileIterator(PictureLayerTiling* tiling, WhichTree tree);
+ ~TilingRasterTileIterator();
+
+ operator bool() const { return !!current_tile_; }
+ const Tile* operator*() const { return current_tile_; }
+ Tile* operator*() { return current_tile_; }
+ // Maps the current iteration phase to the priority bin of the tiles it
+ // yields.
+ TilePriority::PriorityBin get_type() const {
+ switch (phase_) {
+ case VISIBLE_RECT:
+ return TilePriority::NOW;
+ case SKEWPORT_RECT:
+ case SOON_BORDER_RECT:
+ return TilePriority::SOON;
+ case EVENTUALLY_RECT:
+ return TilePriority::EVENTUALLY;
+ }
+ NOTREACHED();
+ return TilePriority::EVENTUALLY;
+ }
+
+ TilingRasterTileIterator& operator++();
+
+ private:
+ // Iteration phases, visited in declaration order.
+ enum Phase {
+ VISIBLE_RECT,
+ SKEWPORT_RECT,
+ SOON_BORDER_RECT,
+ EVENTUALLY_RECT
+ };
+
+ void AdvancePhase();
+ // A tile needs raster work if it is not occluded and not yet drawable.
+ bool TileNeedsRaster(Tile* tile) const {
+ return !tile->is_occluded(tree_) && !tile->IsReadyToDraw();
+ }
+
+ PictureLayerTiling* tiling_;
+
+ Phase phase_;
+ WhichTree tree_;
+
+ Tile* current_tile_;
+ TilingData::Iterator visible_iterator_;
+ TilingData::SpiralDifferenceIterator spiral_iterator_;
+ };
+
+ // Iterates over one of the cached eviction lists (see EvictionCategory),
+ // skipping tiles without resources.
+ class CC_EXPORT TilingEvictionTileIterator {
+ public:
+ TilingEvictionTileIterator();
+ TilingEvictionTileIterator(PictureLayerTiling* tiling,
+ TreePriority tree_priority,
+ EvictionCategory category);
+ ~TilingEvictionTileIterator();
+
+ operator bool() const;
+ const Tile* operator*() const;
+ Tile* operator*();
+ TilingEvictionTileIterator& operator++();
+
+ private:
+ // Not owned; points at one of the tiling's eviction_tiles_* vectors.
+ const std::vector<Tile*>* eviction_tiles_;
+ size_t current_eviction_tiles_index_;
+ };
+
+ ~PictureLayerTiling();
+
+ // Create a tiling with no tiles. CreateTiles must be called to add some.
+ static scoped_ptr<PictureLayerTiling> Create(
+ float contents_scale,
+ const gfx::Size& layer_bounds,
+ PictureLayerTilingClient* client);
+ gfx::Size layer_bounds() const { return layer_bounds_; }
+ void UpdateTilesToCurrentPile(const Region& layer_invalidation,
+ const gfx::Size& new_layer_bounds);
+ void CreateMissingTilesInLiveTilesRect();
+ void RemoveTilesInRegion(const Region& layer_region);
+
+ void SetClient(PictureLayerTilingClient* client);
+ void set_resolution(TileResolution resolution) { resolution_ = resolution; }
+ TileResolution resolution() const { return resolution_; }
+
+ gfx::Size tiling_size() const { return tiling_data_.tiling_size(); }
+ gfx::Rect live_tiles_rect() const { return live_tiles_rect_; }
+ gfx::Size tile_size() const { return tiling_data_.max_texture_size(); }
+ float contents_scale() const { return contents_scale_; }
+
+ // Returns the tile at tiling index (i, j), or NULL if none exists there.
+ Tile* TileAt(int i, int j) const {
+ TileMap::const_iterator iter = tiles_.find(TileMapKey(i, j));
+ return (iter == tiles_.end()) ? NULL : iter->second.get();
+ }
+
+ void CreateAllTilesForTesting() {
+ SetLiveTilesRect(gfx::Rect(tiling_data_.tiling_size()));
+ }
+
+ const TilingData& TilingDataForTesting() const { return tiling_data_; }
+
+ std::vector<Tile*> AllTilesForTesting() const {
+ std::vector<Tile*> all_tiles;
+ for (TileMap::const_iterator it = tiles_.begin();
+ it != tiles_.end(); ++it)
+ all_tiles.push_back(it->second.get());
+ return all_tiles;
+ }
+
+ std::vector<scoped_refptr<Tile> > AllRefTilesForTesting() const {
+ std::vector<scoped_refptr<Tile> > all_tiles;
+ for (TileMap::const_iterator it = tiles_.begin(); it != tiles_.end(); ++it)
+ all_tiles.push_back(it->second);
+ return all_tiles;
+ }
+
+ const gfx::Rect& GetCurrentVisibleRectForTesting() const {
+ return current_visible_rect_;
+ }
+
+ // Iterate over all tiles to fill content_rect. Even if tiles are invalid
+ // (i.e. no valid resource) this tiling should still iterate over them.
+ // The union of all geometry_rect calls for each element iterated over should
+ // exactly equal content_rect and no two geometry_rects should intersect.
+ class CC_EXPORT CoverageIterator {
+ public:
+ CoverageIterator();
+ CoverageIterator(const PictureLayerTiling* tiling,
+ float dest_scale,
+ const gfx::Rect& rect);
+ ~CoverageIterator();
+
+ // Visible rect (no borders), always in the space of content_rect,
+ // regardless of the contents scale of the tiling.
+ gfx::Rect geometry_rect() const;
+ // Texture rect (in texels) for geometry_rect
+ gfx::RectF texture_rect() const;
+ gfx::Size texture_size() const;
+
+ // Full rect (including borders) of the current tile, always in the space
+ // of content_rect, regardless of the contents scale of the tiling.
+ gfx::Rect full_tile_geometry_rect() const;
+
+ Tile* operator->() const { return current_tile_; }
+ Tile* operator*() const { return current_tile_; }
+
+ CoverageIterator& operator++();
+ operator bool() const { return tile_j_ <= bottom_; }
+
+ int i() const { return tile_i_; }
+ int j() const { return tile_j_; }
+
+ private:
+ const PictureLayerTiling* tiling_;
+ gfx::Rect dest_rect_;
+ float dest_to_content_scale_;
+
+ Tile* current_tile_;
+ gfx::Rect current_geometry_rect_;
+ int tile_i_;
+ int tile_j_;
+ int left_;
+ int top_;
+ int right_;
+ int bottom_;
+
+ friend class PictureLayerTiling;
+ };
+
+ void Reset();
+
+ // Recomputes the priority rects and the priorities of all live tiles for
+ // the current frame.
+ void UpdateTilePriorities(WhichTree tree,
+ const gfx::Rect& viewport_in_layer_space,
+ float ideal_contents_scale,
+ double current_frame_time_in_seconds,
+ const Occlusion& occlusion_in_layer_space);
+
+ // Copies the src_tree priority into the dst_tree priority for all tiles.
+ // The src_tree priority is reset to the lowest priority possible. This
+ // also updates the pile on each tile to be the current client's pile.
+ void DidBecomeActive();
+
+ // Resets the active priority for all tiles in a tiling, when an active
+ // tiling is becoming recycled. This may include some tiles which are
+ // not in the the pending tiling (due to invalidations). This must
+ // be called before DidBecomeActive, as it resets the active priority
+ // while DidBecomeActive promotes pending priority on a similar set of tiles.
+ void DidBecomeRecycled();
+
+ bool NeedsUpdateForFrameAtTime(double frame_time_in_seconds) {
+ return frame_time_in_seconds != last_impl_frame_time_in_seconds_;
+ }
+
+ void GetAllTilesForTracing(std::set<const Tile*>* tiles) const;
+ void AsValueInto(base::debug::TracedValue* array) const;
+ size_t GPUMemoryUsageInBytes() const;
+
+ // Memoizes the last ExpandRectEquallyToAreaBoundedBy computation so
+ // repeated calls with unchanged inputs are cheap.
+ struct RectExpansionCache {
+ RectExpansionCache();
+
+ gfx::Rect previous_start;
+ gfx::Rect previous_bounds;
+ gfx::Rect previous_result;
+ int64 previous_target;
+ };
+
+ static
+ gfx::Rect ExpandRectEquallyToAreaBoundedBy(
+ const gfx::Rect& starting_rect,
+ int64 target_area,
+ const gfx::Rect& bounding_rect,
+ RectExpansionCache* cache);
+
+ bool has_ever_been_updated() const {
+ return last_impl_frame_time_in_seconds_ != 0.0;
+ }
+
+ protected:
+ friend class CoverageIterator;
+ friend class TilingRasterTileIterator;
+ friend class TilingEvictionTileIterator;
+
+ typedef std::pair<int, int> TileMapKey;
+ typedef base::hash_map<TileMapKey, scoped_refptr<Tile> > TileMap;
+
+ PictureLayerTiling(float contents_scale,
+ const gfx::Size& layer_bounds,
+ PictureLayerTilingClient* client);
+ void SetLiveTilesRect(const gfx::Rect& live_tiles_rect);
+ void VerifyLiveTilesRect();
+ Tile* CreateTile(int i, int j, const PictureLayerTiling* twin_tiling);
+ // Returns true if the Tile existed and was removed from the tiling.
+ bool RemoveTileAt(int i, int j, PictureLayerTiling* recycled_twin);
+
+ // Computes a skewport. The calculation extrapolates the last visible
+ // rect and the current visible rect to expand the skewport to where it
+ // would be in |skewport_target_time| seconds. Note that the skewport
+ // is guaranteed to contain the current visible rect.
+ gfx::Rect ComputeSkewport(double current_frame_time_in_seconds,
+ const gfx::Rect& visible_rect_in_content_space)
+ const;
+
+ void UpdateEvictionCacheIfNeeded(TreePriority tree_priority);
+ const std::vector<Tile*>* GetEvictionTiles(TreePriority tree_priority,
+ EvictionCategory category);
+
+ void Invalidate(const Region& layer_region);
+
+ void DoInvalidate(const Region& layer_region,
+ bool recreate_invalidated_tiles);
+
+ // Given properties.
+ float contents_scale_;
+ gfx::Size layer_bounds_;
+ TileResolution resolution_;
+ PictureLayerTilingClient* client_;
+
+ // Internal data.
+ TilingData tiling_data_;
+ TileMap tiles_; // It is not legal to have a NULL tile in the tiles_ map.
+ gfx::Rect live_tiles_rect_;
+
+ // State saved for computing velocities based upon finite differences.
+ double last_impl_frame_time_in_seconds_;
+ gfx::Rect last_visible_rect_in_content_space_;
+
+ // Iteration rects in content space
+ gfx::Rect current_visible_rect_;
+ gfx::Rect current_skewport_rect_;
+ gfx::Rect current_soon_border_rect_;
+ gfx::Rect current_eventually_rect_;
+
+ bool has_visible_rect_tiles_;
+ bool has_skewport_rect_tiles_;
+ bool has_soon_border_rect_tiles_;
+ bool has_eventually_rect_tiles_;
+
+ // TODO(reveman): Remove this in favour of an array of eviction_tiles_ when we
+ // change all enums to have a consistent way of getting the count/last
+ // element.
+ std::vector<Tile*> eviction_tiles_now_;
+ std::vector<Tile*> eviction_tiles_now_and_required_for_activation_;
+ std::vector<Tile*> eviction_tiles_soon_;
+ std::vector<Tile*> eviction_tiles_soon_and_required_for_activation_;
+ std::vector<Tile*> eviction_tiles_eventually_;
+ std::vector<Tile*> eviction_tiles_eventually_and_required_for_activation_;
+
+ bool eviction_tiles_cache_valid_;
+ TreePriority eviction_cache_tree_priority_;
+
+ private:
+ DISALLOW_ASSIGN(PictureLayerTiling);
+
+ RectExpansionCache expansion_cache_;
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_PICTURE_LAYER_TILING_H_
diff --git a/cc/resources/picture_layer_tiling_perftest.cc b/cc/resources/picture_layer_tiling_perftest.cc
new file mode 100644
index 0000000..78a6a08
--- /dev/null
+++ b/cc/resources/picture_layer_tiling_perftest.cc
@@ -0,0 +1,358 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/debug/lap_timer.h"
+#include "cc/resources/picture_layer_tiling.h"
+#include "cc/resources/resource_provider.h"
+#include "cc/resources/scoped_resource.h"
+#include "cc/test/fake_output_surface.h"
+#include "cc/test/fake_output_surface_client.h"
+#include "cc/test/fake_picture_layer_tiling_client.h"
+#include "cc/test/test_context_provider.h"
+#include "cc/test/test_shared_bitmap_manager.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace cc {
+
+namespace {
+
+static const int kTimeLimitMillis = 2000;
+static const int kWarmupRuns = 5;
+static const int kTimeCheckInterval = 10;
+
+// Perf-test fixture: builds a fake GL output surface + resource provider
+// and a 50x50-tile tiling, then times invalidation, priority updates and
+// iterator construction/iteration with a LapTimer.
+class PictureLayerTilingPerfTest : public testing::Test {
+ public:
+ PictureLayerTilingPerfTest()
+ : timer_(kWarmupRuns,
+ base::TimeDelta::FromMilliseconds(kTimeLimitMillis),
+ kTimeCheckInterval),
+ context_provider_(TestContextProvider::Create()) {
+ output_surface_ = FakeOutputSurface::Create3d(context_provider_).Pass();
+ CHECK(output_surface_->BindToClient(&output_surface_client_));
+
+ shared_bitmap_manager_.reset(new TestSharedBitmapManager());
+ resource_provider_ = ResourceProvider::Create(output_surface_.get(),
+ shared_bitmap_manager_.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false).Pass();
+ }
+
+ virtual void SetUp() OVERRIDE {
+ picture_layer_tiling_client_.SetTileSize(gfx::Size(256, 256));
+ picture_layer_tiling_client_.set_max_tiles_for_interest_area(250);
+ picture_layer_tiling_client_.set_tree(PENDING_TREE);
+ picture_layer_tiling_ = PictureLayerTiling::Create(
+ 1, gfx::Size(256 * 50, 256 * 50), &picture_layer_tiling_client_);
+ picture_layer_tiling_->CreateAllTilesForTesting();
+ }
+
+ virtual void TearDown() OVERRIDE {
+ picture_layer_tiling_.reset(NULL);
+ }
+
+ // Times repeated pile updates over |region|.
+ void RunInvalidateTest(const std::string& test_name, const Region& region) {
+ timer_.Reset();
+ do {
+ picture_layer_tiling_->UpdateTilesToCurrentPile(
+ region, picture_layer_tiling_->tiling_size());
+ timer_.NextLap();
+ } while (!timer_.HasTimeLimitExpired());
+
+ perf_test::PrintResult(
+ "invalidation", "", test_name, timer_.LapsPerSecond(), "runs/s", true);
+ }
+
+ // Times priority updates with a fixed viewport.
+ // NOTE(review): |transform| is not used in the body — confirm whether it
+ // was meant to be applied to the viewport.
+ void RunUpdateTilePrioritiesStationaryTest(const std::string& test_name,
+ const gfx::Transform& transform) {
+ gfx::Rect viewport_rect(0, 0, 1024, 768);
+
+ timer_.Reset();
+ do {
+ picture_layer_tiling_->UpdateTilePriorities(
+ PENDING_TREE, viewport_rect, 1.f, timer_.NumLaps() + 1, Occlusion());
+ timer_.NextLap();
+ } while (!timer_.HasTimeLimitExpired());
+
+ perf_test::PrintResult("update_tile_priorities_stationary",
+ "",
+ test_name,
+ timer_.LapsPerSecond(),
+ "runs/s",
+ true);
+ }
+
+ // Times priority updates while moving the viewport in a square pattern.
+ // NOTE(review): |transform| is not used in the body — confirm intent.
+ void RunUpdateTilePrioritiesScrollingTest(const std::string& test_name,
+ const gfx::Transform& transform) {
+ gfx::Size viewport_size(1024, 768);
+ gfx::Rect viewport_rect(viewport_size);
+ int xoffsets[] = {10, 0, -10, 0};
+ int yoffsets[] = {0, 10, 0, -10};
+ int offsetIndex = 0;
+ int offsetCount = 0;
+ const int maxOffsetCount = 1000;
+
+ timer_.Reset();
+ do {
+ picture_layer_tiling_->UpdateTilePriorities(
+ PENDING_TREE, viewport_rect, 1.f, timer_.NumLaps() + 1, Occlusion());
+
+ viewport_rect = gfx::Rect(viewport_rect.x() + xoffsets[offsetIndex],
+ viewport_rect.y() + yoffsets[offsetIndex],
+ viewport_rect.width(),
+ viewport_rect.height());
+
+ if (++offsetCount > maxOffsetCount) {
+ offsetCount = 0;
+ offsetIndex = (offsetIndex + 1) % 4;
+ }
+ timer_.NextLap();
+ } while (!timer_.HasTimeLimitExpired());
+
+ perf_test::PrintResult("update_tile_priorities_scrolling",
+ "",
+ test_name,
+ timer_.LapsPerSecond(),
+ "runs/s",
+ true);
+ }
+
+ // Times construction of a raster-tile iterator.
+ void RunRasterIteratorConstructTest(const std::string& test_name,
+ const gfx::Rect& viewport) {
+ gfx::Size bounds(viewport.size());
+ picture_layer_tiling_ =
+ PictureLayerTiling::Create(1, bounds, &picture_layer_tiling_client_);
+ picture_layer_tiling_client_.set_tree(ACTIVE_TREE);
+ picture_layer_tiling_->UpdateTilePriorities(
+ ACTIVE_TREE, viewport, 1.0f, 1.0, Occlusion());
+
+ timer_.Reset();
+ do {
+ PictureLayerTiling::TilingRasterTileIterator it(
+ picture_layer_tiling_.get(), ACTIVE_TREE);
+ timer_.NextLap();
+ } while (!timer_.HasTimeLimitExpired());
+
+ perf_test::PrintResult("tiling_raster_tile_iterator_construct",
+ "",
+ test_name,
+ timer_.LapsPerSecond(),
+ "runs/s",
+ true);
+ }
+
+ // Times construction plus iteration over |num_tiles| raster tiles.
+ void RunRasterIteratorConstructAndIterateTest(const std::string& test_name,
+ int num_tiles,
+ const gfx::Rect& viewport) {
+ gfx::Size bounds(10000, 10000);
+ picture_layer_tiling_ =
+ PictureLayerTiling::Create(1, bounds, &picture_layer_tiling_client_);
+ picture_layer_tiling_client_.set_tree(ACTIVE_TREE);
+ picture_layer_tiling_->UpdateTilePriorities(
+ ACTIVE_TREE, viewport, 1.0f, 1.0, Occlusion());
+
+ timer_.Reset();
+ do {
+ int count = num_tiles;
+ PictureLayerTiling::TilingRasterTileIterator it(
+ picture_layer_tiling_.get(), ACTIVE_TREE);
+ while (count--) {
+ ASSERT_TRUE(it) << "count: " << count;
+ ASSERT_TRUE(*it != NULL) << "count: " << count;
+ ++it;
+ }
+ timer_.NextLap();
+ } while (!timer_.HasTimeLimitExpired());
+
+ perf_test::PrintResult("tiling_raster_tile_iterator_construct_and_iterate",
+ "",
+ test_name,
+ timer_.LapsPerSecond(),
+ "runs/s",
+ true);
+ }
+
+ // Times construction of an eviction-tile iterator, cycling tree priorities.
+ void RunEvictionIteratorConstructTest(const std::string& test_name,
+ const gfx::Rect& viewport) {
+ gfx::Size bounds(viewport.size());
+ picture_layer_tiling_ =
+ PictureLayerTiling::Create(1, bounds, &picture_layer_tiling_client_);
+ picture_layer_tiling_client_.set_tree(ACTIVE_TREE);
+ picture_layer_tiling_->UpdateTilePriorities(
+ ACTIVE_TREE, viewport, 1.0f, 1.0, Occlusion());
+
+ timer_.Reset();
+ TreePriority priorities[] = {SAME_PRIORITY_FOR_BOTH_TREES,
+ SMOOTHNESS_TAKES_PRIORITY,
+ NEW_CONTENT_TAKES_PRIORITY};
+ int priority_count = 0;
+ do {
+ PictureLayerTiling::TilingEvictionTileIterator it(
+ picture_layer_tiling_.get(),
+ priorities[priority_count],
+ PictureLayerTiling::NOW);
+ priority_count = (priority_count + 1) % arraysize(priorities);
+ timer_.NextLap();
+ } while (!timer_.HasTimeLimitExpired());
+
+ perf_test::PrintResult("tiling_eviction_tile_iterator_construct",
+ "",
+ test_name,
+ timer_.LapsPerSecond(),
+ "runs/s",
+ true);
+ }
+
+ // Times construction plus iteration over |num_tiles| eviction tiles.
+ // Tiles are given fake resources first so the iterator has work to do.
+ void RunEvictionIteratorConstructAndIterateTest(const std::string& test_name,
+ int num_tiles,
+ const gfx::Rect& viewport) {
+ gfx::Size bounds(10000, 10000);
+ picture_layer_tiling_ =
+ PictureLayerTiling::Create(1, bounds, &picture_layer_tiling_client_);
+ picture_layer_tiling_client_.set_tree(ACTIVE_TREE);
+ picture_layer_tiling_->UpdateTilePriorities(
+ ACTIVE_TREE, viewport, 1.0f, 1.0, Occlusion());
+
+ TreePriority priorities[] = {SAME_PRIORITY_FOR_BOTH_TREES,
+ SMOOTHNESS_TAKES_PRIORITY,
+ NEW_CONTENT_TAKES_PRIORITY};
+
+ // Ensure all tiles have resources.
+ std::vector<Tile*> all_tiles = picture_layer_tiling_->AllTilesForTesting();
+ for (std::vector<Tile*>::iterator tile_it = all_tiles.begin();
+ tile_it != all_tiles.end();
+ ++tile_it) {
+ Tile* tile = *tile_it;
+ ManagedTileState::DrawInfo& draw_info = tile->draw_info();
+ draw_info.SetResourceForTesting(
+ ScopedResource::Create(resource_provider_.get()).Pass());
+ }
+
+ int priority_count = 0;
+ timer_.Reset();
+ do {
+ int count = num_tiles;
+ PictureLayerTiling::TilingEvictionTileIterator it(
+ picture_layer_tiling_.get(),
+ priorities[priority_count],
+ PictureLayerTiling::EVENTUALLY);
+ while (count--) {
+ ASSERT_TRUE(it) << "count: " << count;
+ ASSERT_TRUE(*it != NULL) << "count: " << count;
+ ++it;
+ }
+ priority_count = (priority_count + 1) % arraysize(priorities);
+ timer_.NextLap();
+ } while (!timer_.HasTimeLimitExpired());
+
+ // Remove all resources from tiles to make sure the tile version destructor
+ // doesn't complain.
+ for (std::vector<Tile*>::iterator tile_it = all_tiles.begin();
+ tile_it != all_tiles.end();
+ ++tile_it) {
+ Tile* tile = *tile_it;
+ ManagedTileState::DrawInfo& draw_info = tile->draw_info();
+ draw_info.SetResourceForTesting(scoped_ptr<ScopedResource>());
+ }
+
+ perf_test::PrintResult(
+ "tiling_eviction_tile_iterator_construct_and_iterate",
+ "",
+ test_name,
+ timer_.LapsPerSecond(),
+ "runs/s",
+ true);
+ }
+
+ private:
+ FakePictureLayerTilingClient picture_layer_tiling_client_;
+ scoped_ptr<PictureLayerTiling> picture_layer_tiling_;
+
+ LapTimer timer_;
+
+ scoped_refptr<ContextProvider> context_provider_;
+ FakeOutputSurfaceClient output_surface_client_;
+ scoped_ptr<FakeOutputSurface> output_surface_;
+ scoped_ptr<SharedBitmapManager> shared_bitmap_manager_;
+ scoped_ptr<ResourceProvider> resource_provider_;
+};
+
+// Benchmarks invalidation with one tile, half the layer, and the full layer.
+TEST_F(PictureLayerTilingPerfTest, Invalidate) {
+ Region one_tile(gfx::Rect(256, 256));
+ RunInvalidateTest("1x1", one_tile);
+
+ Region half_region(gfx::Rect(25 * 256, 50 * 256));
+ RunInvalidateTest("25x50", half_region);
+
+ Region full_region(gfx::Rect(50 * 256, 50 * 256));
+ RunInvalidateTest("50x50", full_region);
+}
+
+// Benchmarks priority updates under identity, rotated and perspective
+// transforms, both stationary and scrolling.
+#if defined(OS_ANDROID)
+// TODO(vmpstr): Investigate why this is noisy (crbug.com/310220).
+TEST_F(PictureLayerTilingPerfTest, DISABLED_UpdateTilePriorities) {
+#else
+TEST_F(PictureLayerTilingPerfTest, UpdateTilePriorities) {
+#endif  // defined(OS_ANDROID)
+ gfx::Transform transform;
+
+ RunUpdateTilePrioritiesStationaryTest("no_transform", transform);
+ RunUpdateTilePrioritiesScrollingTest("no_transform", transform);
+
+ transform.Rotate(10);
+ RunUpdateTilePrioritiesStationaryTest("rotation", transform);
+ RunUpdateTilePrioritiesScrollingTest("rotation", transform);
+
+ transform.ApplyPerspectiveDepth(10);
+ RunUpdateTilePrioritiesStationaryTest("perspective", transform);
+ RunUpdateTilePrioritiesScrollingTest("perspective", transform);
+}
+
+// Benchmarks raster-iterator construction for several viewport offsets.
+TEST_F(PictureLayerTilingPerfTest, TilingRasterTileIteratorConstruct) {
+ RunRasterIteratorConstructTest("0_0_100x100", gfx::Rect(0, 0, 100, 100));
+ RunRasterIteratorConstructTest("50_0_100x100", gfx::Rect(50, 0, 100, 100));
+ RunRasterIteratorConstructTest("100_0_100x100", gfx::Rect(100, 0, 100, 100));
+ RunRasterIteratorConstructTest("150_0_100x100", gfx::Rect(150, 0, 100, 100));
+}
+
+// Benchmarks raster-iterator construction plus iteration over 32/64 tiles.
+TEST_F(PictureLayerTilingPerfTest,
+ TilingRasterTileIteratorConstructAndIterate) {
+ RunRasterIteratorConstructAndIterateTest(
+ "32_100x100", 32, gfx::Rect(0, 0, 100, 100));
+ RunRasterIteratorConstructAndIterateTest(
+ "32_500x500", 32, gfx::Rect(0, 0, 500, 500));
+ RunRasterIteratorConstructAndIterateTest(
+ "64_100x100", 64, gfx::Rect(0, 0, 100, 100));
+ RunRasterIteratorConstructAndIterateTest(
+ "64_500x500", 64, gfx::Rect(0, 0, 500, 500));
+}
+
+// Benchmarks eviction-iterator construction for several viewport offsets.
+TEST_F(PictureLayerTilingPerfTest, TilingEvictionTileIteratorConstruct) {
+ RunEvictionIteratorConstructTest("0_0_100x100", gfx::Rect(0, 0, 100, 100));
+ RunEvictionIteratorConstructTest("50_0_100x100", gfx::Rect(50, 0, 100, 100));
+ RunEvictionIteratorConstructTest("100_0_100x100",
+ gfx::Rect(100, 0, 100, 100));
+ RunEvictionIteratorConstructTest("150_0_100x100",
+ gfx::Rect(150, 0, 100, 100));
+}
+
+// Benchmarks eviction-iterator construction plus iteration over 32/64 tiles.
+TEST_F(PictureLayerTilingPerfTest,
+ TilingEvictionTileIteratorConstructAndIterate) {
+ RunEvictionIteratorConstructAndIterateTest(
+ "32_100x100", 32, gfx::Rect(0, 0, 100, 100));
+ RunEvictionIteratorConstructAndIterateTest(
+ "32_500x500", 32, gfx::Rect(0, 0, 500, 500));
+ RunEvictionIteratorConstructAndIterateTest(
+ "64_100x100", 64, gfx::Rect(0, 0, 100, 100));
+ RunEvictionIteratorConstructAndIterateTest(
+ "64_500x500", 64, gfx::Rect(0, 0, 500, 500));
+}
+
+} // namespace
+
+} // namespace cc
diff --git a/cc/resources/picture_layer_tiling_set.cc b/cc/resources/picture_layer_tiling_set.cc
new file mode 100644
index 0000000..d6594df
--- /dev/null
+++ b/cc/resources/picture_layer_tiling_set.cc
@@ -0,0 +1,367 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/picture_layer_tiling_set.h"
+
+#include <limits>
+
+namespace cc {
+
+namespace {
+
+// Sort predicate: orders tilings from highest contents scale to lowest.
+class LargestToSmallestScaleFunctor {
+ public:
+ bool operator() (PictureLayerTiling* left, PictureLayerTiling* right) {
+ return left->contents_scale() > right->contents_scale();
+ }
+};
+
+} // namespace
+
+
+// Creates an empty set; tilings are added later via AddTiling/SyncTilings.
+PictureLayerTilingSet::PictureLayerTilingSet(
+ PictureLayerTilingClient* client,
+ const gfx::Size& layer_bounds)
+ : client_(client),
+ layer_bounds_(layer_bounds) {
+}
+
+// tilings_ is a ScopedPtrVector, so owned tilings are destroyed with it.
+PictureLayerTilingSet::~PictureLayerTilingSet() {
+}
+
+// Repoints this set and all owned tilings at a new client.
+void PictureLayerTilingSet::SetClient(PictureLayerTilingClient* client) {
+ client_ = client;
+ for (size_t i = 0; i < tilings_.size(); ++i)
+ tilings_[i]->SetClient(client_);
+}
+
+// Drops tiles intersecting |region| (in layer space) from every tiling.
+void PictureLayerTilingSet::RemoveTilesInRegion(const Region& region) {
+ for (size_t i = 0; i < tilings_.size(); ++i)
+ tilings_[i]->RemoveTilesInRegion(region);
+}
+
+// Makes this set mirror |other|'s set of contents scales: tilings below
+// |minimum_contents_scale| or absent from |other| are deleted, tiles
+// intersecting |layer_invalidation| are recreated, and all tilings are
+// resized to |new_layer_bounds|. Returns true if a high-res tiling was
+// synced.
+bool PictureLayerTilingSet::SyncTilings(const PictureLayerTilingSet& other,
+ const gfx::Size& new_layer_bounds,
+ const Region& layer_invalidation,
+ float minimum_contents_scale) {
+ if (new_layer_bounds.IsEmpty()) {
+ RemoveAllTilings();
+ layer_bounds_ = new_layer_bounds;
+ return false;
+ }
+
+ tilings_.reserve(other.tilings_.size());
+
+ // Remove any tilings that aren't in |other| or don't meet the minimum.
+ for (size_t i = 0; i < tilings_.size(); ++i) {
+ float scale = tilings_[i]->contents_scale();
+ if (scale >= minimum_contents_scale && !!other.TilingAtScale(scale))
+ continue;
+ // Swap with the last element and remove it.
+ tilings_.swap(tilings_.begin() + i, tilings_.end() - 1);
+ tilings_.pop_back();
+ --i;
+ }
+
+ bool have_high_res_tiling = false;
+
+ // Add any missing tilings from |other| that meet the minimum.
+ for (size_t i = 0; i < other.tilings_.size(); ++i) {
+ float contents_scale = other.tilings_[i]->contents_scale();
+ if (contents_scale < minimum_contents_scale)
+ continue;
+ if (PictureLayerTiling* this_tiling = TilingAtScale(contents_scale)) {
+ this_tiling->set_resolution(other.tilings_[i]->resolution());
+
+ this_tiling->UpdateTilesToCurrentPile(layer_invalidation,
+ new_layer_bounds);
+ this_tiling->CreateMissingTilesInLiveTilesRect();
+ if (this_tiling->resolution() == HIGH_RESOLUTION)
+ have_high_res_tiling = true;
+
+ DCHECK(this_tiling->tile_size() ==
+ client_->CalculateTileSize(this_tiling->tiling_size()))
+ << "tile_size: " << this_tiling->tile_size().ToString()
+ << " tiling_size: " << this_tiling->tiling_size().ToString()
+ << " CalculateTileSize: "
+ << client_->CalculateTileSize(this_tiling->tiling_size()).ToString();
+ continue;
+ }
+ // No tiling at this scale yet; create one sized to the new bounds.
+ scoped_ptr<PictureLayerTiling> new_tiling = PictureLayerTiling::Create(
+ contents_scale,
+ new_layer_bounds,
+ client_);
+ new_tiling->set_resolution(other.tilings_[i]->resolution());
+ if (new_tiling->resolution() == HIGH_RESOLUTION)
+ have_high_res_tiling = true;
+ tilings_.push_back(new_tiling.Pass());
+ }
+ // Keep tilings ordered from largest scale to smallest.
+ tilings_.sort(LargestToSmallestScaleFunctor());
+
+ layer_bounds_ = new_layer_bounds;
+ return have_high_res_tiling;
+}
+
+// Creates and owns a new tiling at |contents_scale| (which must not already
+// exist in the set) and returns a pointer to it.
+PictureLayerTiling* PictureLayerTilingSet::AddTiling(float contents_scale) {
+ for (size_t i = 0; i < tilings_.size(); ++i)
+ DCHECK_NE(tilings_[i]->contents_scale(), contents_scale);
+
+ tilings_.push_back(PictureLayerTiling::Create(contents_scale,
+ layer_bounds_,
+ client_));
+ // Grab the pointer before sorting invalidates positional assumptions.
+ PictureLayerTiling* appended = tilings_.back();
+
+ tilings_.sort(LargestToSmallestScaleFunctor());
+ return appended;
+}
+
+// Counts tilings marked HIGH_RESOLUTION.
+int PictureLayerTilingSet::NumHighResTilings() const {
+ int count = 0;
+ for (size_t i = 0; i < tilings_.size(); ++i)
+ count += (tilings_[i]->resolution() == HIGH_RESOLUTION) ? 1 : 0;
+ return count;
+}
+
+// Linear scan for a tiling with exactly |scale|; NULL if none exists.
+// The number of tilings per layer is small, so O(n) is fine here.
+PictureLayerTiling* PictureLayerTilingSet::TilingAtScale(float scale) const {
+ for (size_t i = 0; i < tilings_.size(); ++i) {
+ PictureLayerTiling* tiling = tilings_[i];
+ if (tiling->contents_scale() == scale)
+ return tiling;
+ }
+ return NULL;
+}
+
+// Destroys every tiling (and the tiles they own).
+void PictureLayerTilingSet::RemoveAllTilings() {
+ tilings_.clear();
+}
+
+// Removes and destroys |tiling| if it is owned by this set; no-op otherwise.
+void PictureLayerTilingSet::Remove(PictureLayerTiling* tiling) {
+ ScopedPtrVector<PictureLayerTiling>::iterator iter =
+ std::find(tilings_.begin(), tilings_.end(), tiling);
+ if (iter == tilings_.end())
+ return;
+ tilings_.erase(iter);
+}
+
+// Drops all tiles from every tiling, but keeps the tilings themselves.
+void PictureLayerTilingSet::RemoveAllTiles() {
+ for (size_t i = 0; i < tilings_.size(); ++i)
+ tilings_[i]->Reset();
+}
+
+// Covers |content_rect| using tiles from the set, preferring the tiling
+// whose scale is closest to (but not less than, if possible)
+// |ideal_contents_scale|; regions no tiling can cover are reported as
+// checkerboard rects.
+PictureLayerTilingSet::CoverageIterator::CoverageIterator(
+ const PictureLayerTilingSet* set,
+ float contents_scale,
+ const gfx::Rect& content_rect,
+ float ideal_contents_scale)
+ : set_(set),
+ contents_scale_(contents_scale),
+ ideal_contents_scale_(ideal_contents_scale),
+ current_tiling_(-1) {
+ // Initially the whole target rect is uncovered.
+ missing_region_.Union(content_rect);
+
+ // Find the first tiling at or above the ideal scale (tilings_ is sorted
+ // largest-to-smallest scale).
+ for (ideal_tiling_ = 0;
+ static_cast<size_t>(ideal_tiling_) < set_->tilings_.size();
+ ++ideal_tiling_) {
+ PictureLayerTiling* tiling = set_->tilings_[ideal_tiling_];
+ if (tiling->contents_scale() < ideal_contents_scale_) {
+ if (ideal_tiling_ > 0)
+ ideal_tiling_--;
+ break;
+ }
+ }
+
+ DCHECK_LE(set_->tilings_.size(),
+ static_cast<size_t>(std::numeric_limits<int>::max()));
+
+ // Clamp in case every tiling was above the ideal scale.
+ int num_tilings = set_->tilings_.size();
+ if (ideal_tiling_ == num_tilings && ideal_tiling_ > 0)
+ ideal_tiling_--;
+
+ // Advance to the first coverage rect.
+ ++(*this);
+}
+
+// No owned resources; the set outlives the iterator.
+PictureLayerTilingSet::CoverageIterator::~CoverageIterator() {
+}
+
+// Current coverage rect: either a tile's geometry rect, or a checkerboard
+// rect from the missing region when no tiling covers this area.
+gfx::Rect PictureLayerTilingSet::CoverageIterator::geometry_rect() const {
+ if (!tiling_iter_) {
+ if (!region_iter_.has_rect())
+ return gfx::Rect();
+ return region_iter_.rect();
+ }
+ return tiling_iter_.geometry_rect();
+}
+
+// Texture coordinates for the current rect; empty for checkerboard rects.
+gfx::RectF PictureLayerTilingSet::CoverageIterator::texture_rect() const {
+ return tiling_iter_ ? tiling_iter_.texture_rect() : gfx::RectF();
+}
+
+// Texture size for the current rect; empty for checkerboard rects.
+gfx::Size PictureLayerTilingSet::CoverageIterator::texture_size() const {
+ return tiling_iter_ ? tiling_iter_.texture_size() : gfx::Size();
+}
+
+// Current tile, or NULL when over a checkerboard rect.
+Tile* PictureLayerTilingSet::CoverageIterator::operator->() const {
+ return tiling_iter_ ? *tiling_iter_ : NULL;
+}
+
+// Current tile, or NULL when over a checkerboard rect.
+Tile* PictureLayerTilingSet::CoverageIterator::operator*() const {
+ return tiling_iter_ ? *tiling_iter_ : NULL;
+}
+
+// The tiling currently being iterated; NULL before the first increment
+// (index -1) or once the index runs past the last tiling.
+PictureLayerTiling* PictureLayerTilingSet::CoverageIterator::CurrentTiling() {
+ bool in_range =
+ current_tiling_ >= 0 &&
+ static_cast<size_t>(current_tiling_) < set_->tilings_.size();
+ return in_range ? set_->tilings_[current_tiling_] : NULL;
+}
+
+// Order returned by this method is:
+// 1. Ideal tiling index
+// 2. Tiling index < Ideal in decreasing order (higher res than ideal)
+// 3. Tiling index > Ideal in increasing order (lower res than ideal)
+// 4. Tiling index > tilings.size() (invalid index)
+int PictureLayerTilingSet::CoverageIterator::NextTiling() const {
+ if (current_tiling_ < 0)
+ return ideal_tiling_;
+ else if (current_tiling_ > ideal_tiling_)
+ return current_tiling_ + 1;
+ else if (current_tiling_)
+ return current_tiling_ - 1;
+ else
+ return ideal_tiling_ + 1;
+}
+
+// Advances to the next coverage rect: either a drawable tile from some
+// tiling, or (once all tilings are exhausted) a checkerboard rect from the
+// accumulated missing region.
+PictureLayerTilingSet::CoverageIterator&
+PictureLayerTilingSet::CoverageIterator::operator++() {
+ bool first_time = current_tiling_ < 0;
+
+ if (!*this && !first_time)
+ return *this;
+
+ if (tiling_iter_)
+ ++tiling_iter_;
+
+ // Loop until we find a valid place to stop.
+ while (true) {
+ // Skip tiles that are missing or not drawable, accumulating their
+ // geometry into missing_region_ for a lower-priority tiling to cover.
+ while (tiling_iter_ &&
+ (!*tiling_iter_ || !tiling_iter_->IsReadyToDraw())) {
+ missing_region_.Union(tiling_iter_.geometry_rect());
+ ++tiling_iter_;
+ }
+ if (tiling_iter_)
+ return *this;
+
+ // If the set of current rects for this tiling is done, go to the next
+ // tiling and set up to iterate through all of the remaining holes.
+ // This will also happen the first time through the loop.
+ if (!region_iter_.has_rect()) {
+ current_tiling_ = NextTiling();
+ current_region_.Swap(&missing_region_);
+ missing_region_.Clear();
+ region_iter_ = Region::Iterator(current_region_);
+
+ // All done and all filled.
+ if (!region_iter_.has_rect()) {
+ current_tiling_ = set_->tilings_.size();
+ return *this;
+ }
+
+ // No more valid tiles, return this checkerboard rect.
+ if (current_tiling_ >= static_cast<int>(set_->tilings_.size()))
+ return *this;
+ }
+
+ // Pop a rect off. If there are no more tilings, then these will be
+ // treated as geometry with null tiles that the caller can checkerboard.
+ gfx::Rect last_rect = region_iter_.rect();
+ region_iter_.next();
+
+ // Done, found next checkerboard rect to return.
+ if (current_tiling_ >= static_cast<int>(set_->tilings_.size()))
+ return *this;
+
+ // Construct a new iterator for the next tiling, but we need to loop
+ // again until we get to a valid one.
+ tiling_iter_ = PictureLayerTiling::CoverageIterator(
+ set_->tilings_[current_tiling_],
+ contents_scale_,
+ last_rect);
+ }
+
+ return *this;
+}
+
+// Valid while there is either another tiling to consult or another
+// uncovered (checkerboard) rect to emit.
+PictureLayerTilingSet::CoverageIterator::operator bool() const {
+ return current_tiling_ < static_cast<int>(set_->tilings_.size()) ||
+ region_iter_.has_rect();
+}
+
+// Forwards tree-activation notification to every tiling.
+void PictureLayerTilingSet::DidBecomeActive() {
+ for (size_t i = 0; i < tilings_.size(); ++i)
+ tilings_[i]->DidBecomeActive();
+}
+
+// Forwards tree-recycle notification to every tiling.
+void PictureLayerTilingSet::DidBecomeRecycled() {
+ for (size_t i = 0; i < tilings_.size(); ++i)
+ tilings_[i]->DidBecomeRecycled();
+}
+
+// Serializes each tiling into |state| as its own dictionary, for tracing.
+void PictureLayerTilingSet::AsValueInto(base::debug::TracedValue* state) const {
+ for (size_t i = 0; i < tilings_.size(); ++i) {
+ state->BeginDictionary();
+ tilings_[i]->AsValueInto(state);
+ state->EndDictionary();
+ }
+}
+
+// Total GPU memory used, summed across all tilings.
+size_t PictureLayerTilingSet::GPUMemoryUsageInBytes() const {
+ size_t total = 0;
+ for (size_t i = 0; i < tilings_.size(); ++i)
+ total += tilings_[i]->GPUMemoryUsageInBytes();
+ return total;
+}
+
+// Returns the [start, end) index range of tilings in the requested band
+// relative to the HIGH_RESOLUTION and LOW_RESOLUTION tilings (tilings_ is
+// sorted from largest scale to smallest).
+PictureLayerTilingSet::TilingRange PictureLayerTilingSet::GetTilingRange(
+ TilingRangeType type) const {
+ // Doesn't seem to be the case right now but if it ever becomes a performance
+ // problem to compute these ranges each time this function is called, we can
+ // compute them only when the tiling set has changed instead.
+ TilingRange high_res_range(0, 0);
+ TilingRange low_res_range(tilings_.size(), tilings_.size());
+ for (size_t i = 0; i < tilings_.size(); ++i) {
+ const PictureLayerTiling* tiling = tilings_[i];
+ if (tiling->resolution() == HIGH_RESOLUTION)
+ high_res_range = TilingRange(i, i + 1);
+ if (tiling->resolution() == LOW_RESOLUTION)
+ low_res_range = TilingRange(i, i + 1);
+ }
+
+ switch (type) {
+ case HIGHER_THAN_HIGH_RES:
+ return TilingRange(0, high_res_range.start);
+ case HIGH_RES:
+ return high_res_range;
+ case BETWEEN_HIGH_AND_LOW_RES:
+ return TilingRange(high_res_range.end, low_res_range.start);
+ case LOW_RES:
+ return low_res_range;
+ case LOWER_THAN_LOW_RES:
+ return TilingRange(low_res_range.end, tilings_.size());
+ }
+
+ NOTREACHED();
+ return TilingRange(0, 0);
+}
+
+} // namespace cc
diff --git a/cc/resources/picture_layer_tiling_set.h b/cc/resources/picture_layer_tiling_set.h
new file mode 100644
index 0000000..f19c1b8
--- /dev/null
+++ b/cc/resources/picture_layer_tiling_set.h
@@ -0,0 +1,140 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_PICTURE_LAYER_TILING_SET_H_
+#define CC_RESOURCES_PICTURE_LAYER_TILING_SET_H_
+
+#include "cc/base/region.h"
+#include "cc/base/scoped_ptr_vector.h"
+#include "cc/resources/picture_layer_tiling.h"
+#include "ui/gfx/size.h"
+
+namespace base {
+namespace debug {
+class TracedValue;
+}
+}
+
+namespace cc {
+
+// An ordered collection of the PictureLayerTilings belonging to one picture
+// layer. Provides syncing between trees, coverage iteration across tilings,
+// and index-range queries relative to the high/low resolution tilings.
+class CC_EXPORT PictureLayerTilingSet {
+ public:
+  // Buckets of tiling indices defined relative to the positions of the
+  // HIGH_RESOLUTION and LOW_RESOLUTION tilings in the list.
+  enum TilingRangeType {
+    HIGHER_THAN_HIGH_RES,
+    HIGH_RES,
+    BETWEEN_HIGH_AND_LOW_RES,
+    LOW_RES,
+    LOWER_THAN_LOW_RES
+  };
+  // Half-open index range [start, end) into this set's list of tilings.
+  struct TilingRange {
+    TilingRange(size_t start, size_t end) : start(start), end(end) {}
+
+    size_t start;
+    size_t end;
+  };
+
+  PictureLayerTilingSet(PictureLayerTilingClient* client,
+                        const gfx::Size& layer_bounds);
+  ~PictureLayerTilingSet();
+
+  // Replaces the client pointer; the client is not owned by this set.
+  void SetClient(PictureLayerTilingClient* client);
+  const PictureLayerTilingClient* client() const { return client_; }
+
+  void RemoveTilesInRegion(const Region& region);
+
+  // Make this set of tilings match the same set of content scales from |other|.
+  // Delete any tilings that don't meet |minimum_contents_scale|. Recreate
+  // any tiles that intersect |layer_invalidation|. Update the size of all
+  // tilings to |new_layer_bounds|.
+  // Returns true if we had at least one high res tiling synced.
+  bool SyncTilings(const PictureLayerTilingSet& other,
+                   const gfx::Size& new_layer_bounds,
+                   const Region& layer_invalidation,
+                   float minimum_contents_scale);
+
+  gfx::Size layer_bounds() const { return layer_bounds_; }
+
+  PictureLayerTiling* AddTiling(float contents_scale);
+  size_t num_tilings() const { return tilings_.size(); }
+  int NumHighResTilings() const;
+  PictureLayerTiling* tiling_at(size_t idx) { return tilings_[idx]; }
+  const PictureLayerTiling* tiling_at(size_t idx) const {
+    return tilings_[idx];
+  }
+
+  // Returns the tiling whose contents scale is |scale|, if one exists.
+  PictureLayerTiling* TilingAtScale(float scale) const;
+
+  // Remove all tilings.
+  void RemoveAllTilings();
+
+  // Remove one tiling.
+  void Remove(PictureLayerTiling* tiling);
+
+  // Remove all tiles; keep all tilings.
+  void RemoveAllTiles();
+
+  void DidBecomeActive();
+  void DidBecomeRecycled();
+
+  // For a given rect, iterates through tiles that can fill it. If no
+  // set of tiles with resources can fill the rect, then it will iterate
+  // through null tiles with valid geometry_rect() until the rect is full.
+  // If all tiles have resources, the union of all geometry_rects will
+  // exactly fill rect with no overlap.
+  class CC_EXPORT CoverageIterator {
+   public:
+    CoverageIterator(const PictureLayerTilingSet* set,
+      float contents_scale,
+      const gfx::Rect& content_rect,
+      float ideal_contents_scale);
+    ~CoverageIterator();
+
+    // Visible rect (no borders), always in the space of rect,
+    // regardless of the relative contents scale of the tiling.
+    gfx::Rect geometry_rect() const;
+    // Texture rect (in texels) for geometry_rect
+    gfx::RectF texture_rect() const;
+    // Texture size in texels
+    gfx::Size texture_size() const;
+
+    Tile* operator->() const;
+    Tile* operator*() const;
+
+    CoverageIterator& operator++();
+    operator bool() const;
+
+    PictureLayerTiling* CurrentTiling();
+
+   private:
+    int NextTiling() const;
+
+    const PictureLayerTilingSet* set_;
+    float contents_scale_;
+    float ideal_contents_scale_;
+    PictureLayerTiling::CoverageIterator tiling_iter_;
+    int current_tiling_;
+    int ideal_tiling_;
+
+    Region current_region_;
+    Region missing_region_;
+    Region::Iterator region_iter_;
+  };
+
+  void AsValueInto(base::debug::TracedValue* array) const;
+  size_t GPUMemoryUsageInBytes() const;
+
+  // Returns the [start, end) index range of tilings for |type|; see
+  // TilingRangeType.
+  TilingRange GetTilingRange(TilingRangeType type) const;
+
+ private:
+  // Not owned.
+  PictureLayerTilingClient* client_;
+  gfx::Size layer_bounds_;
+  // Owns the tilings; kept sorted largest contents scale first.
+  ScopedPtrVector<PictureLayerTiling> tilings_;
+
+  friend class Iterator;
+  DISALLOW_COPY_AND_ASSIGN(PictureLayerTilingSet);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_PICTURE_LAYER_TILING_SET_H_
diff --git a/cc/resources/picture_layer_tiling_set_unittest.cc b/cc/resources/picture_layer_tiling_set_unittest.cc
new file mode 100644
index 0000000..e566d11
--- /dev/null
+++ b/cc/resources/picture_layer_tiling_set_unittest.cc
@@ -0,0 +1,566 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/picture_layer_tiling_set.h"
+
+#include <algorithm>
+#include <map>
+#include <vector>
+
+#include "cc/resources/resource_pool.h"
+#include "cc/resources/resource_provider.h"
+#include "cc/test/fake_output_surface.h"
+#include "cc/test/fake_output_surface_client.h"
+#include "cc/test/fake_picture_layer_tiling_client.h"
+#include "cc/test/fake_tile_manager_client.h"
+#include "cc/test/test_shared_bitmap_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gfx/size_conversions.h"
+
+namespace cc {
+namespace {
+
+TEST(PictureLayerTilingSetTest, NoResources) {
+  // With no tile resources available, coverage iteration must still fill the
+  // whole content rect with null tiles whose geometry rects never overlap.
+  FakePictureLayerTilingClient client;
+  gfx::Size layer_bounds(1000, 800);
+  PictureLayerTilingSet set(&client, layer_bounds);
+  client.SetTileSize(gfx::Size(256, 256));
+
+  set.AddTiling(1.0);
+  set.AddTiling(1.5);
+  set.AddTiling(2.0);
+
+  float contents_scale = 2.0;
+  gfx::Size content_bounds(
+      gfx::ToCeiledSize(gfx::ScaleSize(layer_bounds, contents_scale)));
+  gfx::Rect content_rect(content_bounds);
+
+  Region uncovered(content_rect);
+  for (PictureLayerTilingSet::CoverageIterator iter(
+           &set, contents_scale, content_rect, contents_scale);
+       iter;
+       ++iter) {
+    gfx::Rect geometry_rect = iter.geometry_rect();
+    EXPECT_TRUE(content_rect.Contains(geometry_rect));
+    ASSERT_TRUE(uncovered.Contains(geometry_rect));
+    uncovered.Subtract(geometry_rect);
+
+    // No tiles have resources, so the iterator never yields a real tile.
+    EXPECT_FALSE(*iter);
+  }
+  EXPECT_TRUE(uncovered.IsEmpty());
+}
+
+TEST(PictureLayerTilingSetTest, TilingRange) {
+  // Exercises GetTilingRange() over four tiling configurations. Ranges are
+  // half-open [start, end) indices into the set, ordered largest scale first.
+  FakePictureLayerTilingClient client;
+  gfx::Size layer_bounds(10, 10);
+  PictureLayerTilingSet::TilingRange higher_than_high_res_range(0, 0);
+  PictureLayerTilingSet::TilingRange high_res_range(0, 0);
+  PictureLayerTilingSet::TilingRange between_high_and_low_res_range(0, 0);
+  PictureLayerTilingSet::TilingRange low_res_range(0, 0);
+  PictureLayerTilingSet::TilingRange lower_than_low_res_range(0, 0);
+  PictureLayerTiling* high_res_tiling;
+  PictureLayerTiling* low_res_tiling;
+
+  // Full configuration: tilings above high res, between high and low res,
+  // and below low res.
+  PictureLayerTilingSet set(&client, layer_bounds);
+  set.AddTiling(2.0);
+  high_res_tiling = set.AddTiling(1.0);
+  high_res_tiling->set_resolution(HIGH_RESOLUTION);
+  set.AddTiling(0.5);
+  low_res_tiling = set.AddTiling(0.25);
+  low_res_tiling->set_resolution(LOW_RESOLUTION);
+  set.AddTiling(0.125);
+
+  higher_than_high_res_range =
+      set.GetTilingRange(PictureLayerTilingSet::HIGHER_THAN_HIGH_RES);
+  EXPECT_EQ(0u, higher_than_high_res_range.start);
+  EXPECT_EQ(1u, higher_than_high_res_range.end);
+
+  high_res_range = set.GetTilingRange(PictureLayerTilingSet::HIGH_RES);
+  EXPECT_EQ(1u, high_res_range.start);
+  EXPECT_EQ(2u, high_res_range.end);
+
+  between_high_and_low_res_range =
+      set.GetTilingRange(PictureLayerTilingSet::BETWEEN_HIGH_AND_LOW_RES);
+  EXPECT_EQ(2u, between_high_and_low_res_range.start);
+  EXPECT_EQ(3u, between_high_and_low_res_range.end);
+
+  low_res_range = set.GetTilingRange(PictureLayerTilingSet::LOW_RES);
+  EXPECT_EQ(3u, low_res_range.start);
+  EXPECT_EQ(4u, low_res_range.end);
+
+  lower_than_low_res_range =
+      set.GetTilingRange(PictureLayerTilingSet::LOWER_THAN_LOW_RES);
+  EXPECT_EQ(4u, lower_than_low_res_range.start);
+  EXPECT_EQ(5u, lower_than_low_res_range.end);
+
+  // Without a LOW_RESOLUTION tiling, LOW_RES and LOWER_THAN_LOW_RES must
+  // both come back empty.
+  PictureLayerTilingSet set_without_low_res(&client, layer_bounds);
+  set_without_low_res.AddTiling(2.0);
+  high_res_tiling = set_without_low_res.AddTiling(1.0);
+  high_res_tiling->set_resolution(HIGH_RESOLUTION);
+  set_without_low_res.AddTiling(0.5);
+  set_without_low_res.AddTiling(0.25);
+
+  higher_than_high_res_range = set_without_low_res.GetTilingRange(
+      PictureLayerTilingSet::HIGHER_THAN_HIGH_RES);
+  EXPECT_EQ(0u, higher_than_high_res_range.start);
+  EXPECT_EQ(1u, higher_than_high_res_range.end);
+
+  high_res_range =
+      set_without_low_res.GetTilingRange(PictureLayerTilingSet::HIGH_RES);
+  EXPECT_EQ(1u, high_res_range.start);
+  EXPECT_EQ(2u, high_res_range.end);
+
+  between_high_and_low_res_range = set_without_low_res.GetTilingRange(
+      PictureLayerTilingSet::BETWEEN_HIGH_AND_LOW_RES);
+  EXPECT_EQ(2u, between_high_and_low_res_range.start);
+  EXPECT_EQ(4u, between_high_and_low_res_range.end);
+
+  low_res_range =
+      set_without_low_res.GetTilingRange(PictureLayerTilingSet::LOW_RES);
+  EXPECT_EQ(0u, low_res_range.end - low_res_range.start);
+
+  lower_than_low_res_range = set_without_low_res.GetTilingRange(
+      PictureLayerTilingSet::LOWER_THAN_LOW_RES);
+  EXPECT_EQ(0u, lower_than_low_res_range.end - lower_than_low_res_range.start);
+
+  // Only high- and low-res tilings: all other buckets must be empty.
+  PictureLayerTilingSet set_with_only_high_and_low_res(&client, layer_bounds);
+  high_res_tiling = set_with_only_high_and_low_res.AddTiling(1.0);
+  high_res_tiling->set_resolution(HIGH_RESOLUTION);
+  low_res_tiling = set_with_only_high_and_low_res.AddTiling(0.5);
+  low_res_tiling->set_resolution(LOW_RESOLUTION);
+
+  higher_than_high_res_range = set_with_only_high_and_low_res.GetTilingRange(
+      PictureLayerTilingSet::HIGHER_THAN_HIGH_RES);
+  EXPECT_EQ(0u,
+            higher_than_high_res_range.end - higher_than_high_res_range.start);
+
+  high_res_range = set_with_only_high_and_low_res.GetTilingRange(
+      PictureLayerTilingSet::HIGH_RES);
+  EXPECT_EQ(0u, high_res_range.start);
+  EXPECT_EQ(1u, high_res_range.end);
+
+  between_high_and_low_res_range =
+      set_with_only_high_and_low_res.GetTilingRange(
+          PictureLayerTilingSet::BETWEEN_HIGH_AND_LOW_RES);
+  EXPECT_EQ(0u,
+            between_high_and_low_res_range.end -
+                between_high_and_low_res_range.start);
+
+  low_res_range = set_with_only_high_and_low_res.GetTilingRange(
+      PictureLayerTilingSet::LOW_RES);
+  EXPECT_EQ(1u, low_res_range.start);
+  EXPECT_EQ(2u, low_res_range.end);
+
+  lower_than_low_res_range = set_with_only_high_and_low_res.GetTilingRange(
+      PictureLayerTilingSet::LOWER_THAN_LOW_RES);
+  EXPECT_EQ(0u, lower_than_low_res_range.end - lower_than_low_res_range.start);
+
+  // A single high-res tiling: only HIGH_RES is non-empty.
+  PictureLayerTilingSet set_with_only_high_res(&client, layer_bounds);
+  high_res_tiling = set_with_only_high_res.AddTiling(1.0);
+  high_res_tiling->set_resolution(HIGH_RESOLUTION);
+
+  higher_than_high_res_range = set_with_only_high_res.GetTilingRange(
+      PictureLayerTilingSet::HIGHER_THAN_HIGH_RES);
+  EXPECT_EQ(0u,
+            higher_than_high_res_range.end - higher_than_high_res_range.start);
+
+  high_res_range =
+      set_with_only_high_res.GetTilingRange(PictureLayerTilingSet::HIGH_RES);
+  EXPECT_EQ(0u, high_res_range.start);
+  EXPECT_EQ(1u, high_res_range.end);
+
+  between_high_and_low_res_range = set_with_only_high_res.GetTilingRange(
+      PictureLayerTilingSet::BETWEEN_HIGH_AND_LOW_RES);
+  EXPECT_EQ(0u,
+            between_high_and_low_res_range.end -
+                between_high_and_low_res_range.start);
+
+  low_res_range =
+      set_with_only_high_res.GetTilingRange(PictureLayerTilingSet::LOW_RES);
+  EXPECT_EQ(0u, low_res_range.end - low_res_range.start);
+
+  lower_than_low_res_range = set_with_only_high_res.GetTilingRange(
+      PictureLayerTilingSet::LOWER_THAN_LOW_RES);
+  EXPECT_EQ(0u, lower_than_low_res_range.end - lower_than_low_res_range.start);
+}
+
+class PictureLayerTilingSetTestWithResources : public testing::Test {
+ public:
+  // Creates |num_tilings| tilings at scales min_scale, min_scale +
+  // scale_increment, ..., gives every tile a resource, then verifies that
+  // iterating coverage at |ideal_contents_scale| exactly fills the content
+  // rect using only the tiling whose scale is |expected_scale|.
+  void runTest(
+      int num_tilings,
+      float min_scale,
+      float scale_increment,
+      float ideal_contents_scale,
+      float expected_scale) {
+    FakeOutputSurfaceClient output_surface_client;
+    scoped_ptr<FakeOutputSurface> output_surface =
+        FakeOutputSurface::Create3d();
+    CHECK(output_surface->BindToClient(&output_surface_client));
+
+    scoped_ptr<SharedBitmapManager> shared_bitmap_manager(
+        new TestSharedBitmapManager());
+    scoped_ptr<ResourceProvider> resource_provider =
+        ResourceProvider::Create(output_surface.get(),
+                                 shared_bitmap_manager.get(),
+                                 NULL,
+                                 0,
+                                 false,
+                                 1,
+                                 false);
+
+    FakePictureLayerTilingClient client(resource_provider.get());
+    client.SetTileSize(gfx::Size(256, 256));
+    client.set_tree(PENDING_TREE);
+    gfx::Size layer_bounds(1000, 800);
+    PictureLayerTilingSet set(&client, layer_bounds);
+
+    float scale = min_scale;
+    for (int i = 0; i < num_tilings; ++i, scale += scale_increment) {
+      PictureLayerTiling* tiling = set.AddTiling(scale);
+      tiling->CreateAllTilesForTesting();
+      std::vector<Tile*> tiles = tiling->AllTilesForTesting();
+      client.tile_manager()->InitializeTilesWithResourcesForTesting(tiles);
+    }
+
+    // After the loop |scale| sits one increment above the largest tiling, so
+    // the content rect is at least as large as any tiling's coverage.
+    float max_contents_scale = scale;
+    gfx::Size content_bounds(
+        gfx::ToCeiledSize(gfx::ScaleSize(layer_bounds, max_contents_scale)));
+    gfx::Rect content_rect(content_bounds);
+
+    Region remaining(content_rect);
+    PictureLayerTilingSet::CoverageIterator iter(
+        &set,
+        max_contents_scale,
+        content_rect,
+        ideal_contents_scale);
+    for (; iter; ++iter) {
+      gfx::Rect geometry_rect = iter.geometry_rect();
+      EXPECT_TRUE(content_rect.Contains(geometry_rect));
+      ASSERT_TRUE(remaining.Contains(geometry_rect));
+      remaining.Subtract(geometry_rect);
+
+      // Renamed from |scale|, which shadowed the tiling-scale accumulator
+      // declared above the tiling-creation loop.
+      float tiling_contents_scale = iter.CurrentTiling()->contents_scale();
+      EXPECT_EQ(expected_scale, tiling_contents_scale);
+
+      if (num_tilings)
+        EXPECT_TRUE(*iter);
+      else
+        EXPECT_FALSE(*iter);
+    }
+    EXPECT_TRUE(remaining.IsEmpty());
+  }
+};
+
+// Each case is runTest(num_tilings, min_scale, scale_increment,
+// ideal_contents_scale, expected_scale): given the ideal scale, coverage
+// iteration must use only the tiling at |expected_scale|.
+TEST_F(PictureLayerTilingSetTestWithResources, NoTilings) {
+  runTest(0, 0.f, 0.f, 2.f, 0.f);
+}
+TEST_F(PictureLayerTilingSetTestWithResources, OneTiling_Smaller) {
+  runTest(1, 1.f, 0.f, 2.f, 1.f);
+}
+TEST_F(PictureLayerTilingSetTestWithResources, OneTiling_Larger) {
+  runTest(1, 3.f, 0.f, 2.f, 3.f);
+}
+TEST_F(PictureLayerTilingSetTestWithResources, TwoTilings_Smaller) {
+  runTest(2, 1.f, 1.f, 3.f, 2.f);
+}
+
+TEST_F(PictureLayerTilingSetTestWithResources, TwoTilings_SmallerEqual) {
+  runTest(2, 1.f, 1.f, 2.f, 2.f);
+}
+
+TEST_F(PictureLayerTilingSetTestWithResources, TwoTilings_LargerEqual) {
+  runTest(2, 1.f, 1.f, 1.f, 1.f);
+}
+
+TEST_F(PictureLayerTilingSetTestWithResources, TwoTilings_Larger) {
+  runTest(2, 2.f, 8.f, 1.f, 2.f);
+}
+
+TEST_F(PictureLayerTilingSetTestWithResources, ManyTilings_Equal) {
+  runTest(10, 1.f, 1.f, 5.f, 5.f);
+}
+
+TEST_F(PictureLayerTilingSetTestWithResources, ManyTilings_NotEqual) {
+  runTest(10, 1.f, 1.f, 4.5f, 5.f);
+}
+
+// Fixture for PictureLayerTilingSet::SyncTilings(): builds a source and a
+// target set with independent clients and provides sync/validation helpers.
+class PictureLayerTilingSetSyncTest : public testing::Test {
+ public:
+  PictureLayerTilingSetSyncTest()
+      : tile_size_(gfx::Size(10, 10)),
+        source_bounds_(gfx::Size(30, 20)),
+        target_bounds_(gfx::Size(30, 30)) {
+    source_client_.SetTileSize(tile_size_);
+    source_client_.set_tree(PENDING_TREE);
+    target_client_.SetTileSize(tile_size_);
+    target_client_.set_tree(PENDING_TREE);
+    source_.reset(new PictureLayerTilingSet(&source_client_, source_bounds_));
+    target_.reset(new PictureLayerTilingSet(&target_client_, target_bounds_));
+  }
+
+  // Sync from source to target.
+  // All tiles are created first so the sync has real tiles to keep/recreate.
+  void SyncTilings(const gfx::Size& new_bounds,
+                   const Region& invalidation,
+                   float minimum_scale) {
+    for (size_t i = 0; i < source_->num_tilings(); ++i)
+      source_->tiling_at(i)->CreateAllTilesForTesting();
+    for (size_t i = 0; i < target_->num_tilings(); ++i)
+      target_->tiling_at(i)->CreateAllTilesForTesting();
+
+    target_->SyncTilings(
+        *source_.get(), new_bounds, invalidation, minimum_scale);
+  }
+  // Convenience overload: no invalidation, no minimum scale.
+  void SyncTilings(const gfx::Size& new_bounds) {
+    Region invalidation;
+    SyncTilings(new_bounds, invalidation, 0.f);
+  }
+  // Convenience overload: explicit invalidation, no minimum scale.
+  void SyncTilings(const gfx::Size& new_bounds, const Region& invalidation) {
+    SyncTilings(new_bounds, invalidation, 0.f);
+  }
+  // Convenience overload: no invalidation, explicit minimum scale.
+  void SyncTilings(const gfx::Size& new_bounds, float minimum_scale) {
+    Region invalidation;
+    SyncTilings(new_bounds, invalidation, minimum_scale);
+  }
+
+  // Asserts that after a sync the target mirrors the source: same number of
+  // tilings, same scales, and all tilings resized to |new_bounds|.
+  void VerifyTargetEqualsSource(const gfx::Size& new_bounds) {
+    ASSERT_FALSE(new_bounds.IsEmpty());
+    EXPECT_EQ(target_->num_tilings(), source_->num_tilings());
+    EXPECT_EQ(target_->layer_bounds().ToString(), new_bounds.ToString());
+
+    for (size_t i = 0; i < target_->num_tilings(); ++i) {
+      ASSERT_GT(source_->num_tilings(), i);
+      const PictureLayerTiling* source_tiling = source_->tiling_at(i);
+      const PictureLayerTiling* target_tiling = target_->tiling_at(i);
+      EXPECT_EQ(target_tiling->layer_bounds().ToString(),
+                new_bounds.ToString());
+      EXPECT_EQ(source_tiling->contents_scale(),
+                target_tiling->contents_scale());
+    }
+
+    // Syncing must not swap the clients of either set.
+    EXPECT_EQ(source_->client(), &source_client_);
+    EXPECT_EQ(target_->client(), &target_client_);
+    ValidateTargetTilingSet();
+  }
+
+  // Checks ordering and per-tiling invariants of the target set.
+  void ValidateTargetTilingSet() {
+    // Tilings should be sorted largest to smallest.
+    if (target_->num_tilings() > 0) {
+      float last_scale = target_->tiling_at(0)->contents_scale();
+      for (size_t i = 1; i < target_->num_tilings(); ++i) {
+        const PictureLayerTiling* target_tiling = target_->tiling_at(i);
+        EXPECT_LT(target_tiling->contents_scale(), last_scale);
+        last_scale = target_tiling->contents_scale();
+      }
+    }
+
+    for (size_t i = 0; i < target_->num_tilings(); ++i)
+      ValidateTiling(target_->tiling_at(i), target_client_.GetPile());
+  }
+
+  // Checks that a tiling's tiles are consistent with its live tiles rect and
+  // that every tile references |pile|.
+  void ValidateTiling(const PictureLayerTiling* tiling,
+                      const PicturePileImpl* pile) {
+    if (tiling->tiling_size().IsEmpty()) {
+      EXPECT_TRUE(tiling->live_tiles_rect().IsEmpty());
+    } else if (!tiling->live_tiles_rect().IsEmpty()) {
+      gfx::Rect tiling_rect(tiling->tiling_size());
+      EXPECT_TRUE(tiling_rect.Contains(tiling->live_tiles_rect()));
+    }
+
+    std::vector<Tile*> tiles = tiling->AllTilesForTesting();
+    for (size_t i = 0; i < tiles.size(); ++i) {
+      const Tile* tile = tiles[i];
+      ASSERT_TRUE(!!tile);
+      EXPECT_EQ(tile->picture_pile(), pile);
+      EXPECT_TRUE(tile->content_rect().Intersects(tiling->live_tiles_rect()))
+          << "All tiles must be inside the live tiles rect."
+          << " Tile rect: " << tile->content_rect().ToString()
+          << " Live rect: " << tiling->live_tiles_rect().ToString()
+          << " Scale: " << tiling->contents_scale();
+    }
+
+    for (PictureLayerTiling::CoverageIterator iter(
+             tiling, tiling->contents_scale(), tiling->live_tiles_rect());
+         iter;
+         ++iter) {
+      EXPECT_TRUE(*iter) << "The live tiles rect must be full.";
+    }
+  }
+
+  gfx::Size tile_size_;
+
+  FakePictureLayerTilingClient source_client_;
+  gfx::Size source_bounds_;
+  scoped_ptr<PictureLayerTilingSet> source_;
+
+  FakePictureLayerTilingClient target_client_;
+  gfx::Size target_bounds_;
+  scoped_ptr<PictureLayerTilingSet> target_;
+};
+
+TEST_F(PictureLayerTilingSetSyncTest, EmptyBounds) {
+  // Syncing to empty bounds removes every tiling from the target.
+  const float kSourceScales[] = {1.f, 1.2f};
+  for (size_t i = 0; i < arraysize(kSourceScales); ++i)
+    source_->AddTiling(kSourceScales[i]);
+
+  gfx::Size empty_bounds;
+  SyncTilings(empty_bounds);
+  EXPECT_EQ(target_->num_tilings(), 0u);
+  EXPECT_EQ(target_->layer_bounds().ToString(), empty_bounds.ToString());
+}
+
+TEST_F(PictureLayerTilingSetSyncTest, AllNew) {
+  // No scale is shared between the sets, so every target tiling must be
+  // replaced to match the source.
+  const float kSourceScales[] = {0.5f, 1.f, 1.2f};
+  for (size_t i = 0; i < arraysize(kSourceScales); ++i)
+    source_->AddTiling(kSourceScales[i]);
+  const float kTargetScales[] = {0.75f, 1.4f, 3.f};
+  for (size_t i = 0; i < arraysize(kTargetScales); ++i)
+    target_->AddTiling(kTargetScales[i]);
+
+  gfx::Size new_bounds(15, 40);
+  SyncTilings(new_bounds);
+  VerifyTargetEqualsSource(new_bounds);
+}
+
+// Returns the tile whose content rect starts at (0, 0), or NULL if none.
+Tile* FindTileAtOrigin(PictureLayerTiling* tiling) {
+  std::vector<Tile*> all_tiles = tiling->AllTilesForTesting();
+  for (std::vector<Tile*>::iterator it = all_tiles.begin();
+       it != all_tiles.end();
+       ++it) {
+    if ((*it)->content_rect().origin() == gfx::Point())
+      return *it;
+  }
+  return NULL;
+}
+
+TEST_F(PictureLayerTilingSetSyncTest, KeepExisting) {
+  // Tilings whose scales exist in both sets (1.f and 2.f here) must survive
+  // the sync along with their existing tiles.
+  float source_scales[] = {0.7f, 1.f, 1.1f, 2.f};
+  for (size_t i = 0; i < arraysize(source_scales); ++i)
+    source_->AddTiling(source_scales[i]);
+  float target_scales[] = {0.5f, 1.f, 2.f};
+  for (size_t i = 0; i < arraysize(target_scales); ++i)
+    target_->AddTiling(target_scales[i]);
+
+  PictureLayerTiling* tiling1 = source_->TilingAtScale(1.f);
+  ASSERT_TRUE(tiling1);
+  tiling1->CreateAllTilesForTesting();
+  EXPECT_EQ(tiling1->contents_scale(), 1.f);
+  Tile* tile1 = FindTileAtOrigin(tiling1);
+  ASSERT_TRUE(tile1);
+
+  PictureLayerTiling* tiling2 = source_->TilingAtScale(2.f);
+  // Assert before use: the original dereferenced tiling2 via
+  // CreateAllTilesForTesting() one line before the NULL check.
+  ASSERT_TRUE(tiling2);
+  tiling2->CreateAllTilesForTesting();
+  EXPECT_EQ(tiling2->contents_scale(), 2.f);
+  Tile* tile2 = FindTileAtOrigin(tiling2);
+  ASSERT_TRUE(tile2);
+
+  gfx::Size new_bounds(15, 40);
+  SyncTilings(new_bounds);
+  VerifyTargetEqualsSource(new_bounds);
+
+  // The same tiling objects and origin tiles must still be present.
+  EXPECT_EQ(tiling1, source_->TilingAtScale(1.f));
+  EXPECT_EQ(tile1, FindTileAtOrigin(tiling1));
+  EXPECT_FALSE(tiling1->live_tiles_rect().IsEmpty());
+
+  EXPECT_EQ(tiling2, source_->TilingAtScale(2.f));
+  EXPECT_EQ(tile2, FindTileAtOrigin(tiling2));
+  EXPECT_FALSE(tiling2->live_tiles_rect().IsEmpty());
+}
+
+TEST_F(PictureLayerTilingSetSyncTest, EmptySet) {
+  // Syncing from an empty source clears out the target's tilings.
+  const float kTargetScales[] = {0.2f, 1.f};
+  for (size_t i = 0; i < arraysize(kTargetScales); ++i)
+    target_->AddTiling(kTargetScales[i]);
+
+  gfx::Size new_bounds(15, 40);
+  SyncTilings(new_bounds);
+  VerifyTargetEqualsSource(new_bounds);
+}
+
+TEST_F(PictureLayerTilingSetSyncTest, MinimumScale) {
+  // Only tilings at or above the minimum contents scale survive the sync.
+  const float kSourceScales[] = {0.7f, 1.f, 1.1f, 2.f};
+  for (size_t i = 0; i < arraysize(kSourceScales); ++i)
+    source_->AddTiling(kSourceScales[i]);
+  const float kTargetScales[] = {0.5f, 0.7f, 1.f, 1.1f, 2.f};
+  for (size_t i = 0; i < arraysize(kTargetScales); ++i)
+    target_->AddTiling(kTargetScales[i]);
+
+  gfx::Size new_bounds(15, 40);
+  float minimum_scale = 1.5f;
+  SyncTilings(new_bounds, minimum_scale);
+
+  // Of all scales above, only 2.f meets the 1.5f minimum.
+  EXPECT_EQ(target_->num_tilings(), 1u);
+  EXPECT_EQ(target_->tiling_at(0)->contents_scale(), 2.f);
+  ValidateTargetTilingSet();
+}
+
+TEST_F(PictureLayerTilingSetSyncTest, Invalidation) {
+  // Tiles intersecting the invalidation must be recreated by the sync;
+  // untouched tiles must be reused as-is.
+  source_->AddTiling(2.f);
+  target_->AddTiling(2.f);
+  target_->tiling_at(0)->CreateAllTilesForTesting();
+
+  // Two in-bounds rects plus one entirely outside the layer.
+  Region layer_invalidation;
+  layer_invalidation.Union(gfx::Rect(0, 0, 1, 1));
+  layer_invalidation.Union(gfx::Rect(0, 15, 1, 1));
+  // Out of bounds layer_invalidation.
+  layer_invalidation.Union(gfx::Rect(100, 100, 1, 1));
+
+  // Scale the layer-space invalidation into content space (scale 2.f).
+  Region content_invalidation;
+  for (Region::Iterator iter(layer_invalidation); iter.has_rect();
+       iter.next()) {
+    gfx::Rect content_rect = gfx::ScaleToEnclosingRect(iter.rect(), 2.f);
+    content_invalidation.Union(content_rect);
+  }
+
+  // Remember the pre-sync tile at each tile origin.
+  std::vector<Tile*> old_tiles = target_->tiling_at(0)->AllTilesForTesting();
+  std::map<gfx::Point, scoped_refptr<Tile> > old_tile_map;
+  for (size_t i = 0; i < old_tiles.size(); ++i)
+    old_tile_map[old_tiles[i]->content_rect().origin()] = old_tiles[i];
+
+  SyncTilings(target_bounds_, layer_invalidation);
+  VerifyTargetEqualsSource(target_bounds_);
+
+  std::vector<Tile*> new_tiles = target_->tiling_at(0)->AllTilesForTesting();
+  for (size_t i = 0; i < new_tiles.size(); ++i) {
+    const Tile* tile = new_tiles[i];
+    std::map<gfx::Point, scoped_refptr<Tile> >::iterator find =
+        old_tile_map.find(tile->content_rect().origin());
+    // Guard against dereferencing end(): bounds and tile size are unchanged,
+    // so every new tile should map to an old tile at the same origin. The
+    // original dereferenced |find->second| without this check.
+    ASSERT_TRUE(find != old_tile_map.end());
+    if (content_invalidation.Intersects(tile->content_rect()))
+      EXPECT_NE(tile, find->second.get());
+    else
+      EXPECT_EQ(tile, find->second.get());
+  }
+}
+
+TEST_F(PictureLayerTilingSetSyncTest, TileSizeChange) {
+  // A client-side tile-size change must be picked up by the sync, which
+  // implies every tile is recreated at the new size.
+  source_->AddTiling(1.f);
+  target_->AddTiling(1.f);
+
+  target_->tiling_at(0)->CreateAllTilesForTesting();
+  std::vector<Tile*> original_tiles =
+      target_->tiling_at(0)->AllTilesForTesting();
+  EXPECT_GT(original_tiles.size(), 0u);
+  gfx::Size new_tile_size(100, 100);
+  target_client_.SetTileSize(new_tile_size);
+  EXPECT_NE(target_->tiling_at(0)->tile_size().ToString(),
+            new_tile_size.ToString());
+
+  gfx::Size new_bounds(15, 40);
+  SyncTilings(new_bounds);
+  VerifyTargetEqualsSource(new_bounds);
+
+  EXPECT_EQ(target_->tiling_at(0)->tile_size().ToString(),
+            new_tile_size.ToString());
+
+  // All old tiles should not be present in new tiles. std::find requires
+  // <algorithm>, which this file now includes explicitly instead of relying
+  // on a transitive include.
+  std::vector<Tile*> new_tiles = target_->tiling_at(0)->AllTilesForTesting();
+  for (size_t i = 0; i < original_tiles.size(); ++i) {
+    std::vector<Tile*>::iterator find =
+        std::find(new_tiles.begin(), new_tiles.end(), original_tiles[i]);
+    EXPECT_TRUE(find == new_tiles.end());
+  }
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/picture_layer_tiling_unittest.cc b/cc/resources/picture_layer_tiling_unittest.cc
new file mode 100644
index 0000000..1a242a3
--- /dev/null
+++ b/cc/resources/picture_layer_tiling_unittest.cc
@@ -0,0 +1,2171 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/picture_layer_tiling.h"
+
+#include <limits>
+#include <set>
+
+#include "cc/base/math_util.h"
+#include "cc/resources/picture_layer_tiling_set.h"
+#include "cc/test/fake_output_surface.h"
+#include "cc/test/fake_output_surface_client.h"
+#include "cc/test/fake_picture_layer_tiling_client.h"
+#include "cc/test/test_context_provider.h"
+#include "cc/test/test_shared_bitmap_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gfx/rect_conversions.h"
+#include "ui/gfx/size_conversions.h"
+
+namespace cc {
+namespace {
+
+// Maps the device viewport back into layer space via the inverse of
+// |transform|. Returns an empty rect when the transform is not invertible.
+static gfx::Rect ViewportInLayerSpace(
+    const gfx::Transform& transform,
+    const gfx::Size& device_viewport) {
+  gfx::Transform inverse;
+  if (!transform.GetInverse(&inverse))
+    return gfx::Rect();
+
+  gfx::RectF layer_space_rect = MathUtil::ProjectClippedRect(
+      inverse, gfx::RectF(gfx::Point(0, 0), device_viewport));
+  return ToEnclosingRect(layer_space_rect);
+}
+
+// Pushes identical priority inputs to every tiling in |set|.
+static void UpdateAllTilePriorities(PictureLayerTilingSet* set,
+                                    WhichTree tree,
+                                    const gfx::Rect& visible_layer_rect,
+                                    float layer_contents_scale,
+                                    double current_frame_time_in_seconds) {
+  for (size_t tiling_index = 0; tiling_index < set->num_tilings();
+       ++tiling_index) {
+    set->tiling_at(tiling_index)->UpdateTilePriorities(
+        tree,
+        visible_layer_rect,
+        layer_contents_scale,
+        current_frame_time_in_seconds,
+        Occlusion());
+  }
+}
+
+// Subclass that exposes protected PictureLayerTiling members (live-tiles-rect
+// control, direct tile lookup, skewport computation) for testing.
+class TestablePictureLayerTiling : public PictureLayerTiling {
+ public:
+  using PictureLayerTiling::SetLiveTilesRect;
+  using PictureLayerTiling::TileAt;
+
+  // Factory mirroring the production constructor but returning the testable
+  // subclass.
+  static scoped_ptr<TestablePictureLayerTiling> Create(
+      float contents_scale,
+      const gfx::Size& layer_bounds,
+      PictureLayerTilingClient* client) {
+    return make_scoped_ptr(new TestablePictureLayerTiling(
+        contents_scale,
+        layer_bounds,
+        client));
+  }
+
+  // Read access to the protected live tiles rect.
+  gfx::Rect live_tiles_rect() const { return live_tiles_rect_; }
+
+  using PictureLayerTiling::ComputeSkewport;
+
+ protected:
+  TestablePictureLayerTiling(float contents_scale,
+                             const gfx::Size& layer_bounds,
+                             PictureLayerTilingClient* client)
+      : PictureLayerTiling(contents_scale, layer_bounds, client) { }
+};
+
+// Fixture providing helpers to build a testable tiling and to verify that
+// coverage iteration produces exact, non-overlapping geometry.
+class PictureLayerTilingIteratorTest : public testing::Test {
+ public:
+  PictureLayerTilingIteratorTest() {}
+  virtual ~PictureLayerTilingIteratorTest() {}
+
+  // Creates the tiling under test with the given tile size, contents scale,
+  // and layer bounds.
+  void Initialize(const gfx::Size& tile_size,
+                  float contents_scale,
+                  const gfx::Size& layer_bounds) {
+    client_.SetTileSize(tile_size);
+    client_.set_tree(PENDING_TREE);
+    tiling_ = TestablePictureLayerTiling::Create(contents_scale,
+                                                 layer_bounds,
+                                                 &client_);
+  }
+
+  // Sets the live tiles rect and checks every remaining tile intersects it.
+  void SetLiveRectAndVerifyTiles(const gfx::Rect& live_tiles_rect) {
+    tiling_->SetLiveTilesRect(live_tiles_rect);
+
+    std::vector<Tile*> tiles = tiling_->AllTilesForTesting();
+    for (std::vector<Tile*>::iterator iter = tiles.begin();
+         iter != tiles.end();
+         ++iter) {
+      EXPECT_TRUE(live_tiles_rect.Intersects((*iter)->content_rect()));
+    }
+  }
+
+  // Verifies that iterating coverage of |request_rect| at |rect_scale| fills
+  // exactly |expect_rect| with no overlap, and that all texture coordinates
+  // stay within tile bounds.
+  void VerifyTilesExactlyCoverRect(
+      float rect_scale,
+      const gfx::Rect& request_rect,
+      const gfx::Rect& expect_rect) {
+    EXPECT_TRUE(request_rect.Contains(expect_rect));
+
+    // Iterators are not valid if this ratio is too large (i.e. the
+    // tiling is too high-res for a low-res destination rect.) This is an
+    // artifact of snapping geometry to integer coordinates and then mapping
+    // back to floating point texture coordinates.
+    float dest_to_contents_scale = tiling_->contents_scale() / rect_scale;
+    ASSERT_LE(dest_to_contents_scale, 2.0);
+
+    Region remaining = expect_rect;
+    for (PictureLayerTiling::CoverageIterator
+             iter(tiling_.get(), rect_scale, request_rect);
+         iter;
+         ++iter) {
+      // Geometry cannot overlap previous geometry at all
+      gfx::Rect geometry = iter.geometry_rect();
+      EXPECT_TRUE(expect_rect.Contains(geometry));
+      EXPECT_TRUE(remaining.Contains(geometry));
+      remaining.Subtract(geometry);
+
+      // Sanity check that texture coords are within the texture rect.
+      gfx::RectF texture_rect = iter.texture_rect();
+      EXPECT_GE(texture_rect.x(), 0);
+      EXPECT_GE(texture_rect.y(), 0);
+      EXPECT_LE(texture_rect.right(), client_.TileSize().width());
+      EXPECT_LE(texture_rect.bottom(), client_.TileSize().height());
+
+      EXPECT_EQ(iter.texture_size(), client_.TileSize());
+    }
+
+    // The entire rect must be filled by geometry from the tiling.
+    EXPECT_TRUE(remaining.IsEmpty());
+  }
+
+  // Shorthand: the requested rect and the expected coverage are the same.
+  void VerifyTilesExactlyCoverRect(float rect_scale, const gfx::Rect& rect) {
+    VerifyTilesExactlyCoverRect(rect_scale, rect, rect);
+  }
+
+  // Runs |callback| for every (tile, geometry_rect) pair covering |rect| on
+  // the fixture's tiling, and checks the geometry fills |rect| completely.
+  void VerifyTiles(
+      float rect_scale,
+      const gfx::Rect& rect,
+      base::Callback<void(Tile* tile,
+                          const gfx::Rect& geometry_rect)> callback) {
+    VerifyTiles(tiling_.get(),
+                rect_scale,
+                rect,
+                callback);
+  }
+
+  // Same as above but for an arbitrary tiling.
+  void VerifyTiles(
+      PictureLayerTiling* tiling,
+      float rect_scale,
+      const gfx::Rect& rect,
+      base::Callback<void(Tile* tile,
+                          const gfx::Rect& geometry_rect)> callback) {
+    Region remaining = rect;
+    for (PictureLayerTiling::CoverageIterator iter(tiling, rect_scale, rect);
+         iter;
+         ++iter) {
+      remaining.Subtract(iter.geometry_rect());
+      callback.Run(*iter, iter.geometry_rect());
+    }
+    EXPECT_TRUE(remaining.IsEmpty());
+  }
+
+  // For a |dest_rect| that may extend beyond the tiling, verifies coverage of
+  // only the portion that the tiling can actually supply.
+  void VerifyTilesCoverNonContainedRect(float rect_scale,
+                                        const gfx::Rect& dest_rect) {
+    float dest_to_contents_scale = tiling_->contents_scale() / rect_scale;
+    gfx::Rect clamped_rect = gfx::ScaleToEnclosingRect(
+        gfx::Rect(tiling_->tiling_size()), 1.f / dest_to_contents_scale);
+    clamped_rect.Intersect(dest_rect);
+    VerifyTilesExactlyCoverRect(rect_scale, dest_rect, clamped_rect);
+  }
+
+  void set_max_tiles_for_interest_area(size_t area) {
+    client_.set_max_tiles_for_interest_area(area);
+  }
+
+ protected:
+  FakePictureLayerTilingClient client_;
+  scoped_ptr<TestablePictureLayerTiling> tiling_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PictureLayerTilingIteratorTest);
+};
+
+TEST_F(PictureLayerTilingIteratorTest, ResizeDeletesTiles) {
+  // A resize whose invalidation covers newly exposed pixels must delete the
+  // tiles intersecting that invalidation.
+  gfx::Size tile_size(100, 100);
+  gfx::Size original_layer_size(10, 10);
+  Initialize(tile_size, 1.f, original_layer_size);
+  SetLiveRectAndVerifyTiles(gfx::Rect(original_layer_size));
+
+  // The whole layer fits inside a single tile.
+  EXPECT_TRUE(tiling_->TileAt(0, 0));
+
+  // Disable tile creation so any invalidation leaves a hole behind.
+  client_.set_allow_create_tile(false);
+
+  Region newly_exposed =
+      SubtractRegions(gfx::Rect(tile_size), gfx::Rect(original_layer_size));
+  tiling_->UpdateTilesToCurrentPile(newly_exposed, gfx::Size(200, 200));
+  EXPECT_FALSE(tiling_->TileAt(0, 0));
+}
+
+TEST_F(PictureLayerTilingIteratorTest, CreateMissingTilesStaysInsideLiveRect) {
+  // Build a tiling with three rows and three columns.
+  Initialize(gfx::Size(100, 100), 1, gfx::Size(250, 250));
+  EXPECT_EQ(3, tiling_->TilingDataForTesting().num_tiles_x());
+  EXPECT_EQ(3, tiling_->TilingDataForTesting().num_tiles_y());
+
+  // End the live tiles rect exactly where the right-most and bottom-most
+  // tiles begin. Their border pixels would still touch the live tiles rect,
+  // but the tiles themselves must not be created for that alone.
+  gfx::Rect last_tile_bounds =
+      tiling_->TilingDataForTesting().TileBounds(2, 2);
+
+  SetLiveRectAndVerifyTiles(
+      gfx::Rect(last_tile_bounds.x(), last_tile_bounds.y()));
+  EXPECT_FALSE(tiling_->TileAt(2, 0));
+  EXPECT_FALSE(tiling_->TileAt(2, 1));
+  EXPECT_FALSE(tiling_->TileAt(2, 2));
+  EXPECT_FALSE(tiling_->TileAt(1, 2));
+  EXPECT_FALSE(tiling_->TileAt(0, 2));
+
+  // CreateMissingTilesInLiveTilesRect must honor the same boundary.
+  tiling_->CreateMissingTilesInLiveTilesRect();
+  EXPECT_FALSE(tiling_->TileAt(2, 0));
+  EXPECT_FALSE(tiling_->TileAt(2, 1));
+  EXPECT_FALSE(tiling_->TileAt(2, 2));
+  EXPECT_FALSE(tiling_->TileAt(1, 2));
+  EXPECT_FALSE(tiling_->TileAt(0, 2));
+}
+
+TEST_F(PictureLayerTilingIteratorTest, ResizeTilingOverTileBorders) {
+  // The tiling has four rows and three columns.
+  Initialize(gfx::Size(100, 100), 1, gfx::Size(250, 350));
+  EXPECT_EQ(3, tiling_->TilingDataForTesting().num_tiles_x());
+  EXPECT_EQ(4, tiling_->TilingDataForTesting().num_tiles_y());
+
+  // The live tiles rect covers the whole tiling.
+  SetLiveRectAndVerifyTiles(gfx::Rect(250, 350));
+
+  // Tiles in the bottom row and right column exist.
+  EXPECT_TRUE(tiling_->TileAt(2, 0));
+  EXPECT_TRUE(tiling_->TileAt(2, 1));
+  EXPECT_TRUE(tiling_->TileAt(2, 2));
+  EXPECT_TRUE(tiling_->TileAt(2, 3));
+  EXPECT_TRUE(tiling_->TileAt(1, 3));
+  EXPECT_TRUE(tiling_->TileAt(0, 3));
+
+  // First content pixel (excluding borders) of the last column and row.
+  int right = tiling_->TilingDataForTesting().TileBounds(2, 2).x();
+  int bottom = tiling_->TilingDataForTesting().TileBounds(2, 3).y();
+
+  // Shrink the tiling so that the last tile row/column is entirely in the
+  // border pixels of the interior tiles. That row/column is removed.
+  Region invalidation;
+  tiling_->UpdateTilesToCurrentPile(invalidation,
+                                    gfx::Size(right + 1, bottom + 1));
+  EXPECT_EQ(2, tiling_->TilingDataForTesting().num_tiles_x());
+  EXPECT_EQ(3, tiling_->TilingDataForTesting().num_tiles_y());
+
+  // The live tiles rect was clamped to the pile size.
+  EXPECT_EQ(gfx::Rect(right + 1, bottom + 1), tiling_->live_tiles_rect());
+
+  // Since the row/column is gone, the tiles should be gone too.
+  EXPECT_FALSE(tiling_->TileAt(2, 0));
+  EXPECT_FALSE(tiling_->TileAt(2, 1));
+  EXPECT_FALSE(tiling_->TileAt(2, 2));
+  EXPECT_FALSE(tiling_->TileAt(2, 3));
+  EXPECT_FALSE(tiling_->TileAt(1, 3));
+  EXPECT_FALSE(tiling_->TileAt(0, 3));
+
+  // Growing outside the current right/bottom tiles border pixels should create
+  // the tiles again, even though the live rect has not changed size.
+  tiling_->UpdateTilesToCurrentPile(invalidation,
+                                    gfx::Size(right + 2, bottom + 2));
+  EXPECT_EQ(3, tiling_->TilingDataForTesting().num_tiles_x());
+  EXPECT_EQ(4, tiling_->TilingDataForTesting().num_tiles_y());
+
+  // Not changed.
+  EXPECT_EQ(gfx::Rect(right + 1, bottom + 1), tiling_->live_tiles_rect());
+
+  // The last row/column tiles are inside the live tiles rect.
+  EXPECT_TRUE(gfx::Rect(right + 1, bottom + 1).Intersects(
+      tiling_->TilingDataForTesting().TileBounds(2, 0)));
+  EXPECT_TRUE(gfx::Rect(right + 1, bottom + 1).Intersects(
+      tiling_->TilingDataForTesting().TileBounds(0, 3)));
+
+  EXPECT_TRUE(tiling_->TileAt(2, 0));
+  EXPECT_TRUE(tiling_->TileAt(2, 1));
+  EXPECT_TRUE(tiling_->TileAt(2, 2));
+  EXPECT_TRUE(tiling_->TileAt(2, 3));
+  EXPECT_TRUE(tiling_->TileAt(1, 3));
+  EXPECT_TRUE(tiling_->TileAt(0, 3));
+}
+
+TEST_F(PictureLayerTilingIteratorTest, ResizeLiveTileRectOverTileBorders) {
+  // The tiling has three columns and four rows (asserted below).
+  Initialize(gfx::Size(100, 100), 1, gfx::Size(250, 350));
+  EXPECT_EQ(3, tiling_->TilingDataForTesting().num_tiles_x());
+  EXPECT_EQ(4, tiling_->TilingDataForTesting().num_tiles_y());
+
+  // The live tiles rect covers the whole tiling.
+  SetLiveRectAndVerifyTiles(gfx::Rect(250, 350));
+
+  // Tiles in the bottom row and right column exist.
+  EXPECT_TRUE(tiling_->TileAt(2, 0));
+  EXPECT_TRUE(tiling_->TileAt(2, 1));
+  EXPECT_TRUE(tiling_->TileAt(2, 2));
+  EXPECT_TRUE(tiling_->TileAt(2, 3));
+  EXPECT_TRUE(tiling_->TileAt(1, 3));
+  EXPECT_TRUE(tiling_->TileAt(0, 3));
+
+  // Shrink the live tiles rect to the very edge of the right-most and
+  // bottom-most tiles. Their border pixels would still be inside the live
+  // tiles rect, but the tiles should not exist just for that.
+  int right = tiling_->TilingDataForTesting().TileBounds(2, 3).x();
+  int bottom = tiling_->TilingDataForTesting().TileBounds(2, 3).y();
+
+  SetLiveRectAndVerifyTiles(gfx::Rect(right, bottom));
+  EXPECT_FALSE(tiling_->TileAt(2, 0));
+  EXPECT_FALSE(tiling_->TileAt(2, 1));
+  EXPECT_FALSE(tiling_->TileAt(2, 2));
+  EXPECT_FALSE(tiling_->TileAt(2, 3));
+  EXPECT_FALSE(tiling_->TileAt(1, 3));
+  EXPECT_FALSE(tiling_->TileAt(0, 3));
+
+  // Including the bottom row and right column again, should create the tiles.
+  // NOTE(review): the last two checks below look at row 2 rather than the
+  // just-recreated bottom row 3 that the matching EXPECT_FALSEs above used;
+  // possibly intended to be TileAt(1, 3) / TileAt(0, 3) — confirm.
+  SetLiveRectAndVerifyTiles(gfx::Rect(right + 1, bottom + 1));
+  EXPECT_TRUE(tiling_->TileAt(2, 0));
+  EXPECT_TRUE(tiling_->TileAt(2, 1));
+  EXPECT_TRUE(tiling_->TileAt(2, 2));
+  EXPECT_TRUE(tiling_->TileAt(2, 3));
+  EXPECT_TRUE(tiling_->TileAt(1, 2));
+  EXPECT_TRUE(tiling_->TileAt(0, 2));
+
+  // Shrink the live tiles rect to the very edge of the left-most and
+  // top-most tiles. Their border pixels would still be inside the live
+  // tiles rect, but the tiles should not exist just for that.
+  int left = tiling_->TilingDataForTesting().TileBounds(0, 0).right();
+  int top = tiling_->TilingDataForTesting().TileBounds(0, 0).bottom();
+
+  SetLiveRectAndVerifyTiles(gfx::Rect(left, top, 250 - left, 350 - top));
+  EXPECT_FALSE(tiling_->TileAt(0, 3));
+  EXPECT_FALSE(tiling_->TileAt(0, 2));
+  EXPECT_FALSE(tiling_->TileAt(0, 1));
+  EXPECT_FALSE(tiling_->TileAt(0, 0));
+  EXPECT_FALSE(tiling_->TileAt(1, 0));
+  EXPECT_FALSE(tiling_->TileAt(2, 0));
+
+  // Including the top row and left column again, should create the tiles.
+  SetLiveRectAndVerifyTiles(
+      gfx::Rect(left - 1, top - 1, 250 - left, 350 - top));
+  EXPECT_TRUE(tiling_->TileAt(0, 3));
+  EXPECT_TRUE(tiling_->TileAt(0, 2));
+  EXPECT_TRUE(tiling_->TileAt(0, 1));
+  EXPECT_TRUE(tiling_->TileAt(0, 0));
+  EXPECT_TRUE(tiling_->TileAt(1, 0));
+  EXPECT_TRUE(tiling_->TileAt(2, 0));
+}
+
+TEST_F(PictureLayerTilingIteratorTest, ResizeLiveTileRectOverSameTiles) {
+  // The tiling has four rows and three columns.
+  Initialize(gfx::Size(100, 100), 1, gfx::Size(250, 350));
+  EXPECT_EQ(3, tiling_->TilingDataForTesting().num_tiles_x());
+  EXPECT_EQ(4, tiling_->TilingDataForTesting().num_tiles_y());
+
+  // The live tiles rect covers the whole tiling.
+  SetLiveRectAndVerifyTiles(gfx::Rect(250, 350));
+
+  // Checks that every tile in the 3x4 grid is present.
+  auto expect_all_tiles_exist = [this]() {
+    for (int x = 0; x < 3; ++x) {
+      for (int y = 0; y < 4; ++y)
+        EXPECT_TRUE(tiling_->TileAt(x, y)) << x << "," << y;
+    }
+  };
+
+  // All tiles exist.
+  expect_all_tiles_exist();
+
+  // Shrink the live tiles rect, but still cover all the tiles.
+  SetLiveRectAndVerifyTiles(gfx::Rect(1, 1, 249, 349));
+
+  // All tiles still exist.
+  expect_all_tiles_exist();
+
+  // Grow the live tiles rect, but still cover all the same tiles.
+  SetLiveRectAndVerifyTiles(gfx::Rect(0, 0, 250, 350));
+
+  // All tiles still exist.
+  expect_all_tiles_exist();
+}
+
+TEST_F(PictureLayerTilingIteratorTest, ResizeOverBorderPixelsDeletesTiles) {
+  // Verifies that a resize with invalidation for newly exposed pixels
+  // deletes tiles that intersect that invalidation, even when the resize
+  // does not add any new tiles (only new border pixels).
+  gfx::Size tile_size(100, 100);
+  gfx::Size original_layer_size(99, 99);
+  Initialize(tile_size, 1.f, original_layer_size);
+  SetLiveRectAndVerifyTiles(gfx::Rect(original_layer_size));
+
+  // Tiling only has one tile, since its total size is less than one tile.
+  EXPECT_TRUE(tiling_->TileAt(0, 0));
+
+  // Stop creating tiles so that any invalidations are left as holes.
+  client_.set_allow_create_tile(false);
+
+  // Invalidate the one-pixel strip exposed by growing from 99x99 to 100x100.
+  Region invalidation =
+      SubtractRegions(gfx::Rect(tile_size), gfx::Rect(original_layer_size));
+  tiling_->UpdateTilesToCurrentPile(invalidation, gfx::Size(200, 200));
+  EXPECT_FALSE(tiling_->TileAt(0, 0));
+
+  // The original tile was the same size after resize, but it would include new
+  // border pixels.
+  EXPECT_EQ(gfx::Rect(original_layer_size),
+            tiling_->TilingDataForTesting().TileBounds(0, 0));
+}
+
+TEST_F(PictureLayerTilingIteratorTest, LiveTilesExactlyCoverLiveTileRect) {
+  // Walks the live tiles rect through a mix of sizes — square, wide, tall,
+  // single-pixel-thin — and relies on SetLiveRectAndVerifyTiles() to assert
+  // that exactly the tiles intersecting each rect exist.
+  Initialize(gfx::Size(100, 100), 1, gfx::Size(1099, 801));
+  SetLiveRectAndVerifyTiles(gfx::Rect(100, 100));
+  SetLiveRectAndVerifyTiles(gfx::Rect(101, 99));
+  SetLiveRectAndVerifyTiles(gfx::Rect(1099, 1));
+  SetLiveRectAndVerifyTiles(gfx::Rect(1, 801));
+  SetLiveRectAndVerifyTiles(gfx::Rect(1099, 1));
+  SetLiveRectAndVerifyTiles(gfx::Rect(201, 800));
+}
+
+TEST_F(PictureLayerTilingIteratorTest, IteratorCoversLayerBoundsNoScale) {
+  // Coverage iteration at scale 1 must exactly tile any query rect,
+  // including the empty rect and rects not aligned to tile boundaries.
+  Initialize(gfx::Size(100, 100), 1, gfx::Size(1099, 801));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect());
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(0, 0, 1099, 801));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(52, 83, 789, 412));
+
+  // With borders, a size of 3x3 = 1 pixel of content.
+  Initialize(gfx::Size(3, 3), 1, gfx::Size(10, 10));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(0, 0, 1, 1));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(0, 0, 2, 2));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(1, 1, 2, 2));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(3, 2, 5, 2));
+}
+
+TEST_F(PictureLayerTilingIteratorTest, IteratorCoversLayerBoundsTilingScale) {
+  // Exercises coverage at a destination scale of 1 while the tiling itself
+  // is built at various contents scales, from 2.0 down to 0.01.
+  Initialize(gfx::Size(200, 100), 2.0f, gfx::Size(1005, 2010));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect());
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(0, 0, 1005, 2010));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(50, 112, 512, 381));
+
+  // Tiny tiles (1 content pixel each after borders) at 2x scale.
+  Initialize(gfx::Size(3, 3), 2.0f, gfx::Size(10, 10));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect());
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(0, 0, 1, 1));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(0, 0, 2, 2));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(1, 1, 2, 2));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(3, 2, 5, 2));
+
+  // Downscaled tilings must still exactly cover the layer-space rects.
+  Initialize(gfx::Size(100, 200), 0.5f, gfx::Size(1005, 2010));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(0, 0, 1005, 2010));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(50, 112, 512, 381));
+
+  Initialize(gfx::Size(150, 250), 0.37f, gfx::Size(1005, 2010));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(0, 0, 1005, 2010));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(50, 112, 512, 381));
+
+  Initialize(gfx::Size(312, 123), 0.01f, gfx::Size(1005, 2010));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(0, 0, 1005, 2010));
+  VerifyTilesExactlyCoverRect(1, gfx::Rect(50, 112, 512, 381));
+}
+
+TEST_F(PictureLayerTilingIteratorTest, IteratorCoversLayerBoundsBothScale) {
+  // Tiling built at 4x contents scale, coverage queried at 2x.
+  Initialize(gfx::Size(50, 50), 4.0f, gfx::Size(800, 600));
+  VerifyTilesExactlyCoverRect(2.0f, gfx::Rect());
+  VerifyTilesExactlyCoverRect(2.0f, gfx::Rect(0, 0, 1600, 1200));
+  VerifyTilesExactlyCoverRect(2.0f, gfx::Rect(512, 365, 253, 182));
+
+  // Non-integral tiling scale queried at a different non-integral scale.
+  gfx::Size layer_bounds(800, 600);
+  float dest_scale = 6.7f;
+  gfx::Rect scaled_layer_rect(
+      gfx::ToCeiledSize(gfx::ScaleSize(layer_bounds, dest_scale)));
+  Initialize(gfx::Size(256, 512), 5.2f, layer_bounds);
+  VerifyTilesExactlyCoverRect(dest_scale, scaled_layer_rect);
+  VerifyTilesExactlyCoverRect(dest_scale, gfx::Rect(2014, 1579, 867, 1033));
+}
+
+TEST_F(PictureLayerTilingIteratorTest, IteratorEmptyRect) {
+  // A coverage iterator constructed over an empty rect yields no tiles.
+  Initialize(gfx::Size(100, 100), 1.0f, gfx::Size(800, 600));
+
+  gfx::Rect empty_rect;
+  PictureLayerTiling::CoverageIterator iterator(
+      tiling_.get(), 1.0f, empty_rect);
+  EXPECT_FALSE(iterator);
+}
+
+TEST_F(PictureLayerTilingIteratorTest, NonIntersectingRect) {
+  // A rect entirely outside the tiling bounds yields no coverage.
+  Initialize(gfx::Size(100, 100), 1.0f, gfx::Size(800, 600));
+  gfx::Rect outside_rect(1000, 1000, 50, 50);
+  PictureLayerTiling::CoverageIterator iterator(
+      tiling_.get(), 1, outside_rect);
+  EXPECT_FALSE(iterator);
+}
+
+TEST_F(PictureLayerTilingIteratorTest, LayerEdgeTextureCoordinates) {
+  // One tile larger than the layer: coverage at scales below and above 1
+  // must still exactly match the scaled layer bounds at the edges.
+  Initialize(gfx::Size(300, 300), 1.0f, gfx::Size(256, 256));
+  // All of these sizes are 256x256, scaled and ceiled.
+  VerifyTilesExactlyCoverRect(1.0f, gfx::Rect(0, 0, 256, 256));
+  VerifyTilesExactlyCoverRect(0.8f, gfx::Rect(0, 0, 205, 205));
+  VerifyTilesExactlyCoverRect(1.2f, gfx::Rect(0, 0, 308, 308));
+}
+
+TEST_F(PictureLayerTilingIteratorTest, NonContainedDestRect) {
+  // Coverage requests that extend beyond the tiling must still be handled;
+  // VerifyTilesCoverNonContainedRect checks the overlapping portion.
+  Initialize(gfx::Size(100, 100), 1.0f, gfx::Size(400, 400));
+
+  // Too large in all dimensions
+  VerifyTilesCoverNonContainedRect(1.0f, gfx::Rect(-1000, -1000, 2000, 2000));
+  VerifyTilesCoverNonContainedRect(1.5f, gfx::Rect(-1000, -1000, 2000, 2000));
+  VerifyTilesCoverNonContainedRect(0.5f, gfx::Rect(-1000, -1000, 2000, 2000));
+
+  // Partially covering content, but too large
+  VerifyTilesCoverNonContainedRect(1.0f, gfx::Rect(-1000, 100, 2000, 100));
+  VerifyTilesCoverNonContainedRect(1.5f, gfx::Rect(-1000, 100, 2000, 100));
+  VerifyTilesCoverNonContainedRect(0.5f, gfx::Rect(-1000, 100, 2000, 100));
+}
+
+TEST(PictureLayerTilingTest, SkewportLimits) {
+  // The skewport extrapolates the viewport along its recent motion; this
+  // test pins the behavior when growth is capped by the client-provided
+  // extrapolation limit of 75 content pixels.
+  FakePictureLayerTilingClient client;
+  client.set_skewport_extrapolation_limit_in_content_pixels(75);
+  client.set_tree(ACTIVE_TREE);
+  scoped_ptr<TestablePictureLayerTiling> tiling;
+
+  gfx::Rect viewport(0, 0, 100, 100);
+  gfx::Size layer_bounds(200, 200);
+
+  client.SetTileSize(gfx::Size(100, 100));
+  tiling = TestablePictureLayerTiling::Create(1.0f, layer_bounds, &client);
+
+  // Baseline priority update at time 1.0; later skewports are computed
+  // relative to this viewport and timestamp.
+  tiling->UpdateTilePriorities(ACTIVE_TREE, viewport, 1.f, 1.0, Occlusion());
+
+  // Move viewport down 50 pixels in 0.5 seconds.
+  gfx::Rect down_skewport =
+      tiling->ComputeSkewport(1.5, gfx::Rect(0, 50, 100, 100));
+
+  EXPECT_EQ(0, down_skewport.x());
+  EXPECT_EQ(50, down_skewport.y());
+  EXPECT_EQ(100, down_skewport.width());
+  EXPECT_EQ(175, down_skewport.height());
+  EXPECT_TRUE(down_skewport.Contains(gfx::Rect(0, 50, 100, 100)));
+
+  // Move viewport down 50 and right 10 pixels.
+  gfx::Rect down_right_skewport =
+      tiling->ComputeSkewport(1.5, gfx::Rect(10, 50, 100, 100));
+
+  EXPECT_EQ(10, down_right_skewport.x());
+  EXPECT_EQ(50, down_right_skewport.y());
+  EXPECT_EQ(120, down_right_skewport.width());
+  EXPECT_EQ(175, down_right_skewport.height());
+  EXPECT_TRUE(down_right_skewport.Contains(gfx::Rect(10, 50, 100, 100)));
+
+  // Move viewport left.
+  gfx::Rect left_skewport =
+      tiling->ComputeSkewport(1.5, gfx::Rect(-50, 0, 100, 100));
+
+  EXPECT_EQ(-125, left_skewport.x());
+  EXPECT_EQ(0, left_skewport.y());
+  EXPECT_EQ(175, left_skewport.width());
+  EXPECT_EQ(100, left_skewport.height());
+  EXPECT_TRUE(left_skewport.Contains(gfx::Rect(-50, 0, 100, 100)));
+
+  // Expand viewport.
+  gfx::Rect expand_skewport =
+      tiling->ComputeSkewport(1.5, gfx::Rect(-50, -50, 200, 200));
+
+  // x and y moved by -75 (-50 - 75 = -125).
+  // right side and bottom side moved by 75 [(350 - 125) - (200 - 50) = 75].
+  EXPECT_EQ(-125, expand_skewport.x());
+  EXPECT_EQ(-125, expand_skewport.y());
+  EXPECT_EQ(350, expand_skewport.width());
+  EXPECT_EQ(350, expand_skewport.height());
+  EXPECT_TRUE(expand_skewport.Contains(gfx::Rect(-50, -50, 200, 200)));
+
+  // Expand the viewport past the limit; each edge extrapolates by at most
+  // the 75-pixel cap.
+  gfx::Rect big_expand_skewport =
+      tiling->ComputeSkewport(1.5, gfx::Rect(-500, -500, 1500, 1500));
+
+  EXPECT_EQ(-575, big_expand_skewport.x());
+  EXPECT_EQ(-575, big_expand_skewport.y());
+  EXPECT_EQ(1650, big_expand_skewport.width());
+  EXPECT_EQ(1650, big_expand_skewport.height());
+  EXPECT_TRUE(big_expand_skewport.Contains(gfx::Rect(-500, -500, 1500, 1500)));
+}
+
+TEST(PictureLayerTilingTest, ComputeSkewport) {
+  // Pins ComputeSkewport() results for downward, diagonal, leftward,
+  // shrinking and expanding viewport motion with no extrapolation limit set.
+  FakePictureLayerTilingClient client;
+  scoped_ptr<TestablePictureLayerTiling> tiling;
+
+  gfx::Rect viewport(0, 0, 100, 100);
+  gfx::Size layer_bounds(200, 200);
+
+  client.SetTileSize(gfx::Size(100, 100));
+  client.set_tree(ACTIVE_TREE);
+  tiling = TestablePictureLayerTiling::Create(1.0f, layer_bounds, &client);
+
+  // Baseline update at time 1.0; skewports below extrapolate from here.
+  tiling->UpdateTilePriorities(ACTIVE_TREE, viewport, 1.f, 1.0, Occlusion());
+
+  // Move viewport down 50 pixels in 0.5 seconds.
+  gfx::Rect down_skewport =
+      tiling->ComputeSkewport(1.5, gfx::Rect(0, 50, 100, 100));
+
+  EXPECT_EQ(0, down_skewport.x());
+  EXPECT_EQ(50, down_skewport.y());
+  EXPECT_EQ(100, down_skewport.width());
+  EXPECT_EQ(200, down_skewport.height());
+
+  // Shrink viewport: a fully contained viewport produces no extrapolation.
+  gfx::Rect shrink_skewport =
+      tiling->ComputeSkewport(1.5, gfx::Rect(25, 25, 50, 50));
+
+  EXPECT_EQ(25, shrink_skewport.x());
+  EXPECT_EQ(25, shrink_skewport.y());
+  EXPECT_EQ(50, shrink_skewport.width());
+  EXPECT_EQ(50, shrink_skewport.height());
+
+  // Move viewport down 50 and right 10 pixels.
+  gfx::Rect down_right_skewport =
+      tiling->ComputeSkewport(1.5, gfx::Rect(10, 50, 100, 100));
+
+  EXPECT_EQ(10, down_right_skewport.x());
+  EXPECT_EQ(50, down_right_skewport.y());
+  EXPECT_EQ(120, down_right_skewport.width());
+  EXPECT_EQ(200, down_right_skewport.height());
+
+  // Move viewport left.
+  gfx::Rect left_skewport =
+      tiling->ComputeSkewport(1.5, gfx::Rect(-20, 0, 100, 100));
+
+  EXPECT_EQ(-60, left_skewport.x());
+  EXPECT_EQ(0, left_skewport.y());
+  EXPECT_EQ(140, left_skewport.width());
+  EXPECT_EQ(100, left_skewport.height());
+
+  // Expand viewport in 0.2 seconds.
+  gfx::Rect expanded_skewport =
+      tiling->ComputeSkewport(1.2, gfx::Rect(-5, -5, 110, 110));
+
+  EXPECT_EQ(-30, expanded_skewport.x());
+  EXPECT_EQ(-30, expanded_skewport.y());
+  EXPECT_EQ(160, expanded_skewport.width());
+  EXPECT_EQ(160, expanded_skewport.height());
+}
+
+TEST(PictureLayerTilingTest, ViewportDistanceWithScale) {
+  // Verifies priority bins (NOW / SOON / EVENTUALLY) and
+  // distance_to_visible values as the viewport moves and as the tiling and
+  // layer contents scales change.
+  FakePictureLayerTilingClient client;
+  scoped_ptr<TestablePictureLayerTiling> tiling;
+
+  gfx::Rect viewport(0, 0, 100, 100);
+  gfx::Size layer_bounds(1500, 1500);
+
+  client.SetTileSize(gfx::Size(10, 10));
+  client.set_tree(ACTIVE_TREE);
+
+  // Tiling at 0.25 scale: this should create 47x47 tiles of size 10x10.
+  // The reason is that each tile has a one pixel border, so tile at (1, 2)
+  // for instance begins at (8, 16) pixels. So tile at (46, 46) will begin at
+  // (368, 368) and extend to the end of 1500 * 0.25 = 375 edge of the
+  // tiling.
+  tiling = TestablePictureLayerTiling::Create(0.25f, layer_bounds, &client);
+  gfx::Rect viewport_in_content_space =
+      gfx::ToEnclosedRect(gfx::ScaleRect(viewport, 0.25f));
+
+  tiling->UpdateTilePriorities(ACTIVE_TREE, viewport, 1.f, 1.0, Occlusion());
+
+  // The soon rect is the viewport expanded by 312 pixels on each side.
+  gfx::Rect soon_rect = viewport;
+  soon_rect.Inset(-312.f, -312.f, -312.f, -312.f);
+  gfx::Rect soon_rect_in_content_space =
+      gfx::ToEnclosedRect(gfx::ScaleRect(soon_rect, 0.25f));
+
+  // Sanity checks.
+  for (int i = 0; i < 47; ++i) {
+    for (int j = 0; j < 47; ++j) {
+      EXPECT_TRUE(tiling->TileAt(i, j)) << "i: " << i << " j: " << j;
+    }
+  }
+  for (int i = 0; i < 47; ++i) {
+    EXPECT_FALSE(tiling->TileAt(i, 47)) << "i: " << i;
+    EXPECT_FALSE(tiling->TileAt(47, i)) << "i: " << i;
+  }
+
+  // No movement in the viewport implies that tiles will either be NOW
+  // or EVENTUALLY, with the exception of tiles that are between 0 and 312
+  // pixels away from the viewport, which will be in the SOON bin.
+  bool have_now = false;
+  bool have_eventually = false;
+  bool have_soon = false;
+  for (int i = 0; i < 47; ++i) {
+    for (int j = 0; j < 47; ++j) {
+      Tile* tile = tiling->TileAt(i, j);
+      TilePriority priority = tile->priority(ACTIVE_TREE);
+
+      gfx::Rect tile_rect = tiling->TilingDataForTesting().TileBounds(i, j);
+      if (viewport_in_content_space.Intersects(tile_rect)) {
+        EXPECT_EQ(TilePriority::NOW, priority.priority_bin);
+        EXPECT_FLOAT_EQ(0.f, priority.distance_to_visible);
+        have_now = true;
+      } else if (soon_rect_in_content_space.Intersects(tile_rect)) {
+        EXPECT_EQ(TilePriority::SOON, priority.priority_bin);
+        have_soon = true;
+      } else {
+        EXPECT_EQ(TilePriority::EVENTUALLY, priority.priority_bin);
+        EXPECT_GT(priority.distance_to_visible, 0.f);
+        have_eventually = true;
+      }
+    }
+  }
+
+  EXPECT_TRUE(have_now);
+  EXPECT_TRUE(have_soon);
+  EXPECT_TRUE(have_eventually);
+
+  // Spot check some distances.
+  // Tile at 5, 1 should begin at 41x9 in content space (without borders),
+  // so the distance to a viewport that ends at 25x25 in content space
+  // should be 17 (41 - 25 + 1). In layer space, then that should be
+  // 17 / 0.25 = 68 pixels.
+
+  // We can verify that the content rect (with borders) is one pixel off
+  // 41,9 8x8 on all sides.
+  EXPECT_EQ(tiling->TileAt(5, 1)->content_rect().ToString(), "40,8 10x10");
+
+  TilePriority priority = tiling->TileAt(5, 1)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(68.f, priority.distance_to_visible);
+
+  priority = tiling->TileAt(2, 5)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(68.f, priority.distance_to_visible);
+
+  priority = tiling->TileAt(3, 4)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(40.f, priority.distance_to_visible);
+
+  // Move the viewport down 40 pixels.
+  viewport = gfx::Rect(0, 40, 100, 100);
+  viewport_in_content_space =
+      gfx::ToEnclosedRect(gfx::ScaleRect(viewport, 0.25f));
+  gfx::Rect skewport = tiling->ComputeSkewport(2.0, viewport_in_content_space);
+
+  soon_rect = viewport;
+  soon_rect.Inset(-312.f, -312.f, -312.f, -312.f);
+  soon_rect_in_content_space =
+      gfx::ToEnclosedRect(gfx::ScaleRect(soon_rect, 0.25f));
+
+  EXPECT_EQ(0, skewport.x());
+  EXPECT_EQ(10, skewport.y());
+  EXPECT_EQ(25, skewport.width());
+  EXPECT_EQ(35, skewport.height());
+
+  tiling->UpdateTilePriorities(ACTIVE_TREE, viewport, 1.f, 2.0, Occlusion());
+
+  have_now = false;
+  have_eventually = false;
+  have_soon = false;
+
+  // Viewport moved, so we expect to find some NOW tiles, some SOON tiles and
+  // some EVENTUALLY tiles.
+  for (int i = 0; i < 47; ++i) {
+    for (int j = 0; j < 47; ++j) {
+      Tile* tile = tiling->TileAt(i, j);
+      TilePriority priority = tile->priority(ACTIVE_TREE);
+
+      gfx::Rect tile_rect = tiling->TilingDataForTesting().TileBounds(i, j);
+      if (viewport_in_content_space.Intersects(tile_rect)) {
+        EXPECT_EQ(TilePriority::NOW, priority.priority_bin) << "i: " << i
+                                                            << " j: " << j;
+        EXPECT_FLOAT_EQ(0.f, priority.distance_to_visible) << "i: " << i
+                                                           << " j: " << j;
+        have_now = true;
+      } else if (skewport.Intersects(tile_rect) ||
+                 soon_rect_in_content_space.Intersects(tile_rect)) {
+        EXPECT_EQ(TilePriority::SOON, priority.priority_bin) << "i: " << i
+                                                             << " j: " << j;
+        EXPECT_GT(priority.distance_to_visible, 0.f) << "i: " << i
+                                                     << " j: " << j;
+        have_soon = true;
+      } else {
+        EXPECT_EQ(TilePriority::EVENTUALLY, priority.priority_bin)
+            << "i: " << i << " j: " << j;
+        EXPECT_GT(priority.distance_to_visible, 0.f) << "i: " << i
+                                                     << " j: " << j;
+        have_eventually = true;
+      }
+    }
+  }
+
+  EXPECT_TRUE(have_now);
+  EXPECT_TRUE(have_soon);
+  EXPECT_TRUE(have_eventually);
+
+  priority = tiling->TileAt(5, 1)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(68.f, priority.distance_to_visible);
+
+  priority = tiling->TileAt(2, 5)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(28.f, priority.distance_to_visible);
+
+  priority = tiling->TileAt(3, 4)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(4.f, priority.distance_to_visible);
+
+  // Change the underlying layer scale. Layer-space distances double with a
+  // 2x contents scale.
+  tiling->UpdateTilePriorities(ACTIVE_TREE, viewport, 2.0f, 3.0, Occlusion());
+
+  priority = tiling->TileAt(5, 1)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(136.f, priority.distance_to_visible);
+
+  priority = tiling->TileAt(2, 5)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(56.f, priority.distance_to_visible);
+
+  priority = tiling->TileAt(3, 4)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(8.f, priority.distance_to_visible);
+
+  // Test additional scales.
+  tiling = TestablePictureLayerTiling::Create(0.2f, layer_bounds, &client);
+  tiling->UpdateTilePriorities(ACTIVE_TREE, viewport, 1.0f, 4.0, Occlusion());
+
+  priority = tiling->TileAt(5, 1)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(110.f, priority.distance_to_visible);
+
+  priority = tiling->TileAt(2, 5)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(70.f, priority.distance_to_visible);
+
+  priority = tiling->TileAt(3, 4)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(60.f, priority.distance_to_visible);
+
+  tiling->UpdateTilePriorities(ACTIVE_TREE, viewport, 0.5f, 5.0, Occlusion());
+
+  priority = tiling->TileAt(5, 1)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(55.f, priority.distance_to_visible);
+
+  priority = tiling->TileAt(2, 5)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(35.f, priority.distance_to_visible);
+
+  priority = tiling->TileAt(3, 4)->priority(ACTIVE_TREE);
+  EXPECT_FLOAT_EQ(30.f, priority.distance_to_visible);
+}
+
+TEST(PictureLayerTilingTest, ExpandRectEqual) {
+  // Target area equals the starting area: no expansion should happen.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect(-1000, -1000, 10000, 10000);
+  int64 target_area = 100 * 200;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(starting_rect.ToString(), expanded.ToString());
+}
+
+TEST(PictureLayerTilingTest, ExpandRectSmaller) {
+  // A target area below the starting area must still expand equally in
+  // every direction and keep the starting rect covered.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect(-1000, -1000, 10000, 10000);
+  int64 target_area = 100 * 100;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(expanded.bottom() - starting_rect.bottom(),
+            starting_rect.y() - expanded.y());
+  EXPECT_EQ(expanded.right() - starting_rect.right(),
+            starting_rect.x() - expanded.x());
+  EXPECT_EQ(expanded.width() - starting_rect.width(),
+            expanded.height() - starting_rect.height());
+
+  // |starting_rect| represents the visible rect, and |expanded| represents
+  // the eventually rect. If the eventually rect doesn't contain the visible
+  // rect, we will start losing tiles.
+  EXPECT_TRUE(expanded.Contains(starting_rect));
+  EXPECT_TRUE(bounding_rect.Contains(expanded));
+}
+
+TEST(PictureLayerTilingTest, ExpandRectUnbounded) {
+  // With generous bounds, expansion is symmetric and hits the target area
+  // to within rounding.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect(-1000, -1000, 10000, 10000);
+  int64 target_area = 200 * 200;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(expanded.bottom() - starting_rect.bottom(),
+            starting_rect.y() - expanded.y());
+  EXPECT_EQ(expanded.right() - starting_rect.right(),
+            starting_rect.x() - expanded.x());
+  EXPECT_EQ(expanded.width() - starting_rect.width(),
+            expanded.height() - starting_rect.height());
+  EXPECT_NEAR(200 * 200, expanded.width() * expanded.height(), 100);
+  EXPECT_TRUE(bounding_rect.Contains(expanded));
+}
+
+TEST(PictureLayerTilingTest, ExpandRectBoundedSmaller) {
+  // Bounds smaller than the starting rect: the result is clipped to bounds.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect(50, 60, 40, 30);
+  int64 target_area = 200 * 200;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(bounding_rect.ToString(), expanded.ToString());
+}
+
+TEST(PictureLayerTilingTest, ExpandRectBoundedEqual) {
+  // Bounds identical to the starting rect: no room to grow at all.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect = starting_rect;
+  int64 target_area = 200 * 200;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(bounding_rect.ToString(), expanded.ToString());
+}
+
+TEST(PictureLayerTilingTest, ExpandRectBoundedSmallerStretchVertical) {
+  // Bounds narrower but taller than the start: result fills the bounds.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect(45, 0, 90, 300);
+  int64 target_area = 200 * 200;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(bounding_rect.ToString(), expanded.ToString());
+}
+
+TEST(PictureLayerTilingTest, ExpandRectBoundedEqualStretchVertical) {
+  // Bounds match the start horizontally but allow vertical growth only.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect(40, 0, 100, 300);
+  int64 target_area = 200 * 200;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(bounding_rect.ToString(), expanded.ToString());
+}
+
+TEST(PictureLayerTilingTest, ExpandRectBoundedSmallerStretchHorizontal) {
+  // Bounds shorter but wider than the start: result fills the bounds.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect(0, 55, 180, 190);
+  int64 target_area = 200 * 200;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(bounding_rect.ToString(), expanded.ToString());
+}
+
+TEST(PictureLayerTilingTest, ExpandRectBoundedEqualStretchHorizontal) {
+  // Bounds match the start vertically but allow horizontal growth only.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect(0, 50, 180, 200);
+  int64 target_area = 200 * 200;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(bounding_rect.ToString(), expanded.ToString());
+}
+
+TEST(PictureLayerTilingTest, ExpandRectBoundedLeft) {
+  // The left edge is constrained by the bounds; the other three edges
+  // expand equally to approach (without exceeding) the target area.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect(20, -1000, 10000, 10000);
+  int64 target_area = 200 * 200;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(expanded.bottom() - starting_rect.bottom(),
+            starting_rect.y() - expanded.y());
+  EXPECT_EQ(expanded.bottom() - starting_rect.bottom(),
+            expanded.right() - starting_rect.right());
+  EXPECT_LE(expanded.width() * expanded.height(), target_area);
+  EXPECT_GT(expanded.width() * expanded.height(),
+            target_area - expanded.width() - expanded.height() * 2);
+  EXPECT_TRUE(bounding_rect.Contains(expanded));
+}
+
+TEST(PictureLayerTilingTest, ExpandRectBoundedRight) {
+  // The right edge is constrained by the bounds; the other three edges
+  // expand equally to approach (without exceeding) the target area.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect(-1000, -1000, 1000 + 120, 10000);
+  int64 target_area = 200 * 200;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(expanded.bottom() - starting_rect.bottom(),
+            starting_rect.y() - expanded.y());
+  EXPECT_EQ(expanded.bottom() - starting_rect.bottom(),
+            starting_rect.x() - expanded.x());
+  EXPECT_LE(expanded.width() * expanded.height(), target_area);
+  EXPECT_GT(expanded.width() * expanded.height(),
+            target_area - expanded.width() - expanded.height() * 2);
+  EXPECT_TRUE(bounding_rect.Contains(expanded));
+}
+
+TEST(PictureLayerTilingTest, ExpandRectBoundedTop) {
+  // The top edge is constrained by the bounds; the other three edges
+  // expand equally to approach (without exceeding) the target area.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect(-1000, 30, 10000, 10000);
+  int64 target_area = 200 * 200;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(expanded.right() - starting_rect.right(),
+            starting_rect.x() - expanded.x());
+  EXPECT_EQ(expanded.right() - starting_rect.right(),
+            expanded.bottom() - starting_rect.bottom());
+  EXPECT_LE(expanded.width() * expanded.height(), target_area);
+  EXPECT_GT(expanded.width() * expanded.height(),
+            target_area - expanded.width() * 2 - expanded.height());
+  EXPECT_TRUE(bounding_rect.Contains(expanded));
+}
+
+TEST(PictureLayerTilingTest, ExpandRectBoundedBottom) {
+  // The bottom edge is constrained by the bounds; the other three edges
+  // expand equally to approach (without exceeding) the target area.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect(-1000, -1000, 10000, 1000 + 220);
+  int64 target_area = 200 * 200;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(expanded.right() - starting_rect.right(),
+            starting_rect.x() - expanded.x());
+  EXPECT_EQ(expanded.right() - starting_rect.right(),
+            starting_rect.y() - expanded.y());
+  EXPECT_LE(expanded.width() * expanded.height(), target_area);
+  EXPECT_GT(expanded.width() * expanded.height(),
+            target_area - expanded.width() * 2 - expanded.height());
+  EXPECT_TRUE(bounding_rect.Contains(expanded));
+}
+
+TEST(PictureLayerTilingTest, ExpandRectSquishedHorizontally) {
+  // Both left and right are pinned by the bounds, so extra growth goes
+  // vertically until the target area is approached.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect(0, -4000, 100 + 40 + 20, 100000);
+  int64 target_area = 400 * 400;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(20, expanded.right() - starting_rect.right());
+  EXPECT_EQ(40, starting_rect.x() - expanded.x());
+  EXPECT_EQ(expanded.bottom() - starting_rect.bottom(),
+            starting_rect.y() - expanded.y());
+  EXPECT_LE(expanded.width() * expanded.height(), target_area);
+  EXPECT_GT(expanded.width() * expanded.height(),
+            target_area - expanded.width() * 2);
+  EXPECT_TRUE(bounding_rect.Contains(expanded));
+}
+
+TEST(PictureLayerTilingTest, ExpandRectSquishedVertically) {
+  // Both top and bottom are pinned by the bounds, so extra growth goes
+  // horizontally until the target area is approached.
+  gfx::Rect starting_rect(40, 50, 100, 200);
+  gfx::Rect bounding_rect(-4000, 0, 100000, 200 + 50 + 30);
+  int64 target_area = 400 * 400;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(30, expanded.bottom() - starting_rect.bottom());
+  EXPECT_EQ(50, starting_rect.y() - expanded.y());
+  EXPECT_EQ(expanded.right() - starting_rect.right(),
+            starting_rect.x() - expanded.x());
+  EXPECT_LE(expanded.width() * expanded.height(), target_area);
+  EXPECT_GT(expanded.width() * expanded.height(),
+            target_area - expanded.height() * 2);
+  EXPECT_TRUE(bounding_rect.Contains(expanded));
+}
+
+TEST(PictureLayerTilingTest, ExpandRectOutOfBoundsFarAway) {
+  // A starting rect far outside tiny bounds cannot expand into them.
+  gfx::Rect starting_rect(400, 500, 100, 200);
+  gfx::Rect bounding_rect(0, 0, 10, 10);
+  int64 target_area = 400 * 400;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_TRUE(expanded.IsEmpty());
+}
+
+TEST(PictureLayerTilingTest, ExpandRectOutOfBoundsExpandedFullyCover) {
+  // A nearby out-of-bounds start expands far enough to swallow the whole
+  // (tiny) bounds, so the result is exactly the bounds.
+  gfx::Rect starting_rect(40, 50, 100, 100);
+  gfx::Rect bounding_rect(0, 0, 10, 10);
+  int64 target_area = 400 * 400;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(bounding_rect.ToString(), expanded.ToString());
+}
+
+TEST(PictureLayerTilingTest, ExpandRectOutOfBoundsExpandedPartlyCover) {
+  // A start beyond the bottom-right corner expands back to touch the
+  // bounds' right/bottom edges without exceeding the target area.
+  gfx::Rect starting_rect(600, 600, 100, 100);
+  gfx::Rect bounding_rect(0, 0, 500, 500);
+  int64 target_area = 400 * 400;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_EQ(bounding_rect.right(), expanded.right());
+  EXPECT_EQ(bounding_rect.bottom(), expanded.bottom());
+  EXPECT_LE(expanded.width() * expanded.height(), target_area);
+  EXPECT_GT(expanded.width() * expanded.height(),
+            target_area - expanded.width() - expanded.height());
+  EXPECT_TRUE(bounding_rect.Contains(expanded));
+}
+
+TEST(PictureLayerTilingTest, EmptyStartingRect) {
+  // If a layer has a non-invertible transform, then the starting rect
+  // for the layer would be empty; expansion must yield an empty rect too.
+  gfx::Rect starting_rect(40, 40, 0, 0);
+  gfx::Rect bounding_rect(0, 0, 10, 10);
+  int64 target_area = 400 * 400;
+  gfx::Rect expanded = PictureLayerTiling::ExpandRectEquallyToAreaBoundedBy(
+      starting_rect, target_area, bounding_rect, NULL);
+  EXPECT_TRUE(expanded.IsEmpty());
+}
+
+TEST(PictureLayerTilingTest, TilingRasterTileIteratorStaticViewport) {
+ FakePictureLayerTilingClient client;
+ scoped_ptr<TestablePictureLayerTiling> tiling;
+
+ gfx::Rect viewport(50, 50, 100, 100);
+ gfx::Size layer_bounds(800, 800);
+
+ gfx::Rect soon_rect = viewport;
+ soon_rect.Inset(-312.f, -312.f, -312.f, -312.f);
+
+ client.SetTileSize(gfx::Size(30, 30));
+ client.set_tree(ACTIVE_TREE);
+
+ tiling = TestablePictureLayerTiling::Create(1.0f, layer_bounds, &client);
+ tiling->UpdateTilePriorities(ACTIVE_TREE, viewport, 1.0f, 1.0, Occlusion());
+
+ PictureLayerTiling::TilingRasterTileIterator empty_iterator;
+ EXPECT_FALSE(empty_iterator);
+
+ std::vector<Tile*> all_tiles = tiling->AllTilesForTesting();
+
+ // Sanity check.
+ EXPECT_EQ(841u, all_tiles.size());
+
+ // The explanation of each iteration is as follows:
+ // 1. First iteration tests that we can get all of the tiles correctly.
+ // 2. Second iteration ensures that we can get all of the tiles again (first
+ // iteration didn't change any tiles), as well set all tiles to be ready to
+ // draw.
+ // 3. Third iteration ensures that no tiles are returned, since they were all
+ // marked as ready to draw.
+ for (int i = 0; i < 3; ++i) {
+ PictureLayerTiling::TilingRasterTileIterator it(tiling.get(), ACTIVE_TREE);
+
+ // There are 3 bins in TilePriority.
+ bool have_tiles[3] = {};
+
+ // On the third iteration, we should get no tiles since everything was
+ // marked as ready to draw.
+ if (i == 2) {
+ EXPECT_FALSE(it);
+ continue;
+ }
+
+ EXPECT_TRUE(it);
+ std::set<Tile*> unique_tiles;
+ unique_tiles.insert(*it);
+ Tile* last_tile = *it;
+ have_tiles[last_tile->priority(ACTIVE_TREE).priority_bin] = true;
+
+ // On the second iteration, mark everything as ready to draw (solid color).
+ if (i == 1) {
+ ManagedTileState::DrawInfo& draw_info = last_tile->draw_info();
+ draw_info.SetSolidColorForTesting(SK_ColorRED);
+ }
+ ++it;
+ int eventually_bin_order_correct_count = 0;
+ int eventually_bin_order_incorrect_count = 0;
+ while (it) {
+ Tile* new_tile = *it;
+ ++it;
+ unique_tiles.insert(new_tile);
+
+ TilePriority last_priority = last_tile->priority(ACTIVE_TREE);
+ TilePriority new_priority = new_tile->priority(ACTIVE_TREE);
+ EXPECT_LE(last_priority.priority_bin, new_priority.priority_bin);
+ if (last_priority.priority_bin == new_priority.priority_bin) {
+ if (last_priority.priority_bin == TilePriority::EVENTUALLY) {
+ bool order_correct = last_priority.distance_to_visible <=
+ new_priority.distance_to_visible;
+ eventually_bin_order_correct_count += order_correct;
+ eventually_bin_order_incorrect_count += !order_correct;
+ } else if (!soon_rect.Intersects(new_tile->content_rect()) &&
+ !soon_rect.Intersects(last_tile->content_rect())) {
+ EXPECT_LE(last_priority.distance_to_visible,
+ new_priority.distance_to_visible);
+ EXPECT_EQ(TilePriority::NOW, new_priority.priority_bin);
+ } else if (new_priority.distance_to_visible > 0.f) {
+ EXPECT_EQ(TilePriority::SOON, new_priority.priority_bin);
+ }
+ }
+ have_tiles[new_priority.priority_bin] = true;
+
+ last_tile = new_tile;
+
+ // On the second iteration, mark everything as ready to draw (solid
+ // color).
+ if (i == 1) {
+ ManagedTileState::DrawInfo& draw_info = last_tile->draw_info();
+ draw_info.SetSolidColorForTesting(SK_ColorRED);
+ }
+ }
+
+ EXPECT_GT(eventually_bin_order_correct_count,
+ eventually_bin_order_incorrect_count);
+
+ // We should have now and eventually tiles, as well as soon tiles from
+ // the border region.
+ EXPECT_TRUE(have_tiles[TilePriority::NOW]);
+ EXPECT_TRUE(have_tiles[TilePriority::SOON]);
+ EXPECT_TRUE(have_tiles[TilePriority::EVENTUALLY]);
+
+ EXPECT_EQ(unique_tiles.size(), all_tiles.size());
+ }
+}
+
+TEST(PictureLayerTilingTest, TilingRasterTileIteratorMovingViewport) {
+ FakePictureLayerTilingClient client;
+ scoped_ptr<TestablePictureLayerTiling> tiling;
+
+ gfx::Rect viewport(50, 0, 100, 100);
+ gfx::Rect moved_viewport(50, 0, 100, 500);
+ gfx::Size layer_bounds(1000, 1000);
+
+ client.SetTileSize(gfx::Size(30, 30));
+ client.set_tree(ACTIVE_TREE);
+
+ tiling = TestablePictureLayerTiling::Create(1.f, layer_bounds, &client);
+ tiling->UpdateTilePriorities(ACTIVE_TREE, viewport, 1.0f, 1.0, Occlusion());
+ tiling->UpdateTilePriorities(
+ ACTIVE_TREE, moved_viewport, 1.0f, 2.0, Occlusion());
+
+ gfx::Rect soon_rect = moved_viewport;
+ soon_rect.Inset(-312.f, -312.f, -312.f, -312.f);
+
+ // There are 3 bins in TilePriority.
+ bool have_tiles[3] = {};
+ Tile* last_tile = NULL;
+ int eventually_bin_order_correct_count = 0;
+ int eventually_bin_order_incorrect_count = 0;
+ for (PictureLayerTiling::TilingRasterTileIterator it(tiling.get(),
+ ACTIVE_TREE);
+ it;
+ ++it) {
+ if (!last_tile)
+ last_tile = *it;
+
+ Tile* new_tile = *it;
+
+ TilePriority last_priority = last_tile->priority(ACTIVE_TREE);
+ TilePriority new_priority = new_tile->priority(ACTIVE_TREE);
+
+ have_tiles[new_priority.priority_bin] = true;
+
+ EXPECT_LE(last_priority.priority_bin, new_priority.priority_bin);
+ if (last_priority.priority_bin == new_priority.priority_bin) {
+ if (last_priority.priority_bin == TilePriority::EVENTUALLY) {
+ bool order_correct = last_priority.distance_to_visible <=
+ new_priority.distance_to_visible;
+ eventually_bin_order_correct_count += order_correct;
+ eventually_bin_order_incorrect_count += !order_correct;
+ } else if (!soon_rect.Intersects(new_tile->content_rect()) &&
+ !soon_rect.Intersects(last_tile->content_rect())) {
+ EXPECT_LE(last_priority.distance_to_visible,
+ new_priority.distance_to_visible);
+ } else if (new_priority.distance_to_visible > 0.f) {
+ EXPECT_EQ(TilePriority::SOON, new_priority.priority_bin);
+ }
+ }
+ last_tile = new_tile;
+ }
+
+ EXPECT_GT(eventually_bin_order_correct_count,
+ eventually_bin_order_incorrect_count);
+
+ EXPECT_TRUE(have_tiles[TilePriority::NOW]);
+ EXPECT_TRUE(have_tiles[TilePriority::SOON]);
+ EXPECT_TRUE(have_tiles[TilePriority::EVENTUALLY]);
+}
+
+static void TileExists(bool exists, Tile* tile,
+ const gfx::Rect& geometry_rect) {
+ EXPECT_EQ(exists, tile != NULL) << geometry_rect.ToString();
+}
+
// Verifies TilingEvictionTileIterator: it yields nothing until tiles hold
// resources, and then, queried bin by bin (EVENTUALLY, SOON, NOW), returns
// every tile exactly once with the matching priority bin. No tile here is
// required for activation, so that bin stays empty.
TEST(PictureLayerTilingTest, TilingEvictionTileIteratorStaticViewport) {
  // Real resources are needed so tiles can actually be initialized with
  // (and evicted from) the resource provider.
  FakeOutputSurfaceClient output_surface_client;
  scoped_ptr<FakeOutputSurface> output_surface = FakeOutputSurface::Create3d();
  CHECK(output_surface->BindToClient(&output_surface_client));
  TestSharedBitmapManager shared_bitmap_manager;
  scoped_ptr<ResourceProvider> resource_provider = ResourceProvider::Create(
      output_surface.get(), &shared_bitmap_manager, NULL, 0, false, 1, false);

  FakePictureLayerTilingClient client(resource_provider.get());
  scoped_ptr<TestablePictureLayerTiling> tiling;

  gfx::Rect viewport(50, 50, 100, 100);
  gfx::Size layer_bounds(2000, 2000);

  client.SetTileSize(gfx::Size(30, 30));
  client.set_tree(ACTIVE_TREE);

  tiling = TestablePictureLayerTiling::Create(1.0f, layer_bounds, &client);
  tiling->UpdateTilePriorities(ACTIVE_TREE, viewport, 1.0f, 1.0, Occlusion());

  // A default-constructed raster iterator is empty.
  PictureLayerTiling::TilingRasterTileIterator empty_iterator;
  EXPECT_FALSE(empty_iterator);

  std::vector<Tile*> all_tiles = tiling->AllTilesForTesting();

  PictureLayerTiling::TilingEvictionTileIterator it(
      tiling.get(), SMOOTHNESS_TAKES_PRIORITY, PictureLayerTiling::NOW);

  // Tiles don't have resources to evict.
  EXPECT_FALSE(it);

  // Sanity check.
  EXPECT_EQ(5184u, all_tiles.size());

  // Give every tile a resource so the eviction iterator has work to do.
  client.tile_manager()->InitializeTilesWithResourcesForTesting(all_tiles);

  std::set<Tile*> all_tiles_set(all_tiles.begin(), all_tiles.end());

  // Collect tiles from each eviction bin; together they must cover the
  // whole tiling exactly once.
  std::set<Tile*> eviction_tiles;

  it = PictureLayerTiling::TilingEvictionTileIterator(
      tiling.get(), SMOOTHNESS_TAKES_PRIORITY, PictureLayerTiling::EVENTUALLY);
  EXPECT_TRUE(it);
  for (; it; ++it) {
    Tile* tile = *it;
    EXPECT_TRUE(tile);
    EXPECT_EQ(TilePriority::EVENTUALLY,
              tile->priority(ACTIVE_TREE).priority_bin);
    EXPECT_FALSE(tile->required_for_activation());
    eviction_tiles.insert(tile);
  }

  it = PictureLayerTiling::TilingEvictionTileIterator(
      tiling.get(), SMOOTHNESS_TAKES_PRIORITY, PictureLayerTiling::SOON);
  EXPECT_TRUE(it);
  for (; it; ++it) {
    Tile* tile = *it;
    EXPECT_TRUE(tile);
    EXPECT_EQ(TilePriority::SOON, tile->priority(ACTIVE_TREE).priority_bin);
    EXPECT_FALSE(tile->required_for_activation());
    eviction_tiles.insert(tile);
  }

  it = PictureLayerTiling::TilingEvictionTileIterator(
      tiling.get(), SMOOTHNESS_TAKES_PRIORITY, PictureLayerTiling::NOW);
  EXPECT_TRUE(it);
  for (; it; ++it) {
    Tile* tile = *it;
    EXPECT_TRUE(tile);
    EXPECT_EQ(TilePriority::NOW, tile->priority(ACTIVE_TREE).priority_bin);
    EXPECT_FALSE(tile->required_for_activation());
    eviction_tiles.insert(tile);
  }

  // No tile is required for activation, so this bin must be empty.
  it = PictureLayerTiling::TilingEvictionTileIterator(
      tiling.get(),
      SMOOTHNESS_TAKES_PRIORITY,
      PictureLayerTiling::NOW_AND_REQUIRED_FOR_ACTIVATION);
  EXPECT_FALSE(it);

  // The three bins together must account for every tile exactly once.
  EXPECT_GT(all_tiles_set.size(), 0u);
  EXPECT_EQ(all_tiles_set, eviction_tiles);
}
+
// Tiles are created only once UpdateTilePriorities runs with a non-empty
// visible rect, and are dropped again when a later update has an empty one.
TEST_F(PictureLayerTilingIteratorTest, TilesExist) {
  gfx::Size layer_bounds(1099, 801);
  Initialize(gfx::Size(100, 100), 1.f, layer_bounds);
  VerifyTilesExactlyCoverRect(1.f, gfx::Rect(layer_bounds));
  // Before any priority update, no tiles exist yet.
  VerifyTiles(1.f, gfx::Rect(layer_bounds), base::Bind(&TileExists, false));

  client_.set_tree(ACTIVE_TREE);
  tiling_->UpdateTilePriorities(
      ACTIVE_TREE,
      gfx::Rect(layer_bounds),  // visible content rect
      1.f,  // current contents scale
      1.0,  // current frame time
      Occlusion());
  VerifyTiles(1.f, gfx::Rect(layer_bounds), base::Bind(&TileExists, true));

  // Make the viewport rect empty. All tiles are killed and become zombies.
  tiling_->UpdateTilePriorities(ACTIVE_TREE,
                                gfx::Rect(),  // visible content rect
                                1.f,          // current contents scale
                                2.0,          // current frame time
                                Occlusion());
  VerifyTiles(1.f, gfx::Rect(layer_bounds), base::Bind(&TileExists, false));
}
+
// A visible content rect far larger than the layer must not confuse tile
// bookkeeping: tiles created for the layer remain live.
TEST_F(PictureLayerTilingIteratorTest, TilesExistGiantViewport) {
  gfx::Size layer_bounds(1099, 801);
  Initialize(gfx::Size(100, 100), 1.f, layer_bounds);
  VerifyTilesExactlyCoverRect(1.f, gfx::Rect(layer_bounds));
  VerifyTiles(1.f, gfx::Rect(layer_bounds), base::Bind(&TileExists, false));

  // A viewport vastly larger than (and fully containing) the layer.
  gfx::Rect giant_rect(-10000000, -10000000, 1000000000, 1000000000);

  client_.set_tree(ACTIVE_TREE);
  tiling_->UpdateTilePriorities(
      ACTIVE_TREE,
      gfx::Rect(layer_bounds),  // visible content rect
      1.f,  // current contents scale
      1.0,  // current frame time
      Occlusion());
  VerifyTiles(1.f, gfx::Rect(layer_bounds), base::Bind(&TileExists, true));

  // Even with a giant visible content rect, the layer should still have
  // live tiles.
  tiling_->UpdateTilePriorities(ACTIVE_TREE,
                                giant_rect,  // visible content rect
                                1.f,         // current contents scale
                                2.0,         // current frame time
                                Occlusion());
  VerifyTiles(1.f, gfx::Rect(layer_bounds), base::Bind(&TileExists, true));
}
+
// Even when the viewport does not intersect the layer at all, tiles are
// still created for it after a priority update (the interest area extends
// beyond the visible rect).
TEST_F(PictureLayerTilingIteratorTest, TilesExistOutsideViewport) {
  gfx::Size layer_bounds(1099, 801);
  Initialize(gfx::Size(100, 100), 1.f, layer_bounds);
  VerifyTilesExactlyCoverRect(1.f, gfx::Rect(layer_bounds));
  VerifyTiles(1.f, gfx::Rect(layer_bounds), base::Bind(&TileExists, false));

  // This rect does not intersect with the layer, as the layer is outside the
  // viewport.
  gfx::Rect viewport_rect(1100, 0, 1000, 1000);
  EXPECT_FALSE(viewport_rect.Intersects(gfx::Rect(layer_bounds)));

  client_.set_tree(ACTIVE_TREE);
  tiling_->UpdateTilePriorities(ACTIVE_TREE,
                                viewport_rect,  // visible content rect
                                1.f,            // current contents scale
                                1.0,            // current frame time
                                Occlusion());
  VerifyTiles(1.f, gfx::Rect(layer_bounds), base::Bind(&TileExists, true));
}
+
+static void TilesIntersectingRectExist(const gfx::Rect& rect,
+ bool intersect_exists,
+ Tile* tile,
+ const gfx::Rect& geometry_rect) {
+ bool intersects = rect.Intersects(geometry_rect);
+ bool expected_exists = intersect_exists ? intersects : !intersects;
+ EXPECT_EQ(expected_exists, tile != NULL)
+ << "Rects intersecting " << rect.ToString() << " should exist. "
+ << "Current tile rect is " << geometry_rect.ToString();
+}
+
// With the interest area capped at a single tile, only tiles intersecting
// the small visible rect should be created on a huge layer.
TEST_F(PictureLayerTilingIteratorTest,
       TilesExistLargeViewportAndLayerWithSmallVisibleArea) {
  gfx::Size layer_bounds(10000, 10000);
  Initialize(gfx::Size(100, 100), 1.f, layer_bounds);
  VerifyTilesExactlyCoverRect(1.f, gfx::Rect(layer_bounds));
  VerifyTiles(1.f, gfx::Rect(layer_bounds), base::Bind(&TileExists, false));

  gfx::Rect visible_rect(8000, 8000, 50, 50);

  client_.set_tree(ACTIVE_TREE);
  // Restrict the interest area so it cannot extend beyond the visible rect.
  set_max_tiles_for_interest_area(1);
  tiling_->UpdateTilePriorities(ACTIVE_TREE,
                                visible_rect,  // visible content rect
                                1.f,           // current contents scale
                                1.0,           // current frame time
                                Occlusion());
  VerifyTiles(1.f,
              gfx::Rect(layer_bounds),
              base::Bind(&TilesIntersectingRectExist, visible_rect, true));
}
+
// A tiling synced from an active set into a pending set starts without
// tiles; running UpdateTilePriorities on the pending set (even at the same
// frame time) creates them.
TEST_F(PictureLayerTilingIteratorTest, AddTilingsToMatchScale) {
  gfx::Size layer_bounds(1099, 801);
  gfx::Size tile_size(100, 100);

  client_.SetTileSize(tile_size);
  client_.set_tree(PENDING_TREE);

  PictureLayerTilingSet active_set(&client_, layer_bounds);

  active_set.AddTiling(1.f);

  // Freshly added tiling has no tiles until priorities are updated.
  VerifyTiles(active_set.tiling_at(0),
              1.f,
              gfx::Rect(layer_bounds),
              base::Bind(&TileExists, false));

  UpdateAllTilePriorities(&active_set,
                          PENDING_TREE,
                          gfx::Rect(layer_bounds),  // visible content rect
                          1.f,                      // current contents scale
                          1.0);                     // current frame time

  // The active tiling has tiles now.
  VerifyTiles(active_set.tiling_at(0),
              1.f,
              gfx::Rect(layer_bounds),
              base::Bind(&TileExists, true));

  // Add the same tilings to the pending set.
  PictureLayerTilingSet pending_set(&client_, layer_bounds);
  Region invalidation;
  pending_set.SyncTilings(active_set, layer_bounds, invalidation, 0.f);

  // The pending tiling starts with no tiles.
  VerifyTiles(pending_set.tiling_at(0),
              1.f,
              gfx::Rect(layer_bounds),
              base::Bind(&TileExists, false));

  // UpdateTilePriorities on the pending tiling at the same frame time. The
  // pending tiling should get tiles.
  UpdateAllTilePriorities(&pending_set,
                          PENDING_TREE,
                          gfx::Rect(layer_bounds),  // visible content rect
                          1.f,                      // current contents scale
                          1.0);                     // current frame time

  VerifyTiles(pending_set.tiling_at(0),
              1.f,
              gfx::Rect(layer_bounds),
              base::Bind(&TileExists, true));
}
+
+TEST(UpdateTilePrioritiesTest, VisibleTiles) {
+ // The TilePriority of visible tiles should have zero distance_to_visible
+ // and time_to_visible.
+
+ FakePictureLayerTilingClient client;
+ scoped_ptr<TestablePictureLayerTiling> tiling;
+
+ gfx::Size device_viewport(800, 600);
+ gfx::Size last_layer_bounds(200, 200);
+ gfx::Size current_layer_bounds(200, 200);
+ float current_layer_contents_scale = 1.f;
+ gfx::Transform current_screen_transform;
+ double current_frame_time_in_seconds = 1.0;
+
+ gfx::Rect viewport_in_layer_space = ViewportInLayerSpace(
+ current_screen_transform, device_viewport);
+
+ client.SetTileSize(gfx::Size(100, 100));
+ client.set_tree(ACTIVE_TREE);
+ tiling = TestablePictureLayerTiling::Create(1.0f, // contents_scale
+ current_layer_bounds,
+ &client);
+
+ tiling->UpdateTilePriorities(ACTIVE_TREE,
+ viewport_in_layer_space,
+ current_layer_contents_scale,
+ current_frame_time_in_seconds,
+ Occlusion());
+
+ ASSERT_TRUE(tiling->TileAt(0, 0));
+ ASSERT_TRUE(tiling->TileAt(0, 1));
+ ASSERT_TRUE(tiling->TileAt(1, 0));
+ ASSERT_TRUE(tiling->TileAt(1, 1));
+
+ TilePriority priority = tiling->TileAt(0, 0)->priority(ACTIVE_TREE);
+ EXPECT_FLOAT_EQ(0.f, priority.distance_to_visible);
+ EXPECT_FLOAT_EQ(TilePriority::NOW, priority.priority_bin);
+
+ priority = tiling->TileAt(0, 1)->priority(ACTIVE_TREE);
+ EXPECT_FLOAT_EQ(0.f, priority.distance_to_visible);
+ EXPECT_FLOAT_EQ(TilePriority::NOW, priority.priority_bin);
+
+ priority = tiling->TileAt(1, 0)->priority(ACTIVE_TREE);
+ EXPECT_FLOAT_EQ(0.f, priority.distance_to_visible);
+ EXPECT_FLOAT_EQ(TilePriority::NOW, priority.priority_bin);
+
+ priority = tiling->TileAt(1, 1)->priority(ACTIVE_TREE);
+ EXPECT_FLOAT_EQ(0.f, priority.distance_to_visible);
+ EXPECT_FLOAT_EQ(TilePriority::NOW, priority.priority_bin);
+}
+
// The TilePriority of offscreen tiles (without movement) should have nonzero
// distance_to_visible and a non-NOW priority bin.
TEST(UpdateTilePrioritiesTest, OffscreenTiles) {
  FakePictureLayerTilingClient client;
  scoped_ptr<TestablePictureLayerTiling> tiling;

  gfx::Size device_viewport(800, 600);
  gfx::Size last_layer_bounds(200, 200);
  gfx::Size current_layer_bounds(200, 200);
  float current_layer_contents_scale = 1.f;
  gfx::Transform last_screen_transform;
  gfx::Transform current_screen_transform;
  double current_frame_time_in_seconds = 1.0;

  // Push the layer just past the right edge of the 800-wide viewport.
  current_screen_transform.Translate(850, 0);
  last_screen_transform = current_screen_transform;

  gfx::Rect viewport_in_layer_space = ViewportInLayerSpace(
      current_screen_transform, device_viewport);

  client.SetTileSize(gfx::Size(100, 100));
  client.set_tree(ACTIVE_TREE);
  tiling = TestablePictureLayerTiling::Create(1.0f,  // contents_scale
                                              current_layer_bounds,
                                              &client);

  tiling->UpdateTilePriorities(ACTIVE_TREE,
                               viewport_in_layer_space,
                               current_layer_contents_scale,
                               current_frame_time_in_seconds,
                               Occlusion());

  ASSERT_TRUE(tiling->TileAt(0, 0));
  ASSERT_TRUE(tiling->TileAt(0, 1));
  ASSERT_TRUE(tiling->TileAt(1, 0));
  ASSERT_TRUE(tiling->TileAt(1, 1));

  // Every tile is offscreen: positive distance, not in the NOW bin.
  TilePriority priority = tiling->TileAt(0, 0)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);

  priority = tiling->TileAt(0, 1)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);

  priority = tiling->TileAt(1, 0)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);

  priority = tiling->TileAt(1, 1)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);

  // Furthermore, in this scenario tiles on the right hand side should have a
  // larger distance to visible.
  TilePriority left = tiling->TileAt(0, 0)->priority(ACTIVE_TREE);
  TilePriority right = tiling->TileAt(1, 0)->priority(ACTIVE_TREE);
  EXPECT_GT(right.distance_to_visible, left.distance_to_visible);

  left = tiling->TileAt(0, 1)->priority(ACTIVE_TREE);
  right = tiling->TileAt(1, 1)->priority(ACTIVE_TREE);
  EXPECT_GT(right.distance_to_visible, left.distance_to_visible);
}
+
// Sanity check that a layer with some tiles visible and others offscreen has
// correct TilePriorities for each tile.
TEST(UpdateTilePrioritiesTest, PartiallyOffscreenLayer) {
  FakePictureLayerTilingClient client;
  scoped_ptr<TestablePictureLayerTiling> tiling;

  gfx::Size device_viewport(800, 600);
  gfx::Size last_layer_bounds(200, 200);
  gfx::Size current_layer_bounds(200, 200);
  float current_layer_contents_scale = 1.f;
  gfx::Transform last_screen_transform;
  gfx::Transform current_screen_transform;
  double current_frame_time_in_seconds = 1.0;

  // Position the layer so only its top-left corner overlaps the 800x600
  // viewport; the other three tiles hang off the right/bottom edges.
  current_screen_transform.Translate(705, 505);
  last_screen_transform = current_screen_transform;

  gfx::Rect viewport_in_layer_space = ViewportInLayerSpace(
      current_screen_transform, device_viewport);

  client.SetTileSize(gfx::Size(100, 100));
  client.set_tree(ACTIVE_TREE);
  tiling = TestablePictureLayerTiling::Create(1.0f,  // contents_scale
                                              current_layer_bounds,
                                              &client);

  tiling->UpdateTilePriorities(ACTIVE_TREE,
                               viewport_in_layer_space,
                               current_layer_contents_scale,
                               current_frame_time_in_seconds,
                               Occlusion());

  ASSERT_TRUE(tiling->TileAt(0, 0));
  ASSERT_TRUE(tiling->TileAt(0, 1));
  ASSERT_TRUE(tiling->TileAt(1, 0));
  ASSERT_TRUE(tiling->TileAt(1, 1));

  // Only the top-left tile is visible.
  TilePriority priority = tiling->TileAt(0, 0)->priority(ACTIVE_TREE);
  EXPECT_FLOAT_EQ(0.f, priority.distance_to_visible);
  EXPECT_FLOAT_EQ(TilePriority::NOW, priority.priority_bin);

  // The remaining tiles are offscreen.
  priority = tiling->TileAt(0, 1)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);

  priority = tiling->TileAt(1, 0)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);

  priority = tiling->TileAt(1, 1)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);
}
+
// Each tile of a layer may be affected differently by a transform; Check
// that UpdateTilePriorities correctly accounts for the transform between
// layer space and screen space.
TEST(UpdateTilePrioritiesTest, PartiallyOffscreenRotatedLayer) {
  FakePictureLayerTilingClient client;
  scoped_ptr<TestablePictureLayerTiling> tiling;

  gfx::Size device_viewport(800, 600);
  gfx::Size last_layer_bounds(200, 200);
  gfx::Size current_layer_bounds(200, 200);
  float current_layer_contents_scale = 1.f;
  gfx::Transform last_screen_transform;
  gfx::Transform current_screen_transform;
  double current_frame_time_in_seconds = 1.0;

  // A diagonally rotated layer that is partially off the bottom of the screen.
  // In this configuration, only the top-left tile would be visible.
  current_screen_transform.Translate(600, 750);
  current_screen_transform.RotateAboutZAxis(45);
  last_screen_transform = current_screen_transform;

  gfx::Rect viewport_in_layer_space = ViewportInLayerSpace(
      current_screen_transform, device_viewport);

  client.SetTileSize(gfx::Size(100, 100));
  client.set_tree(ACTIVE_TREE);
  tiling = TestablePictureLayerTiling::Create(1.0f,  // contents_scale
                                              current_layer_bounds,
                                              &client);

  tiling->UpdateTilePriorities(ACTIVE_TREE,
                               viewport_in_layer_space,
                               current_layer_contents_scale,
                               current_frame_time_in_seconds,
                               Occlusion());

  ASSERT_TRUE(tiling->TileAt(0, 0));
  ASSERT_TRUE(tiling->TileAt(0, 1));
  ASSERT_TRUE(tiling->TileAt(1, 0));
  ASSERT_TRUE(tiling->TileAt(1, 1));

  // The left-column tiles end up visible under this rotation; note the
  // viewport is mapped to an axis-aligned bounding rect in layer space, so
  // more than just the top-left tile can report NOW here.
  TilePriority priority = tiling->TileAt(0, 0)->priority(ACTIVE_TREE);
  EXPECT_FLOAT_EQ(0.f, priority.distance_to_visible);
  EXPECT_EQ(TilePriority::NOW, priority.priority_bin);

  priority = tiling->TileAt(0, 1)->priority(ACTIVE_TREE);
  EXPECT_FLOAT_EQ(0.f, priority.distance_to_visible);
  EXPECT_EQ(TilePriority::NOW, priority.priority_bin);

  // The right-column tiles are offscreen.
  priority = tiling->TileAt(1, 0)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);

  priority = tiling->TileAt(1, 1)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);

  // Furthermore, in this scenario the top-right tile is farther than the
  // top-left, and the bottom-right tile is exactly as far as the top-right
  // (both measured in the axis-aligned layer-space approximation).
  TilePriority top_left = tiling->TileAt(0, 0)->priority(ACTIVE_TREE);
  TilePriority top_right = tiling->TileAt(1, 0)->priority(ACTIVE_TREE);
  TilePriority bottom_right = tiling->TileAt(1, 1)->priority(ACTIVE_TREE);
  EXPECT_GT(top_right.distance_to_visible, top_left.distance_to_visible);

  EXPECT_EQ(bottom_right.distance_to_visible, top_right.distance_to_visible);
}
+
// Perspective transforms need to take a different code path.
// This test checks tile priorities of a perspective layer.
TEST(UpdateTilePrioritiesTest, PerspectiveLayer) {
  FakePictureLayerTilingClient client;
  scoped_ptr<TestablePictureLayerTiling> tiling;

  gfx::Size device_viewport(800, 600);
  gfx::Rect visible_layer_rect(0, 0, 0, 0);  // offscreen.
  gfx::Size last_layer_bounds(200, 200);
  gfx::Size current_layer_bounds(200, 200);
  float current_layer_contents_scale = 1.f;
  gfx::Transform last_screen_transform;
  gfx::Transform current_screen_transform;
  double current_frame_time_in_seconds = 1.0;

  // A 3d perspective layer rotated about its Y axis, translated to almost
  // fully offscreen. The left side will appear closer (i.e. larger in 2d) than
  // the right side, so the top-left tile will technically be closer than the
  // top-right.

  // Translate layer to offscreen
  current_screen_transform.Translate(400.0, 630.0);
  // Apply perspective about the center of the layer
  current_screen_transform.Translate(100.0, 100.0);
  current_screen_transform.ApplyPerspectiveDepth(100.0);
  current_screen_transform.RotateAboutYAxis(10.0);
  current_screen_transform.Translate(-100.0, -100.0);
  last_screen_transform = current_screen_transform;

  // Sanity check that this transform wouldn't cause w<0 clipping.
  bool clipped;
  MathUtil::MapQuad(current_screen_transform,
                    gfx::QuadF(gfx::RectF(0, 0, 200, 200)),
                    &clipped);
  ASSERT_FALSE(clipped);

  gfx::Rect viewport_in_layer_space = ViewportInLayerSpace(
      current_screen_transform, device_viewport);

  client.SetTileSize(gfx::Size(100, 100));
  client.set_tree(ACTIVE_TREE);
  tiling = TestablePictureLayerTiling::Create(1.0f,  // contents_scale
                                              current_layer_bounds,
                                              &client);

  tiling->UpdateTilePriorities(ACTIVE_TREE,
                               viewport_in_layer_space,
                               current_layer_contents_scale,
                               current_frame_time_in_seconds,
                               Occlusion());

  ASSERT_TRUE(tiling->TileAt(0, 0));
  ASSERT_TRUE(tiling->TileAt(0, 1));
  ASSERT_TRUE(tiling->TileAt(1, 0));
  ASSERT_TRUE(tiling->TileAt(1, 1));

  // The top row of tiles is visible (zero distance, NOW bin); the bottom
  // row is offscreen with a positive distance_to_visible.
  TilePriority priority = tiling->TileAt(0, 0)->priority(ACTIVE_TREE);
  EXPECT_FLOAT_EQ(priority.distance_to_visible, 0.f);
  EXPECT_EQ(TilePriority::NOW, priority.priority_bin);

  priority = tiling->TileAt(0, 1)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);

  priority = tiling->TileAt(1, 0)->priority(ACTIVE_TREE);
  EXPECT_FLOAT_EQ(priority.distance_to_visible, 0.f);
  EXPECT_EQ(TilePriority::NOW, priority.priority_bin);

  priority = tiling->TileAt(1, 1)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);

  // Furthermore, in this scenario the top-left distance_to_visible
  // will be smallest, followed by top-right. The bottom layers
  // will of course be further than the top layers.
  TilePriority top_left = tiling->TileAt(0, 0)->priority(ACTIVE_TREE);
  TilePriority top_right = tiling->TileAt(1, 0)->priority(ACTIVE_TREE);
  TilePriority bottom_left = tiling->TileAt(0, 1)->priority(ACTIVE_TREE);
  TilePriority bottom_right = tiling->TileAt(1, 1)->priority(ACTIVE_TREE);

  EXPECT_GT(bottom_right.distance_to_visible, top_right.distance_to_visible);

  EXPECT_GT(bottom_left.distance_to_visible, top_left.distance_to_visible);
}
+
+TEST(UpdateTilePrioritiesTest, PerspectiveLayerClippedByW) {
+ // Perspective transforms need to take a different code path.
+ // This test checks tile priorities of a perspective layer.
+
+ FakePictureLayerTilingClient client;
+ scoped_ptr<TestablePictureLayerTiling> tiling;
+
+ gfx::Size device_viewport(800, 600);
+ gfx::Size last_layer_bounds(200, 200);
+ gfx::Size current_layer_bounds(200, 200);
+ float current_layer_contents_scale = 1.f;
+ gfx::Transform last_screen_transform;
+ gfx::Transform current_screen_transform;
+ double current_frame_time_in_seconds = 1.0;
+
+ // A 3d perspective layer rotated about its Y axis, translated to almost
+ // fully offscreen. The left side will appear closer (i.e. larger in 2d) than
+ // the right side, so the top-left tile will technically be closer than the
+ // top-right.
+
+ // Translate layer to offscreen
+ current_screen_transform.Translate(400.0, 970.0);
+ // Apply perspective and rotation about the center of the layer
+ current_screen_transform.Translate(100.0, 100.0);
+ current_screen_transform.ApplyPerspectiveDepth(10.0);
+ current_screen_transform.RotateAboutYAxis(10.0);
+ current_screen_transform.Translate(-100.0, -100.0);
+ last_screen_transform = current_screen_transform;
+
+ // Sanity check that this transform does cause w<0 clipping for the left side
+ // of the layer, but not the right side.
+ bool clipped;
+ MathUtil::MapQuad(current_screen_transform,
+ gfx::QuadF(gfx::RectF(0, 0, 100, 200)),
+ &clipped);
+ ASSERT_TRUE(clipped);
+
+ MathUtil::MapQuad(current_screen_transform,
+ gfx::QuadF(gfx::RectF(100, 0, 100, 200)),
+ &clipped);
+ ASSERT_FALSE(clipped);
+
+ gfx::Rect viewport_in_layer_space = ViewportInLayerSpace(
+ current_screen_transform, device_viewport);
+
+ client.SetTileSize(gfx::Size(100, 100));
+ client.set_tree(ACTIVE_TREE);
+ tiling = TestablePictureLayerTiling::Create(1.0f, // contents_scale
+ current_layer_bounds,
+ &client);
+
+ tiling->UpdateTilePriorities(ACTIVE_TREE,
+ viewport_in_layer_space,
+ current_layer_contents_scale,
+ current_frame_time_in_seconds,
+ Occlusion());
+
+ ASSERT_TRUE(tiling->TileAt(0, 0));
+ ASSERT_TRUE(tiling->TileAt(0, 1));
+ ASSERT_TRUE(tiling->TileAt(1, 0));
+ ASSERT_TRUE(tiling->TileAt(1, 1));
+
+ // Left-side tiles will be clipped by the transform, so we have to assume
+ // they are visible just in case.
+ TilePriority priority = tiling->TileAt(0, 0)->priority(ACTIVE_TREE);
+ EXPECT_FLOAT_EQ(0.f, priority.distance_to_visible);
+ EXPECT_FLOAT_EQ(TilePriority::NOW, priority.priority_bin);
+
+ priority = tiling->TileAt(0, 1)->priority(ACTIVE_TREE);
+ EXPECT_GT(priority.distance_to_visible, 0.f);
+ EXPECT_NE(TilePriority::NOW, priority.priority_bin);
+
+ // Right-side tiles will have a positive distance_to_visible
+ // and an infinite time_to_visible.
+ priority = tiling->TileAt(1, 0)->priority(ACTIVE_TREE);
+ EXPECT_FLOAT_EQ(priority.distance_to_visible, 0.f);
+ EXPECT_EQ(TilePriority::NOW, priority.priority_bin);
+
+ priority = tiling->TileAt(1, 1)->priority(ACTIVE_TREE);
+ EXPECT_GT(priority.distance_to_visible, 0.f);
+ EXPECT_NE(TilePriority::NOW, priority.priority_bin);
+}
+
// Test that priorities are computed correctly when the layer has been
// moving between two UpdateTilePriorities calls (two frame times with
// different transforms).
TEST(UpdateTilePrioritiesTest, BasicMotion) {
  FakePictureLayerTilingClient client;
  scoped_ptr<TestablePictureLayerTiling> tiling;

  gfx::Size device_viewport(800, 600);
  gfx::Rect visible_layer_rect(0, 0, 0, 0);
  gfx::Size last_layer_bounds(200, 200);
  gfx::Size current_layer_bounds(200, 200);
  float last_layer_contents_scale = 1.f;
  float current_layer_contents_scale = 1.f;
  gfx::Transform last_screen_transform;
  gfx::Transform current_screen_transform;
  double last_frame_time_in_seconds = 1.0;
  double current_frame_time_in_seconds = 2.0;

  // Offscreen layer is coming closer to viewport at 1000 pixels per second.
  current_screen_transform.Translate(1800, 0);
  last_screen_transform.Translate(2800, 0);

  gfx::Rect viewport_in_layer_space = ViewportInLayerSpace(
      current_screen_transform, device_viewport);

  client.SetTileSize(gfx::Size(100, 100));
  client.set_tree(ACTIVE_TREE);
  tiling = TestablePictureLayerTiling::Create(1.0f,  // contents_scale
                                              current_layer_bounds,
                                              &client);

  // previous ("last") frame
  tiling->UpdateTilePriorities(ACTIVE_TREE,
                               viewport_in_layer_space,
                               last_layer_contents_scale,
                               last_frame_time_in_seconds,
                               Occlusion());

  // current frame
  tiling->UpdateTilePriorities(ACTIVE_TREE,
                               viewport_in_layer_space,
                               current_layer_contents_scale,
                               current_frame_time_in_seconds,
                               Occlusion());

  ASSERT_TRUE(tiling->TileAt(0, 0));
  ASSERT_TRUE(tiling->TileAt(0, 1));
  ASSERT_TRUE(tiling->TileAt(1, 0));
  ASSERT_TRUE(tiling->TileAt(1, 1));

  // Despite the motion toward the viewport, the layer is still offscreen at
  // the current frame, so every tile has a positive distance and a non-NOW
  // bin.
  TilePriority priority = tiling->TileAt(0, 0)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);

  priority = tiling->TileAt(0, 1)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);

  // The right-hand-side tiles are farther away (by roughly one tile width)
  // but still share the same offscreen expectations.
  priority = tiling->TileAt(1, 0)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);

  priority = tiling->TileAt(1, 1)->priority(ACTIVE_TREE);
  EXPECT_GT(priority.distance_to_visible, 0.f);
  EXPECT_NE(TilePriority::NOW, priority.priority_bin);
}
+
+TEST(UpdateTilePrioritiesTest, RotationMotion) {
+ // Each tile of a layer may be affected differently by a transform; Check
+ // that UpdateTilePriorities correctly accounts for the transform between
+ // layer space and screen space.
+
+ FakePictureLayerTilingClient client;
+ scoped_ptr<TestablePictureLayerTiling> tiling;
+
+ gfx::Size device_viewport(800, 600);
+ gfx::Rect visible_layer_rect(0, 0, 0, 0); // offscreen.
+ gfx::Size last_layer_bounds(200, 200);
+ gfx::Size current_layer_bounds(200, 200);
+ float last_layer_contents_scale = 1.f;
+ float current_layer_contents_scale = 1.f;
+ gfx::Transform last_screen_transform;
+ gfx::Transform current_screen_transform;
+ double last_frame_time_in_seconds = 1.0;
+ double current_frame_time_in_seconds = 2.0;
+
+ // Rotation motion is set up specifically so that:
+ // - rotation occurs about the center of the layer
+ // - the top-left tile becomes visible on rotation
+ // - the top-right tile will have an infinite time_to_visible
+ // because it is rotating away from viewport.
+ // - bottom-left layer will have a positive non-zero time_to_visible
+ // because it is rotating toward the viewport.
+ current_screen_transform.Translate(400, 550);
+ current_screen_transform.RotateAboutZAxis(45);
+
+ last_screen_transform.Translate(400, 550);
+
+ gfx::Rect viewport_in_layer_space = ViewportInLayerSpace(
+ current_screen_transform, device_viewport);
+
+ client.SetTileSize(gfx::Size(100, 100));
+ client.set_tree(ACTIVE_TREE);
+ tiling = TestablePictureLayerTiling::Create(1.0f, // contents_scale
+ current_layer_bounds,
+ &client);
+
+ // previous ("last") frame
+ tiling->UpdateTilePriorities(ACTIVE_TREE,
+ viewport_in_layer_space,
+ last_layer_contents_scale,
+ last_frame_time_in_seconds,
+ Occlusion());
+
+ // current frame
+ tiling->UpdateTilePriorities(ACTIVE_TREE,
+ viewport_in_layer_space,
+ current_layer_contents_scale,
+ current_frame_time_in_seconds,
+ Occlusion());
+
+ ASSERT_TRUE(tiling->TileAt(0, 0));
+ ASSERT_TRUE(tiling->TileAt(0, 1));
+ ASSERT_TRUE(tiling->TileAt(1, 0));
+ ASSERT_TRUE(tiling->TileAt(1, 1));
+
+ TilePriority priority = tiling->TileAt(0, 0)->priority(ACTIVE_TREE);
+ EXPECT_FLOAT_EQ(0.f, priority.distance_to_visible);
+ EXPECT_EQ(TilePriority::NOW, priority.priority_bin);
+
+ priority = tiling->TileAt(0, 1)->priority(ACTIVE_TREE);
+ EXPECT_FLOAT_EQ(0.f, priority.distance_to_visible);
+ EXPECT_EQ(TilePriority::NOW, priority.priority_bin);
+
+ priority = tiling->TileAt(1, 0)->priority(ACTIVE_TREE);
+ EXPECT_FLOAT_EQ(0.f, priority.distance_to_visible);
+ EXPECT_EQ(TilePriority::NOW, priority.priority_bin);
+}
+
+TEST(PictureLayerTilingTest, ResetClearsPriorities) {
+ FakePictureLayerTilingClient client;
+ scoped_ptr<TestablePictureLayerTiling> tiling;
+
+ client.SetTileSize(gfx::Size(100, 100));
+ client.set_tree(ACTIVE_TREE);
+ tiling = TestablePictureLayerTiling::Create(1.0f, // contents_scale
+ gfx::Size(100, 100),
+ &client);
+ tiling->UpdateTilePriorities(
+ ACTIVE_TREE, gfx::Rect(0, 0, 100, 100), 1.0f, 1.0f, Occlusion());
+
+ std::vector<scoped_refptr<Tile> > tiles = tiling->AllRefTilesForTesting();
+ ASSERT_GT(tiles.size(), 0u);
+ for (std::vector<scoped_refptr<Tile> >::const_iterator it = tiles.begin();
+ it != tiles.end();
+ ++it) {
+ EXPECT_NE(TilePriority(), (*it)->priority(ACTIVE_TREE));
+ }
+
+ tiling->Reset();
+ for (std::vector<scoped_refptr<Tile> >::const_iterator it = tiles.begin();
+ it != tiles.end();
+ ++it) {
+ EXPECT_EQ(TilePriority(), (*it)->priority(ACTIVE_TREE));
+ }
+ tiles.clear();
+}
+
+TEST(PictureLayerTilingTest, RecycledTilesCleared) {
+ // This test performs the following:
+ // Setup:
+ // - Two tilings, one active one recycled with all tiles shared.
+ // Procedure:
+ // - Viewport moves somewhere far away and active tiling clears tiles.
+ // - Viewport moves back and a new active tiling tile is created.
+ // Result:
+ // - Recycle tiling does _not_ have the tile in the same location (thus it
+ // will be shared next time a pending tiling is created).
+
+ FakePictureLayerTilingClient active_client;
+ scoped_ptr<TestablePictureLayerTiling> active_tiling;
+
+ active_client.SetTileSize(gfx::Size(100, 100));
+ active_client.set_tree(ACTIVE_TREE);
+ active_client.set_max_tiles_for_interest_area(10);
+ active_tiling = TestablePictureLayerTiling::Create(1.0f, // contents_scale
+ gfx::Size(10000, 10000),
+ &active_client);
+ // Create all tiles on this tiling.
+ active_tiling->UpdateTilePriorities(
+ ACTIVE_TREE, gfx::Rect(0, 0, 100, 100), 1.0f, 1.0f, Occlusion());
+
+ FakePictureLayerTilingClient recycle_client;
+ recycle_client.SetTileSize(gfx::Size(100, 100));
+ recycle_client.set_tree(PENDING_TREE);
+ recycle_client.set_twin_tiling(active_tiling.get());
+ recycle_client.set_max_tiles_for_interest_area(10);
+
+ scoped_ptr<TestablePictureLayerTiling> recycle_tiling;
+ recycle_tiling = TestablePictureLayerTiling::Create(1.0f, // contents_scale
+ gfx::Size(10000, 10000),
+ &recycle_client);
+
+ // Create all tiles on the second tiling. All tiles should be shared.
+ recycle_tiling->UpdateTilePriorities(
+ PENDING_TREE, gfx::Rect(0, 0, 100, 100), 1.0f, 1.0f, Occlusion());
+
+ // Set the second tiling as recycled.
+ active_client.set_twin_tiling(NULL);
+ active_client.set_recycled_twin_tiling(recycle_tiling.get());
+ recycle_client.set_twin_tiling(NULL);
+
+ // Verify that tiles exist and are shared.
+ EXPECT_TRUE(active_tiling->TileAt(0, 0));
+ EXPECT_TRUE(recycle_tiling->TileAt(0, 0));
+ EXPECT_EQ(active_tiling->TileAt(0, 0), recycle_tiling->TileAt(0, 0));
+
+ // Move the viewport far away from the (0, 0) tile.
+ active_tiling->UpdateTilePriorities(
+ ACTIVE_TREE, gfx::Rect(9000, 9000, 100, 100), 1.0f, 2.0, Occlusion());
+ // Ensure the tile was deleted on both tilings.
+ EXPECT_FALSE(active_tiling->TileAt(0, 0));
+ EXPECT_FALSE(recycle_tiling->TileAt(0, 0));
+
+ // Move the viewport back to (0, 0) tile.
+ active_tiling->UpdateTilePriorities(
+ ACTIVE_TREE, gfx::Rect(0, 0, 100, 100), 1.0f, 3.0, Occlusion());
+
+ // Ensure that we now have a tile here, but the recycle tiling does not.
+ EXPECT_TRUE(active_tiling->TileAt(0, 0));
+ EXPECT_FALSE(recycle_tiling->TileAt(0, 0));
+}
+
+TEST(PictureLayerTilingTest, RecycledTilesClearedOnReset) {
+ FakePictureLayerTilingClient active_client;
+ scoped_ptr<TestablePictureLayerTiling> active_tiling;
+
+ active_client.SetTileSize(gfx::Size(100, 100));
+ active_client.set_tree(ACTIVE_TREE);
+ active_tiling = TestablePictureLayerTiling::Create(1.0f, // contents_scale
+ gfx::Size(100, 100),
+ &active_client);
+ // Create all tiles on this tiling.
+ active_tiling->UpdateTilePriorities(
+ ACTIVE_TREE, gfx::Rect(0, 0, 100, 100), 1.0f, 1.0f, Occlusion());
+
+ FakePictureLayerTilingClient recycle_client;
+ recycle_client.SetTileSize(gfx::Size(100, 100));
+ recycle_client.set_tree(PENDING_TREE);
+ recycle_client.set_twin_tiling(active_tiling.get());
+ recycle_client.set_max_tiles_for_interest_area(10);
+
+ scoped_ptr<TestablePictureLayerTiling> recycle_tiling;
+ recycle_tiling = TestablePictureLayerTiling::Create(1.0f, // contents_scale
+ gfx::Size(100, 100),
+ &recycle_client);
+
+ // Create all tiles on the recycle tiling. All tiles should be shared.
+ recycle_tiling->UpdateTilePriorities(
+ PENDING_TREE, gfx::Rect(0, 0, 100, 100), 1.0f, 1.0f, Occlusion());
+
+ // Set the second tiling as recycled.
+ active_client.set_twin_tiling(NULL);
+ active_client.set_recycled_twin_tiling(recycle_tiling.get());
+ recycle_client.set_twin_tiling(NULL);
+
+ // Verify that tiles exist and are shared.
+ EXPECT_TRUE(active_tiling->TileAt(0, 0));
+ EXPECT_TRUE(recycle_tiling->TileAt(0, 0));
+ EXPECT_EQ(active_tiling->TileAt(0, 0), recycle_tiling->TileAt(0, 0));
+
+ // Reset the active tiling. The recycle tiles should be released too.
+ active_tiling->Reset();
+ EXPECT_FALSE(active_tiling->TileAt(0, 0));
+ EXPECT_FALSE(recycle_tiling->TileAt(0, 0));
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/picture_pile.cc b/cc/resources/picture_pile.cc
new file mode 100644
index 0000000..4858d24
--- /dev/null
+++ b/cc/resources/picture_pile.cc
@@ -0,0 +1,563 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/picture_pile.h"
+
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+#include "cc/base/region.h"
+#include "cc/debug/rendering_stats_instrumentation.h"
+#include "cc/resources/picture_pile_impl.h"
+#include "cc/resources/raster_worker_pool.h"
+#include "cc/resources/tile_priority.h"
+
+namespace {
+// Layout pixel buffer around the visible layer rect to record. Any base
+// picture that intersects the visible layer rect expanded by this distance
+// will be recorded.
+const int kPixelDistanceToRecord = 8000;
+// We don't perform solid color analysis on images that have more than 10 skia
+// operations.
+const int kOpCountThatIsOkToAnalyze = 10;
+
+// TODO(humper): The density threshold here is somewhat arbitrary; need a
+// way to set this from the command line so we can write a benchmark
+// script and find a sweet spot.
+const float kDensityThreshold = 0.5f;
+
+bool rect_sort_y(const gfx::Rect& r1, const gfx::Rect& r2) {
+ return r1.y() < r2.y() || (r1.y() == r2.y() && r1.x() < r2.x());
+}
+
+bool rect_sort_x(const gfx::Rect& r1, const gfx::Rect& r2) {
+ return r1.x() < r2.x() || (r1.x() == r2.x() && r1.y() < r2.y());
+}
+
+float PerformClustering(const std::vector<gfx::Rect>& tiles,
+ std::vector<gfx::Rect>* clustered_rects) {
+ // These variables track the record area and invalid area
+ // for the entire clustering
+ int total_record_area = 0;
+ int total_invalid_area = 0;
+
+ // These variables track the record area and invalid area
+ // for the current cluster being constructed.
+ gfx::Rect cur_record_rect;
+ int cluster_record_area = 0, cluster_invalid_area = 0;
+
+ for (std::vector<gfx::Rect>::const_iterator it = tiles.begin();
+ it != tiles.end();
+ it++) {
+ gfx::Rect invalid_tile = *it;
+
+ // For each tile, we consider adding the invalid tile to the
+ // current record rectangle. Only add it if the amount of empty
+ // space created is below a density threshold.
+ int tile_area = invalid_tile.width() * invalid_tile.height();
+
+ gfx::Rect proposed_union = cur_record_rect;
+ proposed_union.Union(invalid_tile);
+ int proposed_area = proposed_union.width() * proposed_union.height();
+ float proposed_density =
+ static_cast<float>(cluster_invalid_area + tile_area) /
+ static_cast<float>(proposed_area);
+
+ if (proposed_density >= kDensityThreshold) {
+ // It's okay to add this invalid tile to the
+ // current recording rectangle.
+ cur_record_rect = proposed_union;
+ cluster_record_area = proposed_area;
+ cluster_invalid_area += tile_area;
+ total_invalid_area += tile_area;
+ } else {
+ // Adding this invalid tile to the current recording rectangle
+ // would exceed our badness threshold, so put the current rectangle
+ // in the list of recording rects, and start a new one.
+ clustered_rects->push_back(cur_record_rect);
+ total_record_area += cluster_record_area;
+ cur_record_rect = invalid_tile;
+ cluster_invalid_area = tile_area;
+ cluster_record_area = tile_area;
+ }
+ }
+
+ DCHECK(!cur_record_rect.IsEmpty());
+ clustered_rects->push_back(cur_record_rect);
+ total_record_area += cluster_record_area;
+
+ DCHECK_NE(total_record_area, 0);
+
+ return static_cast<float>(total_invalid_area) /
+ static_cast<float>(total_record_area);
+}
+
+float ClusterTiles(const std::vector<gfx::Rect>& invalid_tiles,
+ std::vector<gfx::Rect>* record_rects) {
+ TRACE_EVENT1("cc", "ClusterTiles",
+ "count",
+ invalid_tiles.size());
+
+ if (invalid_tiles.size() <= 1) {
+ // Quickly handle the special case for common
+ // single-invalidation update, and also the less common
+ // case of no tiles passed in.
+ *record_rects = invalid_tiles;
+ return 1;
+ }
+
+ // Sort the invalid tiles by y coordinate.
+ std::vector<gfx::Rect> invalid_tiles_vertical = invalid_tiles;
+ std::sort(invalid_tiles_vertical.begin(),
+ invalid_tiles_vertical.end(),
+ rect_sort_y);
+
+ float vertical_density;
+ std::vector<gfx::Rect> vertical_clustering;
+ vertical_density = PerformClustering(invalid_tiles_vertical,
+ &vertical_clustering);
+
+ // If vertical density is optimal, then we can return early.
+ if (vertical_density == 1.f) {
+ *record_rects = vertical_clustering;
+ return vertical_density;
+ }
+
+ // Now try again with a horizontal sort, see which one is best
+ std::vector<gfx::Rect> invalid_tiles_horizontal = invalid_tiles;
+ std::sort(invalid_tiles_horizontal.begin(),
+ invalid_tiles_horizontal.end(),
+ rect_sort_x);
+
+ float horizontal_density;
+ std::vector<gfx::Rect> horizontal_clustering;
+ horizontal_density = PerformClustering(invalid_tiles_horizontal,
+ &horizontal_clustering);
+
+ if (vertical_density < horizontal_density) {
+ *record_rects = horizontal_clustering;
+ return horizontal_density;
+ }
+
+ *record_rects = vertical_clustering;
+ return vertical_density;
+}
+
+} // namespace
+
+namespace cc {
+
+PicturePile::PicturePile() : is_suitable_for_gpu_rasterization_(true) {
+}
+
+PicturePile::~PicturePile() {
+}
+
+bool PicturePile::UpdateAndExpandInvalidation(
+ ContentLayerClient* painter,
+ Region* invalidation,
+ SkColor background_color,
+ bool contents_opaque,
+ bool contents_fill_bounds_completely,
+ const gfx::Size& layer_size,
+ const gfx::Rect& visible_layer_rect,
+ int frame_number,
+ Picture::RecordingMode recording_mode,
+ RenderingStatsInstrumentation* stats_instrumentation) {
+ background_color_ = background_color;
+ contents_opaque_ = contents_opaque;
+ contents_fill_bounds_completely_ = contents_fill_bounds_completely;
+
+ bool updated = false;
+
+ Region resize_invalidation;
+ gfx::Size old_tiling_size = tiling_size();
+ if (old_tiling_size != layer_size) {
+ tiling_.SetTilingSize(layer_size);
+ updated = true;
+ }
+
+ gfx::Rect interest_rect = visible_layer_rect;
+ interest_rect.Inset(
+ -kPixelDistanceToRecord,
+ -kPixelDistanceToRecord,
+ -kPixelDistanceToRecord,
+ -kPixelDistanceToRecord);
+ recorded_viewport_ = interest_rect;
+ recorded_viewport_.Intersect(gfx::Rect(tiling_size()));
+
+ gfx::Rect interest_rect_over_tiles =
+ tiling_.ExpandRectToTileBounds(interest_rect);
+
+ if (old_tiling_size != layer_size) {
+ has_any_recordings_ = false;
+
+ // Drop recordings that are outside the new layer bounds or that changed
+ // size.
+ std::vector<PictureMapKey> to_erase;
+ int min_toss_x = tiling_.num_tiles_x();
+ if (tiling_size().width() > old_tiling_size.width()) {
+ min_toss_x =
+ tiling_.FirstBorderTileXIndexFromSrcCoord(old_tiling_size.width());
+ }
+ int min_toss_y = tiling_.num_tiles_y();
+ if (tiling_size().height() > old_tiling_size.height()) {
+ min_toss_y =
+ tiling_.FirstBorderTileYIndexFromSrcCoord(old_tiling_size.height());
+ }
+ for (PictureMap::const_iterator it = picture_map_.begin();
+ it != picture_map_.end();
+ ++it) {
+ const PictureMapKey& key = it->first;
+ if (key.first < min_toss_x && key.second < min_toss_y) {
+ has_any_recordings_ |= !!it->second.GetPicture();
+ continue;
+ }
+ to_erase.push_back(key);
+ }
+
+ for (size_t i = 0; i < to_erase.size(); ++i)
+ picture_map_.erase(to_erase[i]);
+
+ // If a recording is dropped and not re-recorded below, invalidate that
+ // full recording to cause any raster tiles that would use it to be
+ // dropped.
+ // If the recording will be replaced below, just invalidate newly exposed
+ // areas to force raster tiles that include the old recording to know
+ // there is new recording to display.
+ gfx::Rect old_tiling_rect_over_tiles =
+ tiling_.ExpandRectToTileBounds(gfx::Rect(old_tiling_size));
+ if (min_toss_x < tiling_.num_tiles_x()) {
+ // The bounds which we want to invalidate are the tiles along the old
+ // edge of the pile. We'll call this bounding box the OLD EDGE RECT.
+ //
+ // In the picture below, the old edge rect would be the bounding box
+ // of tiles {h,i,j}. |min_toss_x| would be equal to the horizontal index
+ // of the same tiles.
+ //
+ // old pile edge-v new pile edge-v
+ // ---------------+ - - - - - - - -+
+ // mmppssvvyybbeeh|h .
+ // mmppssvvyybbeeh|h .
+ // nnqqttwwzzccffi|i .
+ // nnqqttwwzzccffi|i .
+ // oorruuxxaaddggj|j .
+ // oorruuxxaaddggj|j .
+ // ---------------+ - - - - - - - -+ <- old pile edge
+ // .
+ // - - - - - - - - - - - - - - - -+ <- new pile edge
+ //
+ // If you were to slide a vertical beam from the left edge of the
+ // old edge rect toward the right, it would either hit the right edge
+ // of the old edge rect, or the interest rect (expanded to the bounds
+ // of the tiles it touches). The same is true for a beam parallel to
+ // any of the four edges, sliding across the old edge rect. We use
+ // the union of these four rectangles generated by these beams to
+ // determine which part of the old edge rect is outside of the expanded
+ // interest rect.
+ //
+ // Case 1: Intersect rect is outside the old edge rect. It can be
+ // either on the left or the right. The |left_rect| and |right_rect|,
+ // cover this case, one will be empty and one will cover the full
+ // old edge rect. In the picture below, |left_rect| would cover the
+ // old edge rect, and |right_rect| would be empty.
+ // +----------------------+ |^^^^^^^^^^^^^^^|
+ // |===> OLD EDGE RECT | | |
+ // |===> | | INTEREST RECT |
+ // |===> | | |
+ // |===> | | |
+ // +----------------------+ |vvvvvvvvvvvvvvv|
+ //
+ // Case 2: Interest rect is inside the old edge rect. It will always
+ // fill the entire old edge rect horizontally since the old edge rect
+ // is a single tile wide, and the interest rect has been expanded to the
+ // bounds of the tiles it touches. In this case the |left_rect| and
+ // |right_rect| will be empty, but the case is handled by the |top_rect|
+ // and |bottom_rect|. In the picture below, neither the |top_rect| nor
+ // |bottom_rect| would be empty, they would each cover the area of the old
+ // edge rect outside the expanded interest rect.
+ // +-----------------+
+ // |:::::::::::::::::|
+ // |:::::::::::::::::|
+ // |vvvvvvvvvvvvvvvvv|
+ // | |
+ // +-----------------+
+ // | INTEREST RECT |
+ // | |
+ // +-----------------+
+ // | |
+ // | OLD EDGE RECT |
+ // +-----------------+
+ //
+ // Lastly, we need to consider tiles inside the expanded interest rect.
+ // For those tiles, we want to invalidate exactly the newly exposed
+ // pixels. In the picture below the tiles in the old edge rect have been
+ // resized and the area covered by periods must be invalidated. The
+ // |exposed_rect| will cover exactly that area.
+ // v-old pile edge
+ // +---------+-------+
+ // | ........|
+ // | ........|
+ // | OLD EDGE.RECT..|
+ // | ........|
+ // | ........|
+ // | ........|
+ // | ........|
+ // | ........|
+ // | ........|
+ // +---------+-------+
+
+ int left = tiling_.TilePositionX(min_toss_x);
+ int right = left + tiling_.TileSizeX(min_toss_x);
+ int top = old_tiling_rect_over_tiles.y();
+ int bottom = old_tiling_rect_over_tiles.bottom();
+
+ int left_until = std::min(interest_rect_over_tiles.x(), right);
+ int right_until = std::max(interest_rect_over_tiles.right(), left);
+ int top_until = std::min(interest_rect_over_tiles.y(), bottom);
+ int bottom_until = std::max(interest_rect_over_tiles.bottom(), top);
+
+ int exposed_left = old_tiling_size.width();
+ int exposed_left_until = tiling_size().width();
+ int exposed_top = top;
+ int exposed_bottom = tiling_size().height();
+ DCHECK_GE(exposed_left, left);
+
+ gfx::Rect left_rect(left, top, left_until - left, bottom - top);
+ gfx::Rect right_rect(right_until, top, right - right_until, bottom - top);
+ gfx::Rect top_rect(left, top, right - left, top_until - top);
+ gfx::Rect bottom_rect(
+ left, bottom_until, right - left, bottom - bottom_until);
+ gfx::Rect exposed_rect(exposed_left,
+ exposed_top,
+ exposed_left_until - exposed_left,
+ exposed_bottom - exposed_top);
+ resize_invalidation.Union(left_rect);
+ resize_invalidation.Union(right_rect);
+ resize_invalidation.Union(top_rect);
+ resize_invalidation.Union(bottom_rect);
+ resize_invalidation.Union(exposed_rect);
+ }
+ if (min_toss_y < tiling_.num_tiles_y()) {
+ // The same thing occurs here as in the case above, but the invalidation
+ // rect is the bounding box around the bottom row of tiles in the old
+ // pile. This would be tiles {o,r,u,x,a,d,g,j} in the above picture.
+
+ int top = tiling_.TilePositionY(min_toss_y);
+ int bottom = top + tiling_.TileSizeY(min_toss_y);
+ int left = old_tiling_rect_over_tiles.x();
+ int right = old_tiling_rect_over_tiles.right();
+
+ int top_until = std::min(interest_rect_over_tiles.y(), bottom);
+ int bottom_until = std::max(interest_rect_over_tiles.bottom(), top);
+ int left_until = std::min(interest_rect_over_tiles.x(), right);
+ int right_until = std::max(interest_rect_over_tiles.right(), left);
+
+ int exposed_top = old_tiling_size.height();
+ int exposed_top_until = tiling_size().height();
+ int exposed_left = left;
+ int exposed_right = tiling_size().width();
+ DCHECK_GE(exposed_top, top);
+
+ gfx::Rect left_rect(left, top, left_until - left, bottom - top);
+ gfx::Rect right_rect(right_until, top, right - right_until, bottom - top);
+ gfx::Rect top_rect(left, top, right - left, top_until - top);
+ gfx::Rect bottom_rect(
+ left, bottom_until, right - left, bottom - bottom_until);
+ gfx::Rect exposed_rect(exposed_left,
+ exposed_top,
+ exposed_right - exposed_left,
+ exposed_top_until - exposed_top);
+ resize_invalidation.Union(left_rect);
+ resize_invalidation.Union(right_rect);
+ resize_invalidation.Union(top_rect);
+ resize_invalidation.Union(bottom_rect);
+ resize_invalidation.Union(exposed_rect);
+ }
+ }
+
+ Region invalidation_expanded_to_full_tiles;
+ for (Region::Iterator i(*invalidation); i.has_rect(); i.next()) {
+ gfx::Rect invalid_rect = i.rect();
+
+ // Expand invalidation that is outside tiles that intersect the interest
+ // rect. These tiles are no longer valid and should be considered fully
+ // invalid, so we can know to not keep around raster tiles that intersect
+ // with these recording tiles.
+ gfx::Rect invalid_rect_outside_interest_rect_tiles = invalid_rect;
+ // TODO(danakj): We should have a Rect-subtract-Rect-to-2-rects operator
+ // instead of using Rect::Subtract which gives you the bounding box of the
+ // subtraction.
+ invalid_rect_outside_interest_rect_tiles.Subtract(interest_rect_over_tiles);
+ invalidation_expanded_to_full_tiles.Union(tiling_.ExpandRectToTileBounds(
+ invalid_rect_outside_interest_rect_tiles));
+
+ // Split this inflated invalidation across tile boundaries and apply it
+ // to all tiles that it touches.
+ bool include_borders = true;
+ for (TilingData::Iterator iter(&tiling_, invalid_rect, include_borders);
+ iter;
+ ++iter) {
+ const PictureMapKey& key = iter.index();
+
+ PictureMap::iterator picture_it = picture_map_.find(key);
+ if (picture_it == picture_map_.end())
+ continue;
+
+ // Inform the grid cell that it has been invalidated in this frame.
+ updated = picture_it->second.Invalidate(frame_number) || updated;
+ // Invalidate drops the picture so the whole tile better be invalidated if
+ // it won't be re-recorded below.
+ DCHECK_IMPLIES(!tiling_.TileBounds(key.first, key.second)
+ .Intersects(interest_rect_over_tiles),
+ invalidation_expanded_to_full_tiles.Contains(
+ tiling_.TileBounds(key.first, key.second)));
+ }
+ }
+
+ invalidation->Union(invalidation_expanded_to_full_tiles);
+ invalidation->Union(resize_invalidation);
+
+ // Make a list of all invalid tiles; we will attempt to
+ // cluster these into multiple invalidation regions.
+ std::vector<gfx::Rect> invalid_tiles;
+ bool include_borders = true;
+ for (TilingData::Iterator it(&tiling_, interest_rect, include_borders); it;
+ ++it) {
+ const PictureMapKey& key = it.index();
+ PictureInfo& info = picture_map_[key];
+
+ gfx::Rect rect = PaddedRect(key);
+ int distance_to_visible =
+ rect.ManhattanInternalDistance(visible_layer_rect);
+
+ if (info.NeedsRecording(frame_number, distance_to_visible)) {
+ gfx::Rect tile = tiling_.TileBounds(key.first, key.second);
+ invalid_tiles.push_back(tile);
+ } else if (!info.GetPicture()) {
+ if (recorded_viewport_.Intersects(rect)) {
+ // Recorded viewport is just an optimization for a fully recorded
+ // interest rect. In this case, a tile in that rect has declined
+ // to be recorded (probably due to frequent invalidations).
+ // TODO(enne): Shrink the recorded_viewport_ rather than clearing.
+ recorded_viewport_ = gfx::Rect();
+ }
+
+ // If a tile in the interest rect is not recorded, the entire tile needs
+ // to be considered invalid, so that we know not to keep around raster
+ // tiles that intersect this recording tile.
+ invalidation->Union(tiling_.TileBounds(it.index_x(), it.index_y()));
+ }
+ }
+
+ std::vector<gfx::Rect> record_rects;
+ ClusterTiles(invalid_tiles, &record_rects);
+
+ if (record_rects.empty())
+ return updated;
+
+ for (std::vector<gfx::Rect>::iterator it = record_rects.begin();
+ it != record_rects.end();
+ it++) {
+ gfx::Rect record_rect = *it;
+ record_rect = PadRect(record_rect);
+
+ int repeat_count = std::max(1, slow_down_raster_scale_factor_for_debug_);
+ scoped_refptr<Picture> picture;
+
+ // Note: Currently, gathering of pixel refs when using a single
+ // raster thread doesn't provide any benefit. This might change
+ // in the future but we avoid it for now to reduce the cost of
+ // Picture::Create.
+ bool gather_pixel_refs = RasterWorkerPool::GetNumRasterThreads() > 1;
+
+ {
+ base::TimeDelta best_duration = base::TimeDelta::Max();
+ for (int i = 0; i < repeat_count; i++) {
+ base::TimeTicks start_time = stats_instrumentation->StartRecording();
+ picture = Picture::Create(record_rect,
+ painter,
+ tile_grid_info_,
+ gather_pixel_refs,
+ recording_mode);
+ // Note the '&&' with previous is-suitable state.
+ // This means that once a picture-pile becomes unsuitable for gpu
+ // rasterization due to some content, it will continue to be unsuitable
+ // even if that content is replaced by gpu-friendly content.
+ // This is an optimization to avoid iterating though all pictures in
+ // the pile after each invalidation.
+ is_suitable_for_gpu_rasterization_ &=
+ picture->IsSuitableForGpuRasterization();
+ has_text_ |= picture->HasText();
+ base::TimeDelta duration =
+ stats_instrumentation->EndRecording(start_time);
+ best_duration = std::min(duration, best_duration);
+ }
+ int recorded_pixel_count =
+ picture->LayerRect().width() * picture->LayerRect().height();
+ stats_instrumentation->AddRecord(best_duration, recorded_pixel_count);
+ }
+
+ bool found_tile_for_recorded_picture = false;
+
+ bool include_borders = true;
+ for (TilingData::Iterator it(&tiling_, record_rect, include_borders); it;
+ ++it) {
+ const PictureMapKey& key = it.index();
+ gfx::Rect tile = PaddedRect(key);
+ if (record_rect.Contains(tile)) {
+ PictureInfo& info = picture_map_[key];
+ info.SetPicture(picture);
+ found_tile_for_recorded_picture = true;
+ }
+ }
+ DetermineIfSolidColor();
+ DCHECK(found_tile_for_recorded_picture);
+ }
+
+ has_any_recordings_ = true;
+ DCHECK(CanRasterSlowTileCheck(recorded_viewport_));
+ return true;
+}
+
+void PicturePile::SetEmptyBounds() {
+ tiling_.SetTilingSize(gfx::Size());
+ picture_map_.clear();
+ has_any_recordings_ = false;
+ recorded_viewport_ = gfx::Rect();
+}
+
+void PicturePile::DetermineIfSolidColor() {
+ is_solid_color_ = false;
+ solid_color_ = SK_ColorTRANSPARENT;
+
+ if (picture_map_.empty()) {
+ return;
+ }
+
+ PictureMap::const_iterator it = picture_map_.begin();
+ const Picture* picture = it->second.GetPicture();
+
+ // Missing recordings due to frequent invalidations or being too far away
+ // from the interest rect will cause a null picture to exist.
+ if (!picture)
+ return;
+
+ // Don't bother doing more work if the first image is too complicated.
+ if (picture->ApproximateOpCount() > kOpCountThatIsOkToAnalyze)
+ return;
+
+ // Make sure all of the mapped images point to the same picture.
+ for (++it; it != picture_map_.end(); ++it) {
+ if (it->second.GetPicture() != picture)
+ return;
+ }
+ skia::AnalysisCanvas canvas(recorded_viewport_.width(),
+ recorded_viewport_.height());
+ picture->Raster(&canvas, NULL, Region(), 1.0f);
+ is_solid_color_ = canvas.GetColorIfSolid(&solid_color_);
+}
+
+} // namespace cc
diff --git a/cc/resources/picture_pile.h b/cc/resources/picture_pile.h
new file mode 100644
index 0000000..0e8dca6
--- /dev/null
+++ b/cc/resources/picture_pile.h
@@ -0,0 +1,69 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_PICTURE_PILE_H_
+#define CC_RESOURCES_PICTURE_PILE_H_
+
+#include "cc/resources/picture_pile_base.h"
+#include "ui/gfx/rect.h"
+
+namespace cc {
+class PicturePileImpl;
+class Region;
+class RenderingStatsInstrumentation;
+
+class CC_EXPORT PicturePile : public PicturePileBase {
+ public:
+ PicturePile();
+
+ // Re-record parts of the picture that are invalid.
+ // Invalidations are in layer space, and will be expanded to cover everything
+ // that was either recorded/changed or that has no recording, leaving out only
+ // pieces that we had a recording for and it was not changed.
+ // Return true iff the pile was modified.
+ bool UpdateAndExpandInvalidation(
+ ContentLayerClient* painter,
+ Region* invalidation,
+ SkColor background_color,
+ bool contents_opaque,
+ bool contents_fill_bounds_completely,
+ const gfx::Size& layer_size,
+ const gfx::Rect& visible_layer_rect,
+ int frame_number,
+ Picture::RecordingMode recording_mode,
+ RenderingStatsInstrumentation* stats_instrumentation);
+
+ void SetEmptyBounds();
+
+ void set_slow_down_raster_scale_factor(int factor) {
+ slow_down_raster_scale_factor_for_debug_ = factor;
+ }
+
+ void set_show_debug_picture_borders(bool show) {
+ show_debug_picture_borders_ = show;
+ }
+
+ bool is_suitable_for_gpu_rasterization() const {
+ return is_suitable_for_gpu_rasterization_;
+ }
+ void SetUnsuitableForGpuRasterizationForTesting() {
+ is_suitable_for_gpu_rasterization_ = false;
+ }
+
+ protected:
+ virtual ~PicturePile();
+
+ private:
+ friend class PicturePileImpl;
+
+ void DetermineIfSolidColor();
+
+ bool is_suitable_for_gpu_rasterization_;
+
+ DISALLOW_COPY_AND_ASSIGN(PicturePile);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_PICTURE_PILE_H_
diff --git a/cc/resources/picture_pile_base.cc b/cc/resources/picture_pile_base.cc
new file mode 100644
index 0000000..d850e41
--- /dev/null
+++ b/cc/resources/picture_pile_base.cc
@@ -0,0 +1,258 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/picture_pile_base.h"
+
+#include <algorithm>
+#include <set>
+#include <vector>
+
+#include "base/debug/trace_event_argument.h"
+#include "base/logging.h"
+#include "base/values.h"
+#include "cc/base/math_util.h"
+#include "cc/debug/traced_value.h"
+#include "third_party/skia/include/core/SkColor.h"
+#include "ui/gfx/rect_conversions.h"
+
+namespace {
+// Dimensions of the tiles in this picture pile as well as the dimensions of
+// the base picture in each tile.
+const int kBasePictureSize = 512;
+// Border pixels shared between neighboring tile-grid cells; see
+// PicturePileBase::ComputeTileGridInfo.
+const int kTileGridBorderPixels = 1;
+// In debug builds, clear canvases with a debug color so unrastered pixels
+// are obvious; skip the extra clear in release builds.
+#ifdef NDEBUG
+const bool kDefaultClearCanvasSetting = false;
+#else
+const bool kDefaultClearCanvasSetting = true;
+#endif
+
+// Invalidation frequency settings. kInvalidationFrequencyThreshold is a value
+// between 0 and 1 meaning invalidation frequency between 0% and 100% that
+// indicates when to stop invalidating offscreen regions.
+// kFrequentInvalidationDistanceThreshold defines what it means to be
+// "offscreen" in terms of distance to visible in css pixels.
+const float kInvalidationFrequencyThreshold = 0.75f;
+const int kFrequentInvalidationDistanceThreshold = 512;
+
+} // namespace
+
+namespace cc {
+
+// Initializes an empty pile with default (debug) settings and a
+// kBasePictureSize x kBasePictureSize tile size.
+PicturePileBase::PicturePileBase()
+ : min_contents_scale_(0),
+ background_color_(SkColorSetARGBInline(0, 0, 0, 0)),
+ slow_down_raster_scale_factor_for_debug_(0),
+ contents_opaque_(false),
+ contents_fill_bounds_completely_(false),
+ show_debug_picture_borders_(false),
+ clear_canvas_with_debug_color_(kDefaultClearCanvasSetting),
+ has_any_recordings_(false),
+ has_text_(false),
+ is_mask_(false),
+ is_solid_color_(false),
+ solid_color_(SK_ColorTRANSPARENT) {
+ tiling_.SetMaxTextureSize(gfx::Size(kBasePictureSize, kBasePictureSize));
+ tile_grid_info_.fTileInterval.setEmpty();
+ tile_grid_info_.fMargin.setEmpty();
+ tile_grid_info_.fOffset.setZero();
+}
+
+// Copies all recording state (picture map, tiling, flags) from |other|.
+// Pictures themselves are refcounted and shared, not deep-copied.
+PicturePileBase::PicturePileBase(const PicturePileBase* other)
+ : picture_map_(other->picture_map_),
+ tiling_(other->tiling_),
+ recorded_viewport_(other->recorded_viewport_),
+ min_contents_scale_(other->min_contents_scale_),
+ tile_grid_info_(other->tile_grid_info_),
+ background_color_(other->background_color_),
+ slow_down_raster_scale_factor_for_debug_(
+ other->slow_down_raster_scale_factor_for_debug_),
+ contents_opaque_(other->contents_opaque_),
+ contents_fill_bounds_completely_(other->contents_fill_bounds_completely_),
+ show_debug_picture_borders_(other->show_debug_picture_borders_),
+ clear_canvas_with_debug_color_(other->clear_canvas_with_debug_color_),
+ has_any_recordings_(other->has_any_recordings_),
+ has_text_(other->has_text_),
+ is_mask_(other->is_mask_),
+ is_solid_color_(other->is_solid_color_),
+ solid_color_(other->solid_color_) {
+}
+
+// Empty; members (picture map, tiling) clean up via their own destructors.
+PicturePileBase::~PicturePileBase() {
+}
+
+// Sets the minimum contents scale this pile may be rastered at, and
+// recomputes the buffer (border) pixels accordingly. No-op if unchanged.
+void PicturePileBase::SetMinContentsScale(float min_contents_scale) {
+ DCHECK(min_contents_scale);
+ if (min_contents_scale_ == min_contents_scale)
+ return;
+
+ // Picture contents are played back scaled. When the final contents scale is
+ // less than 1 (i.e. low res), then multiple recorded pixels will be used
+ // to raster one final pixel. To avoid splitting a final pixel across
+ // pictures (which would result in incorrect rasterization due to blending), a
+ // buffer margin is added so that any picture can be snapped to integral
+ // final pixels.
+ //
+ // For example, if a 1/4 contents scale is used, then that would be 3 buffer
+ // pixels, since that's the minimum number of pixels to add so that resulting
+ // content can be snapped to a four pixel aligned grid.
+ int buffer_pixels = static_cast<int>(ceil(1 / min_contents_scale) - 1);
+ buffer_pixels = std::max(0, buffer_pixels);
+ SetBufferPixels(buffer_pixels);
+ min_contents_scale_ = min_contents_scale;
+}
+
+// Fills |info| with the Skia tile-grid parameters for |tile_grid_size|,
+// accounting for the one-pixel border shared between neighboring cells.
+// static
+void PicturePileBase::ComputeTileGridInfo(
+ const gfx::Size& tile_grid_size,
+ SkTileGridFactory::TileGridInfo* info) {
+ DCHECK(info);
+ info->fTileInterval.set(tile_grid_size.width() - 2 * kTileGridBorderPixels,
+ tile_grid_size.height() - 2 * kTileGridBorderPixels);
+ DCHECK_GT(info->fTileInterval.width(), 0);
+ DCHECK_GT(info->fTileInterval.height(), 0);
+ info->fMargin.set(kTileGridBorderPixels, kTileGridBorderPixels);
+ // Offset the tile grid coordinate space to take into account the fact
+ // that the top-most and left-most tiles do not have top and left borders
+ // respectively.
+ info->fOffset.set(-kTileGridBorderPixels, -kTileGridBorderPixels);
+}
+
+// Updates tile_grid_info_ from |tile_grid_size| (see ComputeTileGridInfo).
+void PicturePileBase::SetTileGridSize(const gfx::Size& tile_grid_size) {
+ ComputeTileGridInfo(tile_grid_size, &tile_grid_info_);
+}
+
+// Sets the number of border texels around each tile. Changing it clears all
+// recordings, since existing pictures were recorded with the old padding.
+void PicturePileBase::SetBufferPixels(int new_buffer_pixels) {
+ if (new_buffer_pixels == buffer_pixels())
+ return;
+
+ Clear();
+ tiling_.SetBorderTexels(new_buffer_pixels);
+}
+
+// Drops all pictures and forgets the recorded-viewport hint.
+void PicturePileBase::Clear() {
+ picture_map_.clear();
+ recorded_viewport_ = gfx::Rect();
+}
+
+// True iff the tile at (x, y) exists in the map and holds a picture.
+bool PicturePileBase::HasRecordingAt(int x, int y) {
+ PictureMap::const_iterator found = picture_map_.find(PictureMapKey(x, y));
+ if (found == picture_map_.end())
+ return false;
+ return !!found->second.GetPicture();
+}
+
+// Returns true if |content_rect| (at |contents_scale|) is fully covered by
+// recordings, using recorded_viewport_ as a fast-path hint before falling
+// back to the per-tile map lookup.
+bool PicturePileBase::CanRaster(float contents_scale,
+ const gfx::Rect& content_rect) {
+ if (tiling_.tiling_size().IsEmpty())
+ return false;
+ gfx::Rect layer_rect = gfx::ScaleToEnclosingRect(
+ content_rect, 1.f / contents_scale);
+ layer_rect.Intersect(gfx::Rect(tiling_.tiling_size()));
+
+ // Common case inside of viewport to avoid the slower map lookups.
+ if (recorded_viewport_.Contains(layer_rect)) {
+ // Sanity check that there are no false positives in recorded_viewport_.
+ DCHECK(CanRasterSlowTileCheck(layer_rect));
+ return true;
+ }
+
+ return CanRasterSlowTileCheck(layer_rect);
+}
+
+// Walks each tile intersecting |layer_rect| (borders excluded) and fails if
+// any tile is missing from the map or has no picture.
+bool PicturePileBase::CanRasterSlowTileCheck(
+ const gfx::Rect& layer_rect) const {
+ bool include_borders = false;
+ for (TilingData::Iterator tile_iter(&tiling_, layer_rect, include_borders);
+ tile_iter;
+ ++tile_iter) {
+ PictureMap::const_iterator map_iter = picture_map_.find(tile_iter.index());
+ if (map_iter == picture_map_.end())
+ return false;
+ if (!map_iter->second.GetPicture())
+ return false;
+ }
+ return true;
+}
+
+// Bounds of tile |key| expanded by the buffer pixels on all sides.
+gfx::Rect PicturePileBase::PaddedRect(const PictureMapKey& key) const {
+ gfx::Rect tile = tiling_.TileBounds(key.first, key.second);
+ return PadRect(tile);
+}
+
+// Returns |rect| expanded by buffer_pixels() on all four sides
+// (a negative inset grows the rect).
+gfx::Rect PicturePileBase::PadRect(const gfx::Rect& rect) const {
+ gfx::Rect padded_rect = rect;
+ padded_rect.Inset(
+ -buffer_pixels(), -buffer_pixels(), -buffer_pixels(), -buffer_pixels());
+ return padded_rect;
+}
+
+// Appends a trace-ID reference for each distinct picture in the pile.
+// A picture that spans multiple tiles is emitted only once.
+void PicturePileBase::AsValueInto(base::debug::TracedValue* pictures) const {
+ gfx::Rect tiling_rect(tiling_.tiling_size());
+ std::set<const void*> appended_pictures;
+ bool include_borders = true;
+ for (TilingData::Iterator tile_iter(&tiling_, tiling_rect, include_borders);
+ tile_iter;
+ ++tile_iter) {
+ PictureMap::const_iterator map_iter = picture_map_.find(tile_iter.index());
+ if (map_iter == picture_map_.end())
+ continue;
+
+ const Picture* picture = map_iter->second.GetPicture();
+ if (picture && (appended_pictures.count(picture) == 0)) {
+ appended_pictures.insert(picture);
+ TracedValue::AppendIDRef(picture, pictures);
+ }
+ }
+}
+
+// PictureInfo tracks one tile's picture and its invalidation history.
+PicturePileBase::PictureInfo::PictureInfo() : last_frame_number_(0) {}
+
+PicturePileBase::PictureInfo::~PictureInfo() {}
+
+// Shifts the invalidation-history bitset so bit 0 corresponds to
+// |frame_number|; skipped frames contribute zero (not-invalidated) bits.
+void PicturePileBase::PictureInfo::AdvanceInvalidationHistory(
+ int frame_number) {
+ DCHECK_GE(frame_number, last_frame_number_);
+ if (frame_number == last_frame_number_)
+ return;
+
+ invalidation_history_ <<= (frame_number - last_frame_number_);
+ last_frame_number_ = frame_number;
+}
+
+// Records an invalidation for |frame_number| and drops the picture.
+// Returns true if there was a picture to drop.
+bool PicturePileBase::PictureInfo::Invalidate(int frame_number) {
+ AdvanceInvalidationHistory(frame_number);
+ invalidation_history_.set(0);
+
+ bool did_invalidate = !!picture_.get();
+ picture_ = NULL;
+ return did_invalidate;
+}
+
+// Decides whether this tile should be (re-)recorded this frame.
+bool PicturePileBase::PictureInfo::NeedsRecording(int frame_number,
+ int distance_to_visible) {
+ AdvanceInvalidationHistory(frame_number);
+
+ // We only need recording if we don't have a picture. Furthermore, we only
+ // need a recording if we're within frequent invalidation distance threshold
+ // or the invalidation is not frequent enough (below invalidation frequency
+ // threshold).
+ return !picture_.get() &&
+ ((distance_to_visible <= kFrequentInvalidationDistanceThreshold) ||
+ (GetInvalidationFrequency() < kInvalidationFrequencyThreshold));
+}
+
+// Takes a shared reference to |picture| for this tile.
+void PicturePileBase::PictureInfo::SetPicture(scoped_refptr<Picture> picture) {
+ picture_ = picture;
+}
+
+// May return NULL if the tile was invalidated or never recorded.
+const Picture* PicturePileBase::PictureInfo::GetPicture() const {
+ return picture_.get();
+}
+
+// Fraction of the last INVALIDATION_FRAMES_TRACKED frames in which this tile
+// was invalidated, in [0, 1].
+float PicturePileBase::PictureInfo::GetInvalidationFrequency() const {
+ return invalidation_history_.count() /
+ static_cast<float>(INVALIDATION_FRAMES_TRACKED);
+}
+
+} // namespace cc
diff --git a/cc/resources/picture_pile_base.h b/cc/resources/picture_pile_base.h
new file mode 100644
index 0000000..1067c3e
--- /dev/null
+++ b/cc/resources/picture_pile_base.h
@@ -0,0 +1,141 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_PICTURE_PILE_BASE_H_
+#define CC_RESOURCES_PICTURE_PILE_BASE_H_
+
+#include <bitset>
+#include <list>
+#include <utility>
+
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "cc/base/cc_export.h"
+#include "cc/base/region.h"
+#include "cc/base/tiling_data.h"
+#include "cc/resources/picture.h"
+#include "ui/gfx/size.h"
+
+namespace base {
+namespace debug {
+class TracedValue;
+}
+class Value;
+}
+
+namespace cc {
+
+// Shared base for PicturePile (recording) and PicturePileImpl (raster): a
+// tiled map from tile index to recorded Picture, plus the tiling geometry
+// and raster/debug settings. Refcounted so recording and raster sides can
+// share snapshots.
+class CC_EXPORT PicturePileBase : public base::RefCounted<PicturePileBase> {
+ public:
+ PicturePileBase();
+ explicit PicturePileBase(const PicturePileBase* other);
+
+ gfx::Size tiling_size() const { return tiling_.tiling_size(); }
+ void SetMinContentsScale(float min_contents_scale);
+
+ // If non-empty, all picture tiles inside this rect are recorded. There may
+ // be recordings outside this rect, but everything inside the rect is
+ // recorded.
+ gfx::Rect recorded_viewport() const { return recorded_viewport_; }
+
+ int num_tiles_x() const { return tiling_.num_tiles_x(); }
+ int num_tiles_y() const { return tiling_.num_tiles_y(); }
+ gfx::Rect tile_bounds(int x, int y) const { return tiling_.TileBounds(x, y); }
+ bool HasRecordingAt(int x, int y);
+ bool CanRaster(float contents_scale, const gfx::Rect& content_rect);
+
+ // If this pile contains any valid recordings. May have false positives.
+ bool HasRecordings() const { return has_any_recordings_; }
+
+ // If this pile has ever contained any recordings with text.
+ bool has_text() const { return has_text_; }
+
+ bool is_solid_color() const { return is_solid_color_; }
+ SkColor solid_color() const { return solid_color_; }
+
+ void set_is_mask(bool is_mask) { is_mask_ = is_mask; }
+ bool is_mask() const { return is_mask_; }
+
+ static void ComputeTileGridInfo(const gfx::Size& tile_grid_size,
+ SkTileGridFactory::TileGridInfo* info);
+
+ void SetTileGridSize(const gfx::Size& tile_grid_size);
+ // Mutable access to the tiling; callers can resize/reshape the tile grid.
+ TilingData& tiling() { return tiling_; }
+
+ void AsValueInto(base::debug::TracedValue* array) const;
+
+ protected:
+ // Per-tile state: the recorded picture (if any) plus a sliding window of
+ // invalidation history used to throttle offscreen re-recording.
+ class CC_EXPORT PictureInfo {
+ public:
+ enum {
+ INVALIDATION_FRAMES_TRACKED = 32
+ };
+
+ PictureInfo();
+ ~PictureInfo();
+
+ bool Invalidate(int frame_number);
+ bool NeedsRecording(int frame_number, int distance_to_visible);
+ void SetPicture(scoped_refptr<Picture> picture);
+ const Picture* GetPicture() const;
+
+ float GetInvalidationFrequencyForTesting() const {
+ return GetInvalidationFrequency();
+ }
+
+ private:
+ void AdvanceInvalidationHistory(int frame_number);
+ float GetInvalidationFrequency() const;
+
+ int last_frame_number_;
+ scoped_refptr<const Picture> picture_;
+ std::bitset<INVALIDATION_FRAMES_TRACKED> invalidation_history_;
+ };
+
+ typedef std::pair<int, int> PictureMapKey;
+ typedef base::hash_map<PictureMapKey, PictureInfo> PictureMap;
+
+ virtual ~PicturePileBase();
+
+ int buffer_pixels() const { return tiling_.border_texels(); }
+ void Clear();
+
+ gfx::Rect PaddedRect(const PictureMapKey& key) const;
+ gfx::Rect PadRect(const gfx::Rect& rect) const;
+
+ // An internal CanRaster check that goes to the picture_map rather than
+ // using the recorded_viewport hint.
+ bool CanRasterSlowTileCheck(const gfx::Rect& layer_rect) const;
+
+ // A picture pile is a tiled set of pictures. The picture map is a map of tile
+ // indices to picture infos.
+ PictureMap picture_map_;
+ TilingData tiling_;
+ gfx::Rect recorded_viewport_;
+ float min_contents_scale_;
+ SkTileGridFactory::TileGridInfo tile_grid_info_;
+ SkColor background_color_;
+ int slow_down_raster_scale_factor_for_debug_;
+ bool contents_opaque_;
+ bool contents_fill_bounds_completely_;
+ bool show_debug_picture_borders_;
+ bool clear_canvas_with_debug_color_;
+ // A hint about whether there are any recordings. This may be a false
+ // positive.
+ bool has_any_recordings_;
+ bool has_text_;
+ bool is_mask_;
+ bool is_solid_color_;
+ SkColor solid_color_;
+
+ private:
+ void SetBufferPixels(int buffer_pixels);
+
+ friend class base::RefCounted<PicturePileBase>;
+ DISALLOW_COPY_AND_ASSIGN(PicturePileBase);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_PICTURE_PILE_BASE_H_
diff --git a/cc/resources/picture_pile_impl.cc b/cc/resources/picture_pile_impl.cc
new file mode 100644
index 0000000..f4fbdf2
--- /dev/null
+++ b/cc/resources/picture_pile_impl.cc
@@ -0,0 +1,425 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include <limits>
+
+#include "base/debug/trace_event.h"
+#include "cc/base/region.h"
+#include "cc/debug/debug_colors.h"
+#include "cc/resources/picture_pile_impl.h"
+#include "skia/ext/analysis_canvas.h"
+#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkPictureRecorder.h"
+#include "third_party/skia/include/core/SkSize.h"
+#include "ui/gfx/rect_conversions.h"
+#include "ui/gfx/size_conversions.h"
+#include "ui/gfx/skia_util.h"
+
+namespace cc {
+
+// Creates a new, empty PicturePileImpl.
+scoped_refptr<PicturePileImpl> PicturePileImpl::Create() {
+ return make_scoped_refptr(new PicturePileImpl);
+}
+
+// Creates a pile sharing |other|'s state; pictures are refcounted and
+// shared, not deep-copied (see the PicturePileBase copy constructor).
+scoped_refptr<PicturePileImpl> PicturePileImpl::CreateFromOther(
+ const PicturePileBase* other) {
+ return make_scoped_refptr(new PicturePileImpl(other));
+}
+
+// Default-constructs an empty pile.
+PicturePileImpl::PicturePileImpl() {
+}
+
+// Snapshot constructor; forwards all state to the base copy constructor.
+PicturePileImpl::PicturePileImpl(const PicturePileBase* other)
+ : PicturePileBase(other) {
+}
+
+PicturePileImpl::~PicturePileImpl() {
+}
+
+// Rasters without clearing or background-filling the canvas; the caller is
+// responsible for preparing it. See RasterToBitmap for the clearing variant.
+void PicturePileImpl::RasterDirect(
+ SkCanvas* canvas,
+ const gfx::Rect& canvas_rect,
+ float contents_scale,
+ RenderingStatsInstrumentation* rendering_stats_instrumentation) {
+ RasterCommon(canvas,
+ NULL,
+ canvas_rect,
+ contents_scale,
+ rendering_stats_instrumentation,
+ false);
+}
+
+// Rasters into an AnalysisCanvas, passing the canvas as the draw callback
+// too so analysis can early-out; timings are recorded as analysis stats.
+void PicturePileImpl::RasterForAnalysis(
+ skia::AnalysisCanvas* canvas,
+ const gfx::Rect& canvas_rect,
+ float contents_scale,
+ RenderingStatsInstrumentation* stats_instrumentation) const {
+ RasterCommon(
+ canvas, canvas, canvas_rect, contents_scale, stats_instrumentation, true);
+}
+
+// Rasters |canvas_rect| into |canvas|, first clearing it (or filling the
+// content edges with the background color when contents are opaque) so no
+// undefined pixels remain, then delegates to RasterCommon.
+void PicturePileImpl::RasterToBitmap(
+ SkCanvas* canvas,
+ const gfx::Rect& canvas_rect,
+ float contents_scale,
+ RenderingStatsInstrumentation* rendering_stats_instrumentation) const {
+ canvas->discard();
+ if (clear_canvas_with_debug_color_) {
+ // Any non-painted areas in the content bounds will be left in this color.
+ canvas->clear(DebugColors::NonPaintedFillColor());
+ }
+
+ // If this picture has opaque contents, it is guaranteeing that it will
+ // draw an opaque rect the size of the layer. If it is not, then we must
+ // clear this canvas ourselves.
+ if (contents_opaque_ || contents_fill_bounds_completely_) {
+ // Even if completely covered, for rasterizations that touch the edge of the
+ // layer, we also need to raster the background color underneath the last
+ // texel (since the recording won't cover it) and outside the last texel
+ // (due to linear filtering when using this texture).
+ gfx::Rect content_tiling_rect = gfx::ToEnclosingRect(
+ gfx::ScaleRect(gfx::Rect(tiling_.tiling_size()), contents_scale));
+
+ // The final texel of content may only be partially covered by a
+ // rasterization; this rect represents the content rect that is fully
+ // covered by content.
+ gfx::Rect deflated_content_tiling_rect = content_tiling_rect;
+ deflated_content_tiling_rect.Inset(0, 0, 1, 1);
+ if (!deflated_content_tiling_rect.Contains(canvas_rect)) {
+ if (clear_canvas_with_debug_color_) {
+ // Any non-painted areas outside of the content bounds are left in
+ // this color. If this is seen then it means that cc neglected to
+ // rerasterize a tile that used to intersect with the content rect
+ // after the content bounds grew.
+ canvas->save();
+ canvas->translate(-canvas_rect.x(), -canvas_rect.y());
+ canvas->clipRect(gfx::RectToSkRect(content_tiling_rect),
+ SkRegion::kDifference_Op);
+ canvas->drawColor(DebugColors::MissingResizeInvalidations(),
+ SkXfermode::kSrc_Mode);
+ canvas->restore();
+ }
+
+ // Drawing at most 2 x 2 x (canvas width + canvas height) texels is 2-3X
+ // faster than clearing, so special case this.
+ canvas->save();
+ canvas->translate(-canvas_rect.x(), -canvas_rect.y());
+ gfx::Rect inflated_content_tiling_rect = content_tiling_rect;
+ inflated_content_tiling_rect.Inset(0, 0, -1, -1);
+ canvas->clipRect(gfx::RectToSkRect(inflated_content_tiling_rect),
+ SkRegion::kReplace_Op);
+ canvas->clipRect(gfx::RectToSkRect(deflated_content_tiling_rect),
+ SkRegion::kDifference_Op);
+ canvas->drawColor(background_color_, SkXfermode::kSrc_Mode);
+ canvas->restore();
+ }
+ } else {
+ TRACE_EVENT_INSTANT0("cc", "SkCanvas::clear", TRACE_EVENT_SCOPE_THREAD);
+ // Clearing is about ~4x faster than drawing a rect even if the content
+ // isn't covering a majority of the canvas.
+ canvas->clear(SK_ColorTRANSPARENT);
+ }
+
+ RasterCommon(canvas,
+ NULL,
+ canvas_rect,
+ contents_scale,
+ rendering_stats_instrumentation,
+ false);
+}
+
+// Builds a map from each picture intersecting |content_rect| to the canvas
+// region that should be clipped OUT when that picture is rastered, so each
+// canvas pixel is rastered by exactly one picture even though pictures span
+// multiple (bordered) tiles.
+void PicturePileImpl::CoalesceRasters(const gfx::Rect& canvas_rect,
+ const gfx::Rect& content_rect,
+ float contents_scale,
+ PictureRegionMap* results) const {
+ DCHECK(results);
+ // Rasterize the collection of relevant picture piles.
+ gfx::Rect layer_rect = gfx::ScaleToEnclosingRect(
+ content_rect, 1.f / contents_scale);
+
+ // Make sure pictures don't overlap by keeping track of previous right/bottom.
+ int min_content_left = -1;
+ int min_content_top = -1;
+ int last_row_index = -1;
+ int last_col_index = -1;
+ gfx::Rect last_content_rect;
+
+ // Coalesce rasters of the same picture into different rects:
+ // - Compute the clip of each of the pile chunks,
+ // - Subtract it from the canvas rect to get difference region
+ // - Later, use the difference region to subtract each of the comprising
+ // rects from the canvas.
+ // Note that in essence, we're trying to mimic clipRegion with intersect op
+ // that also respects the current canvas transform and clip. In order to use
+ // the canvas transform, we must stick to clipRect operations (clipRegion
+ // ignores the transform). Intersect then can be written as subtracting the
+ // negation of the region we're trying to intersect. Luckily, we know that all
+ // of the rects will have to fit into |content_rect|, so we can start with
+ // that and subtract chunk rects to get the region that we need to subtract
+ // from the canvas. Then, we can use clipRect with difference op to subtract
+ // each rect in the region.
+ bool include_borders = true;
+ for (TilingData::Iterator tile_iter(&tiling_, layer_rect, include_borders);
+ tile_iter;
+ ++tile_iter) {
+ PictureMap::const_iterator map_iter = picture_map_.find(tile_iter.index());
+ if (map_iter == picture_map_.end())
+ continue;
+ const PictureInfo& info = map_iter->second;
+ const Picture* picture = info.GetPicture();
+ if (!picture)
+ continue;
+
+ // This is intentionally *enclosed* rect, so that the clip is aligned on
+ // integral post-scale content pixels and does not extend past the edges
+ // of the picture chunk's layer rect. The min_contents_scale enforces that
+ // enough buffer pixels have been added such that the enclosed rect
+ // encompasses all invalidated pixels at any larger scale level.
+ gfx::Rect chunk_rect = PaddedRect(tile_iter.index());
+ gfx::Rect content_clip =
+ gfx::ScaleToEnclosedRect(chunk_rect, contents_scale);
+ DCHECK(!content_clip.IsEmpty()) << "Layer rect: "
+ << picture->LayerRect().ToString()
+ << "Contents scale: " << contents_scale;
+ content_clip.Intersect(canvas_rect);
+
+ // Make sure iterator goes top->bottom.
+ DCHECK_GE(tile_iter.index_y(), last_row_index);
+ if (tile_iter.index_y() > last_row_index) {
+ // First tile in a new row.
+ min_content_left = content_clip.x();
+ min_content_top = last_content_rect.bottom();
+ } else {
+ // Make sure iterator goes left->right.
+ DCHECK_GT(tile_iter.index_x(), last_col_index);
+ min_content_left = last_content_rect.right();
+ min_content_top = last_content_rect.y();
+ }
+
+ last_col_index = tile_iter.index_x();
+ last_row_index = tile_iter.index_y();
+
+ // Only inset if the content_clip is less than the previous min.
+ int inset_left = std::max(0, min_content_left - content_clip.x());
+ int inset_top = std::max(0, min_content_top - content_clip.y());
+ content_clip.Inset(inset_left, inset_top, 0, 0);
+
+ PictureRegionMap::iterator it = results->find(picture);
+ Region* clip_region;
+ if (it == results->end()) {
+ // The clip for a set of coalesced pictures starts out clipping the entire
+ // canvas. Each picture added to the set must subtract its own bounds
+ // from the clip region, poking a hole so that the picture is unclipped.
+ clip_region = &(*results)[picture];
+ *clip_region = canvas_rect;
+ } else {
+ clip_region = &it->second;
+ }
+
+ DCHECK(clip_region->Contains(content_clip))
+ << "Content clips should not overlap.";
+ clip_region->Subtract(content_clip);
+ last_content_rect = content_clip;
+ }
+}
+
+// Shared raster path: translates/clips |canvas| to |canvas_rect|, coalesces
+// the overlapping pictures, then rasters each picture exactly once inside
+// its negated clip region, optionally recording raster/analysis stats. In
+// debug builds, never-rastered areas are filled with a debug color.
+void PicturePileImpl::RasterCommon(
+ SkCanvas* canvas,
+ SkDrawPictureCallback* callback,
+ const gfx::Rect& canvas_rect,
+ float contents_scale,
+ RenderingStatsInstrumentation* rendering_stats_instrumentation,
+ bool is_analysis) const {
+ DCHECK(contents_scale >= min_contents_scale_);
+
+ canvas->translate(-canvas_rect.x(), -canvas_rect.y());
+ gfx::Rect content_tiling_rect = gfx::ToEnclosingRect(
+ gfx::ScaleRect(gfx::Rect(tiling_.tiling_size()), contents_scale));
+ content_tiling_rect.Intersect(canvas_rect);
+
+ canvas->clipRect(gfx::RectToSkRect(content_tiling_rect),
+ SkRegion::kIntersect_Op);
+
+ PictureRegionMap picture_region_map;
+ CoalesceRasters(
+ canvas_rect, content_tiling_rect, contents_scale, &picture_region_map);
+
+#ifndef NDEBUG
+ Region total_clip;
+#endif // NDEBUG
+
+ // Iterate the coalesced map and use each picture's region
+ // to clip the canvas.
+ for (PictureRegionMap::iterator it = picture_region_map.begin();
+ it != picture_region_map.end();
+ ++it) {
+ const Picture* picture = it->first;
+ Region negated_clip_region = it->second;
+
+#ifndef NDEBUG
+ Region positive_clip = content_tiling_rect;
+ positive_clip.Subtract(negated_clip_region);
+ // Make sure we never rasterize the same region twice.
+ DCHECK(!total_clip.Intersects(positive_clip));
+ total_clip.Union(positive_clip);
+#endif // NDEBUG
+
+ // With slow-down debugging enabled, raster repeatedly and keep the best
+ // (minimum) measured duration.
+ base::TimeDelta best_duration = base::TimeDelta::Max();
+ int repeat_count = std::max(1, slow_down_raster_scale_factor_for_debug_);
+ int rasterized_pixel_count = 0;
+
+ for (int j = 0; j < repeat_count; ++j) {
+ base::TimeTicks start_time;
+ if (rendering_stats_instrumentation)
+ start_time = rendering_stats_instrumentation->StartRecording();
+
+ rasterized_pixel_count = picture->Raster(
+ canvas, callback, negated_clip_region, contents_scale);
+
+ if (rendering_stats_instrumentation) {
+ base::TimeDelta duration =
+ rendering_stats_instrumentation->EndRecording(start_time);
+ best_duration = std::min(best_duration, duration);
+ }
+ }
+
+ if (rendering_stats_instrumentation) {
+ if (is_analysis) {
+ rendering_stats_instrumentation->AddAnalysis(best_duration,
+ rasterized_pixel_count);
+ } else {
+ rendering_stats_instrumentation->AddRaster(best_duration,
+ rasterized_pixel_count);
+ }
+ }
+ }
+
+#ifndef NDEBUG
+ // Fill the clip with debug color. This allows us to
+ // distinguish between non painted areas and problems with missing
+ // pictures.
+ SkPaint paint;
+ for (Region::Iterator it(total_clip); it.has_rect(); it.next())
+ canvas->clipRect(gfx::RectToSkRect(it.rect()), SkRegion::kDifference_Op);
+ paint.setColor(DebugColors::MissingPictureFillColor());
+ paint.setXfermodeMode(SkXfermode::kSrc_Mode);
+ canvas->drawPaint(paint);
+#endif // NDEBUG
+}
+
+// Records the entire pile into a single SkPicture at scale 1.0.
+skia::RefPtr<SkPicture> PicturePileImpl::GetFlattenedPicture() {
+ TRACE_EVENT0("cc", "PicturePileImpl::GetFlattenedPicture");
+
+ gfx::Rect tiling_rect(tiling_.tiling_size());
+ SkPictureRecorder recorder;
+ SkCanvas* canvas =
+ recorder.beginRecording(tiling_rect.width(), tiling_rect.height());
+ if (!tiling_rect.IsEmpty())
+ RasterToBitmap(canvas, tiling_rect, 1.0, NULL);
+ skia::RefPtr<SkPicture> picture = skia::AdoptRef(recorder.endRecording());
+
+ return picture;
+}
+
+// Convenience overload without stats instrumentation.
+void PicturePileImpl::AnalyzeInRect(const gfx::Rect& content_rect,
+ float contents_scale,
+ PicturePileImpl::Analysis* analysis) const {
+ AnalyzeInRect(content_rect, contents_scale, analysis, NULL);
+}
+
+// Determines whether |content_rect| is a single solid color by replaying
+// the recordings into an AnalysisCanvas (at scale 1.0, in layer space).
+void PicturePileImpl::AnalyzeInRect(
+ const gfx::Rect& content_rect,
+ float contents_scale,
+ PicturePileImpl::Analysis* analysis,
+ RenderingStatsInstrumentation* stats_instrumentation) const {
+ DCHECK(analysis);
+ TRACE_EVENT0("cc", "PicturePileImpl::AnalyzeInRect");
+
+ gfx::Rect layer_rect = gfx::ScaleToEnclosingRect(
+ content_rect, 1.0f / contents_scale);
+
+ layer_rect.Intersect(gfx::Rect(tiling_.tiling_size()));
+
+ skia::AnalysisCanvas canvas(layer_rect.width(), layer_rect.height());
+
+ RasterForAnalysis(&canvas, layer_rect, 1.0f, stats_instrumentation);
+
+ analysis->is_solid_color = canvas.GetColorIfSolid(&analysis->solid_color);
+}
+
+// Since there are situations when we can skip analysis, the members have to
+// be initialized to their safest values; i.e., assume the tile is not a
+// solid color. (solid_color is only meaningful when is_solid_color is set.)
+PicturePileImpl::Analysis::Analysis() : is_solid_color(false) {
+}
+
+PicturePileImpl::Analysis::~Analysis() {
+}
+
+// Iterates all bitmap pixel refs in pictures intersecting |content_rect|.
+// Tile borders are excluded so each tile is visited at most once.
+PicturePileImpl::PixelRefIterator::PixelRefIterator(
+ const gfx::Rect& content_rect,
+ float contents_scale,
+ const PicturePileImpl* picture_pile)
+ : picture_pile_(picture_pile),
+ layer_rect_(
+ gfx::ScaleToEnclosingRect(content_rect, 1.f / contents_scale)),
+ tile_iterator_(&picture_pile_->tiling_,
+ layer_rect_,
+ false /* include_borders */) {
+ // Early out if there isn't a single tile.
+ if (!tile_iterator_)
+ return;
+
+ AdvanceToTilePictureWithPixelRefs();
+}
+
+PicturePileImpl::PixelRefIterator::~PixelRefIterator() {
+}
+
+// Advances to the next pixel ref, moving on to the next tile's picture when
+// the current picture's refs are exhausted.
+PicturePileImpl::PixelRefIterator&
+ PicturePileImpl::PixelRefIterator::operator++() {
+ ++pixel_ref_iterator_;
+ if (pixel_ref_iterator_)
+ return *this;
+
+ ++tile_iterator_;
+ AdvanceToTilePictureWithPixelRefs();
+ return *this;
+}
+
+// Skips tiles with no picture, pictures already processed (a picture can
+// span multiple tiles), and pictures that will not play back bitmaps, until
+// one with at least one pixel ref inside |layer_rect_| is found.
+void PicturePileImpl::PixelRefIterator::AdvanceToTilePictureWithPixelRefs() {
+ for (; tile_iterator_; ++tile_iterator_) {
+ PictureMap::const_iterator it =
+ picture_pile_->picture_map_.find(tile_iterator_.index());
+ if (it == picture_pile_->picture_map_.end())
+ continue;
+
+ const Picture* picture = it->second.GetPicture();
+ if (!picture || (processed_pictures_.count(picture) != 0) ||
+ !picture->WillPlayBackBitmaps())
+ continue;
+
+ processed_pictures_.insert(picture);
+ pixel_ref_iterator_ = Picture::PixelRefIterator(layer_rect_, picture);
+ if (pixel_ref_iterator_)
+ break;
+ }
+}
+
+// Emits a trace snapshot for each distinct picture in the pile (each
+// picture only once, even if it spans multiple tiles).
+void PicturePileImpl::DidBeginTracing() {
+ std::set<const void*> processed_pictures;
+ for (PictureMap::iterator it = picture_map_.begin();
+ it != picture_map_.end();
+ ++it) {
+ const Picture* picture = it->second.GetPicture();
+ if (picture && (processed_pictures.count(picture) == 0)) {
+ picture->EmitTraceSnapshot();
+ processed_pictures.insert(picture);
+ }
+ }
+}
+
+} // namespace cc
diff --git a/cc/resources/picture_pile_impl.h b/cc/resources/picture_pile_impl.h
new file mode 100644
index 0000000..75ee536
--- /dev/null
+++ b/cc/resources/picture_pile_impl.h
@@ -0,0 +1,132 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_PICTURE_PILE_IMPL_H_
+#define CC_RESOURCES_PICTURE_PILE_IMPL_H_
+
+#include <list>
+#include <map>
+#include <set>
+#include <vector>
+
+#include "base/time/time.h"
+#include "cc/base/cc_export.h"
+#include "cc/debug/rendering_stats_instrumentation.h"
+#include "cc/resources/picture_pile_base.h"
+#include "skia/ext/analysis_canvas.h"
+#include "skia/ext/refptr.h"
+#include "third_party/skia/include/core/SkPicture.h"
+
+namespace cc {
+
+// Raster/analysis side of a picture pile: rasters recorded Pictures into
+// canvases, analyzes regions for solid color, and iterates bitmap pixel
+// refs. Typically created as a snapshot of a PicturePile via CreateFromOther.
+class CC_EXPORT PicturePileImpl : public PicturePileBase {
+ public:
+ static scoped_refptr<PicturePileImpl> Create();
+ static scoped_refptr<PicturePileImpl> CreateFromOther(
+ const PicturePileBase* other);
+
+ // Raster a subrect of this PicturePileImpl into the given canvas. It is
+ // assumed that contents_scale has already been applied to this canvas.
+ // Writes the total number of pixels rasterized and the time spent
+ // rasterizing to the stats if the respective pointer is not NULL. When
+ // slow-down-raster-scale-factor is set to a value greater than 1, the
+ // reported rasterize time is the minimum measured value over all runs.
+ void RasterDirect(
+ SkCanvas* canvas,
+ const gfx::Rect& canvas_rect,
+ float contents_scale,
+ RenderingStatsInstrumentation* rendering_stats_instrumentation);
+
+ // Similar to the above RasterDirect method, but this is a convenience method
+ // for when it is known that the raster is going to an intermediate bitmap
+ // that itself will then be blended and thus that a canvas clear is required.
+ // Note that this function may write outside the canvas_rect.
+ void RasterToBitmap(
+ SkCanvas* canvas,
+ const gfx::Rect& canvas_rect,
+ float contents_scale,
+ RenderingStatsInstrumentation* stats_instrumentation) const;
+
+ // Called when analyzing a tile. We can use AnalysisCanvas as
+ // SkDrawPictureCallback, which allows us to early out from analysis.
+ void RasterForAnalysis(
+ skia::AnalysisCanvas* canvas,
+ const gfx::Rect& canvas_rect,
+ float contents_scale,
+ RenderingStatsInstrumentation* stats_instrumentation) const;
+
+ // Records the whole pile into a single SkPicture at scale 1.0.
+ skia::RefPtr<SkPicture> GetFlattenedPicture();
+
+ // Result of AnalyzeInRect: whether the analyzed region is one solid color
+ // (solid_color is only meaningful when is_solid_color is true).
+ struct CC_EXPORT Analysis {
+ Analysis();
+ ~Analysis();
+
+ bool is_solid_color;
+ SkColor solid_color;
+ };
+
+ void AnalyzeInRect(const gfx::Rect& content_rect,
+ float contents_scale,
+ Analysis* analysis) const;
+
+ void AnalyzeInRect(
+ const gfx::Rect& content_rect,
+ float contents_scale,
+ Analysis* analysis,
+ RenderingStatsInstrumentation* stats_instrumentation) const;
+
+ // Iterates over every bitmap pixel ref referenced by pictures intersecting
+ // a content rect; each distinct picture is visited at most once.
+ class CC_EXPORT PixelRefIterator {
+ public:
+ PixelRefIterator(const gfx::Rect& content_rect,
+ float contents_scale,
+ const PicturePileImpl* picture_pile);
+ ~PixelRefIterator();
+
+ SkPixelRef* operator->() const { return *pixel_ref_iterator_; }
+ SkPixelRef* operator*() const { return *pixel_ref_iterator_; }
+ PixelRefIterator& operator++();
+ operator bool() const { return pixel_ref_iterator_; }
+
+ private:
+ void AdvanceToTilePictureWithPixelRefs();
+
+ const PicturePileImpl* picture_pile_;
+ gfx::Rect layer_rect_;
+ TilingData::Iterator tile_iterator_;
+ Picture::PixelRefIterator pixel_ref_iterator_;
+ std::set<const void*> processed_pictures_;
+ };
+
+ void DidBeginTracing();
+
+ protected:
+ friend class PicturePile;
+ friend class PixelRefIterator;
+
+ PicturePileImpl();
+ explicit PicturePileImpl(const PicturePileBase* other);
+ virtual ~PicturePileImpl();
+
+ private:
+ typedef std::map<const Picture*, Region> PictureRegionMap;
+
+ void CoalesceRasters(const gfx::Rect& canvas_rect,
+ const gfx::Rect& content_rect,
+ float contents_scale,
+ PictureRegionMap* result) const;
+
+ void RasterCommon(
+ SkCanvas* canvas,
+ SkDrawPictureCallback* callback,
+ const gfx::Rect& canvas_rect,
+ float contents_scale,
+ RenderingStatsInstrumentation* rendering_stats_instrumentation,
+ bool is_analysis) const;
+
+ DISALLOW_COPY_AND_ASSIGN(PicturePileImpl);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_PICTURE_PILE_IMPL_H_
diff --git a/cc/resources/picture_pile_impl_perftest.cc b/cc/resources/picture_pile_impl_perftest.cc
new file mode 100644
index 0000000..7f12fd1
--- /dev/null
+++ b/cc/resources/picture_pile_impl_perftest.cc
@@ -0,0 +1,88 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/picture_pile_impl.h"
+
+#include "cc/debug/lap_timer.h"
+#include "cc/test/fake_picture_pile_impl.h"
+#include "cc/test/fake_rendering_stats_instrumentation.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace cc {
+namespace {
+
+// LapTimer configuration: run each timed loop for at most 2 seconds, treat
+// the first 5 laps as warmup, and only consult the clock every 10 laps.
+const int kTimeLimitMillis = 2000;
+const int kWarmupRuns = 5;
+const int kTimeCheckInterval = 10;
+
+// Pile geometry: a 1000x1000 layer recorded as 100x100 tiles.
+const int kTileSize = 100;
+const int kLayerSize = 1000;
+
+// Micro-benchmark fixture that times PicturePileImpl::AnalyzeInRect() and
+// RasterToBitmap() over a fixed tile-sized content rect and reports
+// laps-per-second through perf_test::PrintResult().
+class PicturePileImplPerfTest : public testing::Test {
+ public:
+  PicturePileImplPerfTest()
+      : timer_(kWarmupRuns,
+               base::TimeDelta::FromMilliseconds(kTimeLimitMillis),
+               kTimeCheckInterval) {}
+
+  // Times AnalyzeInRect() on a kTileSize x kTileSize content rect. Smaller
+  // |contents_scale| values map the same content rect onto a larger layer
+  // area, i.e. more recorded tiles (test_name encodes the tile count).
+  void RunAnalyzeTest(const std::string& test_name, float contents_scale) {
+    scoped_refptr<PicturePileImpl> pile = FakePicturePileImpl::CreateFilledPile(
+        gfx::Size(kTileSize, kTileSize), gfx::Size(kLayerSize, kLayerSize));
+    // Content rect that will align with top-left tile at scale 1.0.
+    gfx::Rect content_rect(0, 0, kTileSize, kTileSize);
+
+    PicturePileImpl::Analysis analysis;
+    timer_.Reset();
+    do {
+      pile->AnalyzeInRect(content_rect, contents_scale, &analysis);
+      timer_.NextLap();
+    } while (!timer_.HasTimeLimitExpired());
+
+    perf_test::PrintResult(
+        "analyze", "", test_name, timer_.LapsPerSecond(), "runs/s", true);
+  }
+
+  // Times RasterToBitmap() of the same content rect, clipped to a 1x1
+  // canvas (the canvas is deliberately tiny; only raster setup/playback is
+  // being measured, not pixel output).
+  void RunRasterTest(const std::string& test_name, float contents_scale) {
+    scoped_refptr<PicturePileImpl> pile = FakePicturePileImpl::CreateFilledPile(
+        gfx::Size(kTileSize, kTileSize), gfx::Size(kLayerSize, kLayerSize));
+    // Content rect that will align with top-left tile at scale 1.0.
+    gfx::Rect content_rect(0, 0, kTileSize, kTileSize);
+
+    SkBitmap bitmap;
+    bitmap.allocN32Pixels(1, 1);
+    SkCanvas canvas(bitmap);
+
+    FakeRenderingStatsInstrumentation rendering_stats_instrumentation;
+    timer_.Reset();
+    do {
+      pile->RasterToBitmap(&canvas,
+                           content_rect,
+                           contents_scale,
+                           &rendering_stats_instrumentation);
+      timer_.NextLap();
+    } while (!timer_.HasTimeLimitExpired());
+
+    perf_test::PrintResult(
+        "raster", "", test_name, timer_.LapsPerSecond(), "runs/s", true);
+  }
+
+ private:
+  LapTimer timer_;
+};
+
+// Benchmarks analysis at scales covering 1, 4, and 100 recorded tiles.
+TEST_F(PicturePileImplPerfTest, Analyze) {
+  RunAnalyzeTest("1", 1.0f);
+  RunAnalyzeTest("4", 0.5f);
+  RunAnalyzeTest("100", 0.1f);
+}
+
+// Benchmarks raster at scales covering 1, 4, and 100 recorded tiles.
+TEST_F(PicturePileImplPerfTest, Raster) {
+  RunRasterTest("1", 1.0f);
+  RunRasterTest("4", 0.5f);
+  RunRasterTest("100", 0.1f);
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/picture_pile_impl_unittest.cc b/cc/resources/picture_pile_impl_unittest.cc
new file mode 100644
index 0000000..d0daa6b
--- /dev/null
+++ b/cc/resources/picture_pile_impl_unittest.cc
@@ -0,0 +1,892 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "cc/test/fake_picture_pile_impl.h"
+#include "cc/test/fake_rendering_stats_instrumentation.h"
+#include "cc/test/skia_common.h"
+#include "skia/ext/refptr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/skia/include/core/SkPixelRef.h"
+#include "third_party/skia/include/core/SkShader.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/size_conversions.h"
+
+namespace cc {
+namespace {
+
+// AnalyzeInRect() at scale 1.0: a whole-pile solid fill must report
+// is_solid_color (with the fill color) for every tile-aligned rect; a single
+// differing pixel must break solidness only for the rect containing it; and
+// rects extending past layer bounds must be clipped before analysis.
+TEST(PicturePileImplTest, AnalyzeIsSolidUnscaled) {
+  gfx::Size tile_size(100, 100);
+  gfx::Size layer_bounds(400, 400);
+
+  scoped_refptr<FakePicturePileImpl> pile =
+      FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+
+  SkColor solid_color = SkColorSetARGB(255, 12, 23, 34);
+  SkPaint solid_paint;
+  solid_paint.setColor(solid_color);
+
+  // Translucent paint, so blending it over the solid fill yields a pixel
+  // that differs from solid_color.
+  SkColor non_solid_color = SkColorSetARGB(128, 45, 56, 67);
+  SkPaint non_solid_paint;
+  non_solid_paint.setColor(non_solid_color);
+
+  pile->add_draw_rect_with_paint(gfx::Rect(0, 0, 400, 400), solid_paint);
+  pile->RerecordPile();
+
+  // Ensure everything is solid
+  for (int y = 0; y <= 300; y += 100) {
+    for (int x = 0; x <= 300; x += 100) {
+      PicturePileImpl::Analysis analysis;
+      gfx::Rect rect(x, y, 100, 100);
+      pile->AnalyzeInRect(rect, 1.0, &analysis);
+      EXPECT_TRUE(analysis.is_solid_color) << rect.ToString();
+      EXPECT_EQ(analysis.solid_color, solid_color) << rect.ToString();
+    }
+  }
+
+  // One pixel non solid
+  pile->add_draw_rect_with_paint(gfx::Rect(50, 50, 1, 1), non_solid_paint);
+  pile->RerecordPile();
+
+  // The rect containing the odd pixel is no longer solid...
+  PicturePileImpl::Analysis analysis;
+  pile->AnalyzeInRect(gfx::Rect(0, 0, 100, 100), 1.0, &analysis);
+  EXPECT_FALSE(analysis.is_solid_color);
+
+  // ...but a rect that does not contain it still is.
+  pile->AnalyzeInRect(gfx::Rect(100, 0, 100, 100), 1.0, &analysis);
+  EXPECT_TRUE(analysis.is_solid_color);
+  EXPECT_EQ(analysis.solid_color, solid_color);
+
+  // Boundaries should be clipped: these rects extend 50px past the layer's
+  // right/bottom edges, and must still analyze as solid.
+  analysis.is_solid_color = false;
+  pile->AnalyzeInRect(gfx::Rect(350, 0, 100, 100), 1.0, &analysis);
+  EXPECT_TRUE(analysis.is_solid_color);
+  EXPECT_EQ(analysis.solid_color, solid_color);
+
+  analysis.is_solid_color = false;
+  pile->AnalyzeInRect(gfx::Rect(0, 350, 100, 100), 1.0, &analysis);
+  EXPECT_TRUE(analysis.is_solid_color);
+  EXPECT_EQ(analysis.solid_color, solid_color);
+
+  analysis.is_solid_color = false;
+  pile->AnalyzeInRect(gfx::Rect(350, 350, 100, 100), 1.0, &analysis);
+  EXPECT_TRUE(analysis.is_solid_color);
+  EXPECT_EQ(analysis.solid_color, solid_color);
+}
+
+// Same as AnalyzeIsSolidUnscaled, but analyzed at contents_scale 0.1: the
+// query rects are in content (scaled) space, so a 10x10 content rect covers
+// a 100x100 region of the recorded layer.
+TEST(PicturePileImplTest, AnalyzeIsSolidScaled) {
+  gfx::Size tile_size(100, 100);
+  gfx::Size layer_bounds(400, 400);
+
+  scoped_refptr<FakePicturePileImpl> pile =
+      FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+
+  SkColor solid_color = SkColorSetARGB(255, 12, 23, 34);
+  SkPaint solid_paint;
+  solid_paint.setColor(solid_color);
+
+  // Translucent paint; blending it over the fill breaks solidness.
+  SkColor non_solid_color = SkColorSetARGB(128, 45, 56, 67);
+  SkPaint non_solid_paint;
+  non_solid_paint.setColor(non_solid_color);
+
+  pile->add_draw_rect_with_paint(gfx::Rect(0, 0, 400, 400), solid_paint);
+  pile->RerecordPile();
+
+  // Ensure everything is solid
+  for (int y = 0; y <= 30; y += 10) {
+    for (int x = 0; x <= 30; x += 10) {
+      PicturePileImpl::Analysis analysis;
+      gfx::Rect rect(x, y, 10, 10);
+      pile->AnalyzeInRect(rect, 0.1f, &analysis);
+      EXPECT_TRUE(analysis.is_solid_color) << rect.ToString();
+      EXPECT_EQ(analysis.solid_color, solid_color) << rect.ToString();
+    }
+  }
+
+  // One pixel non solid (at layer position (50, 50), i.e. inside the first
+  // 10x10 content rect at scale 0.1).
+  pile->add_draw_rect_with_paint(gfx::Rect(50, 50, 1, 1), non_solid_paint);
+  pile->RerecordPile();
+
+  PicturePileImpl::Analysis analysis;
+  pile->AnalyzeInRect(gfx::Rect(0, 0, 10, 10), 0.1f, &analysis);
+  EXPECT_FALSE(analysis.is_solid_color);
+
+  pile->AnalyzeInRect(gfx::Rect(10, 0, 10, 10), 0.1f, &analysis);
+  EXPECT_TRUE(analysis.is_solid_color);
+  EXPECT_EQ(analysis.solid_color, solid_color);
+
+  // Boundaries should be clipped (rects extend past the scaled layer edge).
+  analysis.is_solid_color = false;
+  pile->AnalyzeInRect(gfx::Rect(35, 0, 10, 10), 0.1f, &analysis);
+  EXPECT_TRUE(analysis.is_solid_color);
+  EXPECT_EQ(analysis.solid_color, solid_color);
+
+  analysis.is_solid_color = false;
+  pile->AnalyzeInRect(gfx::Rect(0, 35, 10, 10), 0.1f, &analysis);
+  EXPECT_TRUE(analysis.is_solid_color);
+  EXPECT_EQ(analysis.solid_color, solid_color);
+
+  analysis.is_solid_color = false;
+  pile->AnalyzeInRect(gfx::Rect(35, 35, 10, 10), 0.1f, &analysis);
+  EXPECT_TRUE(analysis.is_solid_color);
+  EXPECT_EQ(analysis.solid_color, solid_color);
+}
+
+// A pile with no drawing recorded must analyze as solid transparent black.
+TEST(PicturePileImplTest, AnalyzeIsSolidEmpty) {
+  gfx::Size tile_size(100, 100);
+  gfx::Size layer_bounds(400, 400);
+
+  scoped_refptr<FakePicturePileImpl> pile =
+      FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+  // Sanity-check the flag's default before analysis flips it.
+  PicturePileImpl::Analysis analysis;
+  EXPECT_FALSE(analysis.is_solid_color);
+
+  pile->AnalyzeInRect(gfx::Rect(0, 0, 400, 400), 1.f, &analysis);
+
+  EXPECT_TRUE(analysis.is_solid_color);
+  EXPECT_EQ(analysis.solid_color, SkColorSetARGB(0, 0, 0, 0));
+}
+
+// PixelRefIterator over a pile with nothing recorded: every query rect, at
+// any contents scale (the rect is given in content space), yields no pixel
+// refs and the iterator converts to false immediately.
+TEST(PicturePileImplTest, PixelRefIteratorEmpty) {
+  gfx::Size tile_size(128, 128);
+  gfx::Size layer_bounds(256, 256);
+
+  // Create a filled pile with no recording.
+  scoped_refptr<FakePicturePileImpl> pile =
+      FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+
+  // Tile sized iterators.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 128, 128), 1.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 256, 256), 2.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 64, 64), 0.5, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  // Shifted tile sized iterators.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(140, 140, 128, 128), 1.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(280, 280, 256, 256), 2.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(70, 70, 64, 64), 0.5, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  // Layer sized iterators.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 256, 256), 1.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 512, 512), 2.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 128, 128), 0.5, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+}
+
+// PixelRefIterator must skip non-discardable content: plain painted rects
+// and bitmaps created as "notdiscardable" produce no iterated pixel refs,
+// regardless of query rect or scale.
+TEST(PicturePileImplTest, PixelRefIteratorNoDiscardableRefs) {
+  gfx::Size tile_size(128, 128);
+  gfx::Size layer_bounds(256, 256);
+
+  scoped_refptr<FakePicturePileImpl> pile =
+      FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+
+  SkPaint simple_paint;
+  simple_paint.setColor(SkColorSetARGB(255, 12, 23, 34));
+
+  SkBitmap non_discardable_bitmap;
+  CreateBitmap(gfx::Size(128, 128), "notdiscardable", &non_discardable_bitmap);
+
+  // Several rect draws (some partly or fully outside the layer) plus
+  // non-discardable bitmap draws scattered over the grid.
+  pile->add_draw_rect_with_paint(gfx::Rect(0, 0, 256, 256), simple_paint);
+  pile->add_draw_rect_with_paint(gfx::Rect(128, 128, 512, 512), simple_paint);
+  pile->add_draw_rect_with_paint(gfx::Rect(512, 0, 256, 256), simple_paint);
+  pile->add_draw_rect_with_paint(gfx::Rect(0, 512, 256, 256), simple_paint);
+  pile->add_draw_bitmap(non_discardable_bitmap, gfx::Point(128, 0));
+  pile->add_draw_bitmap(non_discardable_bitmap, gfx::Point(0, 128));
+  pile->add_draw_bitmap(non_discardable_bitmap, gfx::Point(150, 150));
+
+  pile->RerecordPile();
+
+  // Tile sized iterators.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 128, 128), 1.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 256, 256), 2.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 64, 64), 0.5, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  // Shifted tile sized iterators.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(140, 140, 128, 128), 1.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(280, 280, 256, 256), 2.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(70, 70, 64, 64), 0.5, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  // Layer sized iterators.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 256, 256), 1.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 512, 512), 2.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 128, 128), 0.5, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+}
+
+// PixelRefIterator over a 2x2-tile pile with discardable bitmaps in three
+// cells: a query rect (given in content space, mapped to layer space by the
+// contents scale) yields exactly the discardable pixel refs it intersects,
+// each exactly once.
+TEST(PicturePileImplTest, PixelRefIteratorDiscardableRefs) {
+  gfx::Size tile_size(128, 128);
+  gfx::Size layer_bounds(256, 256);
+
+  scoped_refptr<FakePicturePileImpl> pile =
+      FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+
+  // Only three of the four array slots are initialized/drawn; [0][1] is
+  // deliberately left empty to match the grid below.
+  SkBitmap discardable_bitmap[2][2];
+  CreateBitmap(gfx::Size(32, 32), "discardable", &discardable_bitmap[0][0]);
+  CreateBitmap(gfx::Size(32, 32), "discardable", &discardable_bitmap[1][0]);
+  CreateBitmap(gfx::Size(32, 32), "discardable", &discardable_bitmap[1][1]);
+
+  // Discardable pixel refs are found in the following cells:
+  // |---|---|
+  // | x |   |
+  // |---|---|
+  // | x | x |
+  // |---|---|
+  pile->add_draw_bitmap(discardable_bitmap[0][0], gfx::Point(0, 0));
+  pile->add_draw_bitmap(discardable_bitmap[1][0], gfx::Point(0, 130));
+  pile->add_draw_bitmap(discardable_bitmap[1][1], gfx::Point(140, 140));
+
+  pile->RerecordPile();
+
+  // Tile sized iterators. These should find only one pixel ref.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 128, 128), 1.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 256, 256), 2.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 64, 64), 0.5, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  // Shifted tile sized iterators. These should find only one pixel ref.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(140, 140, 128, 128), 1.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(280, 280, 256, 256), 2.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(70, 70, 64, 64), 0.5, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  // Ensure there's no discardable pixel refs in the empty cell
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(140, 0, 128, 128), 1.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  // Layer sized iterators. These should find all 3 pixel refs.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 256, 256), 1.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][0].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 512, 512), 2.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][0].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 128, 128), 0.5, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][0].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+}
+
+// Same pattern as PixelRefIteratorDiscardableRefs but with 256x256 tiles on
+// a 512x512 layer, plus a final check that a copied iterator shares its
+// current position yet advances independently of the original.
+TEST(PicturePileImplTest, PixelRefIteratorDiscardableRefsOneTile) {
+  gfx::Size tile_size(256, 256);
+  gfx::Size layer_bounds(512, 512);
+
+  scoped_refptr<FakePicturePileImpl> pile =
+      FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+
+  // [1][0] is deliberately left empty to match the grid below.
+  SkBitmap discardable_bitmap[2][2];
+  CreateBitmap(gfx::Size(32, 32), "discardable", &discardable_bitmap[0][0]);
+  CreateBitmap(gfx::Size(32, 32), "discardable", &discardable_bitmap[0][1]);
+  CreateBitmap(gfx::Size(32, 32), "discardable", &discardable_bitmap[1][1]);
+
+  // Discardable pixel refs are found in the following cells:
+  // |---|---|
+  // | x | x |
+  // |---|---|
+  // |   | x |
+  // |---|---|
+  pile->add_draw_bitmap(discardable_bitmap[0][0], gfx::Point(0, 0));
+  pile->add_draw_bitmap(discardable_bitmap[0][1], gfx::Point(260, 0));
+  pile->add_draw_bitmap(discardable_bitmap[1][1], gfx::Point(260, 260));
+
+  pile->RerecordPile();
+
+  // Tile sized iterators. These should find only one pixel ref.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 256, 256), 1.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 512, 512), 2.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 128, 128), 0.5, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  // Shifted tile sized iterators. These should find only one pixel ref.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(260, 260, 256, 256), 1.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(520, 520, 512, 512), 2.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(130, 130, 128, 128), 0.5, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  // Ensure there's no discardable pixel refs in the empty cell
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 256, 256, 256), 1.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  // Layer sized iterators. These should find three pixel ref.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 512, 512), 1.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][1].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 1024, 1024), 2.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][1].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 256, 256), 0.5, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][1].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+
+  // Copy test.
+  PicturePileImpl::PixelRefIterator iterator(
+      gfx::Rect(0, 0, 512, 512), 1.0, pile.get());
+  EXPECT_TRUE(iterator);
+  EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+  EXPECT_TRUE(++iterator);
+  EXPECT_TRUE(*iterator == discardable_bitmap[0][1].pixelRef());
+
+  // copy now points to the same spot as iterator,
+  // but both can be incremented independently.
+  PicturePileImpl::PixelRefIterator copy = iterator;
+  EXPECT_TRUE(++iterator);
+  EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+  EXPECT_FALSE(++iterator);
+
+  EXPECT_TRUE(copy);
+  EXPECT_TRUE(*copy == discardable_bitmap[0][1].pixelRef());
+  EXPECT_TRUE(++copy);
+  EXPECT_TRUE(*copy == discardable_bitmap[1][1].pixelRef());
+  EXPECT_FALSE(++copy);
+}
+
+// Same as PixelRefIteratorDiscardableRefsOneTile, but with a layer-sized
+// non-discardable bitmap painted underneath everything: the iterator must
+// still return only the discardable refs, unaffected by the base bitmap.
+TEST(PicturePileImplTest, PixelRefIteratorDiscardableRefsBaseNonDiscardable) {
+  gfx::Size tile_size(256, 256);
+  gfx::Size layer_bounds(512, 512);
+
+  scoped_refptr<FakePicturePileImpl> pile =
+      FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+
+  SkBitmap non_discardable_bitmap;
+  CreateBitmap(gfx::Size(512, 512), "notdiscardable", &non_discardable_bitmap);
+
+  // [1][0] is deliberately left empty to match the grid below.
+  SkBitmap discardable_bitmap[2][2];
+  CreateBitmap(gfx::Size(128, 128), "discardable", &discardable_bitmap[0][0]);
+  CreateBitmap(gfx::Size(128, 128), "discardable", &discardable_bitmap[0][1]);
+  CreateBitmap(gfx::Size(128, 128), "discardable", &discardable_bitmap[1][1]);
+
+  // One large non-discardable bitmap covers the whole grid.
+  // Discardable pixel refs are found in the following cells:
+  // |---|---|
+  // | x | x |
+  // |---|---|
+  // |   | x |
+  // |---|---|
+  pile->add_draw_bitmap(non_discardable_bitmap, gfx::Point(0, 0));
+  pile->add_draw_bitmap(discardable_bitmap[0][0], gfx::Point(0, 0));
+  pile->add_draw_bitmap(discardable_bitmap[0][1], gfx::Point(260, 0));
+  pile->add_draw_bitmap(discardable_bitmap[1][1], gfx::Point(260, 260));
+
+  pile->RerecordPile();
+
+  // Tile sized iterators. These should find only one pixel ref.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 256, 256), 1.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 512, 512), 2.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 128, 128), 0.5, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  // Shifted tile sized iterators. These should find only one pixel ref.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(260, 260, 256, 256), 1.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(520, 520, 512, 512), 2.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(130, 130, 128, 128), 0.5, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  // Ensure there's no discardable pixel refs in the empty cell
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 256, 256, 256), 1.0, pile.get());
+    EXPECT_FALSE(iterator);
+  }
+  // Layer sized iterators. These should find three pixel ref.
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 512, 512), 1.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][1].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 1024, 1024), 2.0, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][1].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    PicturePileImpl::PixelRefIterator iterator(
+        gfx::Rect(0, 0, 256, 256), 0.5, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][0].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0][1].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+}
+
+// Param: true -> pile reports contents_opaque; false -> pile merely promises
+// its recordings fill the layer bounds completely. Behavior must match.
+class FullContentsTest : public ::testing::TestWithParam<bool> {};
+
+// Rasters an opaque (or fully-filled) white pile into canvases one pixel
+// larger than each content rect: every pixel must come out fully opaque, and
+// pixels are all-white exactly when the canvas stays inside content bounds.
+TEST_P(FullContentsTest, RasterFullContents) {
+  gfx::Size tile_size(1000, 1000);
+  gfx::Size layer_bounds(3, 5);
+  float contents_scale = 1.5f;
+  float raster_divisions = 2.f;
+  // Param in this case is whether the content is fully opaque
+  // or just filled completely. For this test they should behave the same.
+  bool contents_opaque = GetParam();
+  bool fills_content = !GetParam();
+
+  scoped_refptr<FakePicturePileImpl> pile =
+      FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+  // Because the caller sets content opaque, it also promises that it
+  // has at least filled in layer_bounds opaquely.
+  SkPaint white_paint;
+  white_paint.setColor(SK_ColorWHITE);
+  pile->add_draw_rect_with_paint(gfx::Rect(layer_bounds), white_paint);
+
+  pile->SetMinContentsScale(contents_scale);
+  pile->set_background_color(SK_ColorBLACK);
+  pile->set_contents_opaque(contents_opaque);
+  pile->set_contents_fill_bounds_completely(fills_content);
+  // Debug-color clearing would overwrite the transparent baseline below.
+  pile->set_clear_canvas_with_debug_color(false);
+  pile->RerecordPile();
+
+  gfx::Size content_bounds(
+      gfx::ToCeiledSize(gfx::ScaleSize(layer_bounds, contents_scale)));
+
+  // Simulate drawing into different tiles at different offsets.
+  int step_x = std::ceil(content_bounds.width() / raster_divisions);
+  int step_y = std::ceil(content_bounds.height() / raster_divisions);
+  for (int offset_x = 0; offset_x < content_bounds.width();
+       offset_x += step_x) {
+    for (int offset_y = 0; offset_y < content_bounds.height();
+         offset_y += step_y) {
+      gfx::Rect content_rect(offset_x, offset_y, step_x, step_y);
+      content_rect.Intersect(gfx::Rect(content_bounds));
+
+      // Simulate a canvas rect larger than the content rect. Every pixel
+      // up to one pixel outside the content rect is guaranteed to be opaque.
+      // Outside of that is undefined.
+      gfx::Rect canvas_rect(content_rect);
+      canvas_rect.Inset(0, 0, -1, -1);
+
+      SkBitmap bitmap;
+      bitmap.allocN32Pixels(canvas_rect.width(), canvas_rect.height());
+      SkCanvas canvas(bitmap);
+      canvas.clear(SK_ColorTRANSPARENT);
+
+      FakeRenderingStatsInstrumentation rendering_stats_instrumentation;
+
+      pile->RasterToBitmap(&canvas,
+                           canvas_rect,
+                           contents_scale,
+                           &rendering_stats_instrumentation);
+
+      SkColor* pixels = reinterpret_cast<SkColor*>(bitmap.getPixels());
+      int num_pixels = bitmap.width() * bitmap.height();
+      bool all_white = true;
+      for (int i = 0; i < num_pixels; ++i) {
+        // Full opacity is required everywhere, including the 1px border.
+        EXPECT_EQ(SkColorGetA(pixels[i]), 255u);
+        all_white &= (SkColorGetR(pixels[i]) == 255);
+        all_white &= (SkColorGetG(pixels[i]) == 255);
+        all_white &= (SkColorGetB(pixels[i]) == 255);
+      }
+
+      // If the canvas doesn't extend past the edge of the content,
+      // it should be entirely white. Otherwise, the edge of the content
+      // will be non-white.
+      EXPECT_EQ(all_white, gfx::Rect(content_bounds).Contains(canvas_rect));
+    }
+  }
+}
+
+// Run RasterFullContents with contents_opaque = false and true.
+INSTANTIATE_TEST_CASE_P(PicturePileImpl,
+                        FullContentsTest,
+                        ::testing::Values(false, true));
+
+// A non-opaque pile with a transparent background, rastered one pixel past
+// its content bounds, must leave every pixel fully transparent (alpha 0).
+TEST(PicturePileImpl, RasterContentsTransparent) {
+  gfx::Size tile_size(1000, 1000);
+  gfx::Size layer_bounds(5, 3);
+  float contents_scale = 0.5f;
+
+  scoped_refptr<FakePicturePileImpl> pile =
+      FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+  pile->set_background_color(SK_ColorTRANSPARENT);
+  pile->set_contents_opaque(false);
+  pile->SetMinContentsScale(contents_scale);
+  // Debug-color clearing would make the alpha check below meaningless.
+  pile->set_clear_canvas_with_debug_color(false);
+  pile->RerecordPile();
+
+  gfx::Size content_bounds(
+      gfx::ToCeiledSize(gfx::ScaleSize(layer_bounds, contents_scale)));
+
+  // Canvas extends one pixel past the content on the right/bottom.
+  gfx::Rect canvas_rect(content_bounds);
+  canvas_rect.Inset(0, 0, -1, -1);
+
+  SkBitmap bitmap;
+  bitmap.allocN32Pixels(canvas_rect.width(), canvas_rect.height());
+  SkCanvas canvas(bitmap);
+
+  FakeRenderingStatsInstrumentation rendering_stats_instrumentation;
+  pile->RasterToBitmap(
+      &canvas, canvas_rect, contents_scale, &rendering_stats_instrumentation);
+
+  SkColor* pixels = reinterpret_cast<SkColor*>(bitmap.getPixels());
+  int num_pixels = bitmap.width() * bitmap.height();
+  for (int i = 0; i < num_pixels; ++i) {
+    EXPECT_EQ(SkColorGetA(pixels[i]), 0u);
+  }
+}
+
+// Param: the contents scale to raster at (may be below, at, or above the
+// pile's minimum contents scale).
+class OverlapTest : public ::testing::TestWithParam<float> {
+ public:
+  // Minimum contents scale the pile is configured with via
+  // SetMinContentsScale().
+  static float MinContentsScale() { return 1.f / 4.f; }
+};
+
+// Rasters with an additive (kPlus) paint: if any region were rastered twice
+// (overlapping tile playback), the additive blend would shift the color, so
+// every output pixel must equal the single-application test color exactly.
+TEST_P(OverlapTest, NoOverlap) {
+  gfx::Size tile_size(10, 10);
+  gfx::Size layer_bounds(30, 30);
+  gfx::Size bigger_than_layer_bounds(300, 300);
+  float contents_scale = GetParam();
+  // Pick an opaque color to not have to deal with premultiplication off-by-one.
+  SkColor test_color = SkColorSetARGB(255, 45, 56, 67);
+
+  scoped_refptr<FakePicturePileImpl> pile =
+      FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+  pile->set_background_color(SK_ColorTRANSPARENT);
+  pile->set_contents_opaque(false);
+  pile->SetMinContentsScale(MinContentsScale());
+  // Debug-color clearing is enabled on purpose: double-clearing would also
+  // perturb the result and be caught by the exact-color checks.
+  pile->set_clear_canvas_with_debug_color(true);
+  SkPaint color_paint;
+  color_paint.setColor(test_color);
+  // Additive paint, so that if two paints overlap, the color will change.
+  color_paint.setXfermodeMode(SkXfermode::kPlus_Mode);
+  // Paint outside the layer to make sure that blending works.
+  pile->add_draw_rect_with_paint(gfx::RectF(bigger_than_layer_bounds),
+                                 color_paint);
+  pile->RerecordPile();
+
+  gfx::Size content_bounds(
+      gfx::ToCeiledSize(gfx::ScaleSize(layer_bounds, contents_scale)));
+
+  SkBitmap bitmap;
+  bitmap.allocN32Pixels(content_bounds.width(), content_bounds.height());
+  SkCanvas canvas(bitmap);
+
+  FakeRenderingStatsInstrumentation rendering_stats_instrumentation;
+  pile->RasterToBitmap(&canvas,
+                       gfx::Rect(content_bounds),
+                       contents_scale,
+                       &rendering_stats_instrumentation);
+
+  for (int y = 0; y < bitmap.height(); y++) {
+    for (int x = 0; x < bitmap.width(); x++) {
+      SkColor color = bitmap.getColor(x, y);
+      EXPECT_EQ(SkColorGetR(test_color), SkColorGetR(color)) << "x: " << x
+                                                             << ", y: " << y;
+      EXPECT_EQ(SkColorGetG(test_color), SkColorGetG(color)) << "x: " << x
+                                                             << ", y: " << y;
+      EXPECT_EQ(SkColorGetB(test_color), SkColorGetB(color)) << "x: " << x
+                                                             << ", y: " << y;
+      EXPECT_EQ(SkColorGetA(test_color), SkColorGetA(color)) << "x: " << x
+                                                             << ", y: " << y;
+      // On mismatch, stop scanning this row to limit failure spam.
+      // NOTE(review): this break exits only the inner (x) loop; remaining
+      // rows are still scanned.
+      if (test_color != color)
+        break;
+    }
+  }
+}
+
+// Scales below, at, and above MinContentsScale() (1/4), plus a non-integral
+// scale, to exercise resampling paths.
+INSTANTIATE_TEST_CASE_P(PicturePileImpl,
+                        OverlapTest,
+                        ::testing::Values(1.f, 0.873f, 1.f / 4.f, 4.f));
+
+// PixelRefIterator across tile borders: bitmaps placed exactly on tiles 0-2
+// of a 3x1 pile also intersect neighboring tiles' border regions, so a
+// per-tile query must include refs from adjacent tiles' borders but nothing
+// further away.
+TEST(PicturePileImplTest, PixelRefIteratorBorders) {
+  // 3 tile width / 1 tile height pile
+  gfx::Size tile_size(128, 128);
+  gfx::Size layer_bounds(320, 128);
+
+  // Fake picture pile impl uses a tile grid the size of the tile. So,
+  // any iteration that intersects with a tile will return all pixel refs
+  // inside of it.
+  scoped_refptr<FakePicturePileImpl> pile =
+      FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+  pile->SetMinContentsScale(0.5f);
+
+  // Bitmaps 0-2 are exactly on tiles 0-2, so that they overlap the borders
+  // of adjacent tiles.
+  gfx::Rect bitmap_rects[] = {pile->tiling().TileBounds(0, 0),
+                              pile->tiling().TileBounds(1, 0),
+                              pile->tiling().TileBounds(2, 0), };
+  SkBitmap discardable_bitmap[arraysize(bitmap_rects)];
+
+  for (size_t i = 0; i < arraysize(bitmap_rects); ++i) {
+    CreateBitmap(bitmap_rects[i].size(), "discardable", &discardable_bitmap[i]);
+    pile->add_draw_bitmap(discardable_bitmap[i], bitmap_rects[i].origin());
+  }
+
+  // Sanity check that bitmaps 0-2 intersect the borders of their adjacent
+  // tiles, but not the actual tiles.
+  EXPECT_TRUE(
+      bitmap_rects[0].Intersects(pile->tiling().TileBoundsWithBorder(1, 0)));
+  EXPECT_FALSE(bitmap_rects[0].Intersects(pile->tiling().TileBounds(1, 0)));
+  EXPECT_TRUE(
+      bitmap_rects[1].Intersects(pile->tiling().TileBoundsWithBorder(0, 0)));
+  EXPECT_FALSE(bitmap_rects[1].Intersects(pile->tiling().TileBounds(0, 0)));
+  EXPECT_TRUE(
+      bitmap_rects[1].Intersects(pile->tiling().TileBoundsWithBorder(2, 0)));
+  EXPECT_FALSE(bitmap_rects[1].Intersects(pile->tiling().TileBounds(2, 0)));
+  EXPECT_TRUE(
+      bitmap_rects[2].Intersects(pile->tiling().TileBoundsWithBorder(1, 0)));
+  EXPECT_FALSE(bitmap_rects[2].Intersects(pile->tiling().TileBounds(1, 0)));
+
+  pile->RerecordPile();
+
+  // Tile-sized iterators.
+  {
+    // Because tile 0's borders extend onto tile 1, it will include both
+    // bitmap 0 and 1. However, it should *not* include bitmap 2.
+    PicturePileImpl::PixelRefIterator iterator(
+        pile->tiling().TileBounds(0, 0), 1.f, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    // Tile 1 + borders hits all bitmaps.
+    PicturePileImpl::PixelRefIterator iterator(
+        pile->tiling().TileBounds(1, 0), 1.f, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[0].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[2].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+  {
+    // Tile 2 should not include bitmap 0, which is only on tile 0 and the
+    // borders of tile 1.
+    PicturePileImpl::PixelRefIterator iterator(
+        pile->tiling().TileBounds(2, 0), 1.f, pile.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[2].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/picture_pile_unittest.cc b/cc/resources/picture_pile_unittest.cc
new file mode 100644
index 0000000..87938fa
--- /dev/null
+++ b/cc/resources/picture_pile_unittest.cc
@@ -0,0 +1,1280 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <map>
+#include <utility>
+
+#include "cc/resources/picture_pile.h"
+#include "cc/test/fake_content_layer_client.h"
+#include "cc/test/fake_rendering_stats_instrumentation.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gfx/rect_conversions.h"
+#include "ui/gfx/size_conversions.h"
+
+namespace cc {
+namespace {
+
+// Test subclass exposing PicturePile's protected internals (picture map,
+// recorded viewport, raster checks) so the tests below can inspect
+// recording state directly.
+class TestPicturePile : public PicturePile {
+ public:
+  using PicturePile::buffer_pixels;
+  using PicturePile::CanRasterSlowTileCheck;
+  using PicturePile::Clear;
+
+  // Direct access to per-tile recording state.
+  PictureMap& picture_map() { return picture_map_; }
+  // The cached rect of fully-recorded tiles (the CanRaster fast path).
+  const gfx::Rect& recorded_viewport() const { return recorded_viewport_; }
+
+  // Convenience wrapper for CanRaster at identity (1.0) contents scale.
+  bool CanRasterLayerRect(const gfx::Rect& layer_rect) {
+    return CanRaster(1.f, layer_rect);
+  }
+
+  typedef PicturePile::PictureInfo PictureInfo;
+  typedef PicturePile::PictureMapKey PictureMapKey;
+  typedef PicturePile::PictureMap PictureMap;
+
+ protected:
+  virtual ~TestPicturePile() {}
+};
+
+// Shared fixture state and helpers for the PicturePile tests: owns the pile,
+// a fake content client, and stats instrumentation, and wraps
+// UpdateAndExpandInvalidation with frame-number bookkeeping.
+class PicturePileTestBase {
+ public:
+  PicturePileTestBase()
+      : background_color_(SK_ColorBLUE),
+        min_scale_(0.125),
+        frame_number_(0),
+        contents_opaque_(false) {}
+
+  // Creates a fresh pile sized to exactly one max-texture-size tile.
+  void InitializeData() {
+    pile_ = make_scoped_refptr(new TestPicturePile());
+    pile_->SetTileGridSize(gfx::Size(1000, 1000));
+    pile_->SetMinContentsScale(min_scale_);
+    client_ = FakeContentLayerClient();
+    SetTilingSize(pile_->tiling().max_texture_size());
+  }
+
+  // Resizes the pile, recording with a viewport covering the new size.
+  void SetTilingSize(const gfx::Size& tiling_size) {
+    Region invalidation;
+    gfx::Rect viewport_rect(tiling_size);
+    UpdateAndExpandInvalidation(&invalidation, tiling_size, viewport_rect);
+  }
+
+  gfx::Size tiling_size() const { return pile_->tiling_size(); }
+  gfx::Rect tiling_rect() const { return gfx::Rect(pile_->tiling_size()); }
+
+  // Forwards to the pile, bumping |frame_number_| each call so the pile's
+  // per-tile invalidation frequency tracking advances.
+  bool UpdateAndExpandInvalidation(Region* invalidation,
+                                   const gfx::Size& layer_size,
+                                   const gfx::Rect& visible_layer_rect) {
+    frame_number_++;
+    return pile_->UpdateAndExpandInvalidation(&client_,
+                                              invalidation,
+                                              background_color_,
+                                              contents_opaque_,
+                                              false,
+                                              layer_size,
+                                              visible_layer_rect,
+                                              frame_number_,
+                                              Picture::RECORD_NORMALLY,
+                                              &stats_instrumentation_);
+  }
+
+  // Invalidates and re-records the entire pile; verifies the invalidation
+  // comes back unexpanded (it already covers everything).
+  bool UpdateWholePile() {
+    Region invalidation = tiling_rect();
+    bool result = UpdateAndExpandInvalidation(
+        &invalidation, tiling_size(), tiling_rect());
+    EXPECT_EQ(tiling_rect().ToString(), invalidation.ToString());
+    return result;
+  }
+
+  FakeContentLayerClient client_;
+  FakeRenderingStatsInstrumentation stats_instrumentation_;
+  scoped_refptr<TestPicturePile> pile_;
+  SkColor background_color_;
+  float min_scale_;
+  int frame_number_;
+  bool contents_opaque_;
+};
+
+// Concrete gtest fixture; builds a fresh one-tile pile before each test.
+class PicturePileTest : public PicturePileTestBase, public testing::Test {
+ public:
+  virtual void SetUp() OVERRIDE { InitializeData(); }
+};
+
+// A 1x1 invalidation must still produce a picture that survives scaling by
+// the minimum contents scale without collapsing to an empty rect.
+TEST_F(PicturePileTest, SmallInvalidateInflated) {
+  // Invalidate something inside a tile.
+  Region invalidate_rect(gfx::Rect(50, 50, 1, 1));
+  UpdateAndExpandInvalidation(&invalidate_rect, tiling_size(), tiling_rect());
+  EXPECT_EQ(gfx::Rect(50, 50, 1, 1).ToString(), invalidate_rect.ToString());
+
+  EXPECT_EQ(1, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(1, pile_->tiling().num_tiles_y());
+
+  TestPicturePile::PictureInfo& picture_info =
+      pile_->picture_map().find(TestPicturePile::PictureMapKey(0, 0))->second;
+  // We should have a picture.
+  EXPECT_TRUE(!!picture_info.GetPicture());
+  gfx::Rect picture_rect = gfx::ScaleToEnclosedRect(
+      picture_info.GetPicture()->LayerRect(), min_scale_);
+
+  // The picture should be large enough that scaling it never makes a rect
+  // smaller than 1 px wide or tall.
+  EXPECT_FALSE(picture_rect.IsEmpty()) << "Picture rect " <<
+      picture_rect.ToString();
+}
+
+// A large invalidation should re-record the base picture inflated by the
+// pile's border ("buffer") pixels on every side.
+TEST_F(PicturePileTest, LargeInvalidateInflated) {
+  // Invalidate something inside a tile.
+  Region invalidate_rect(gfx::Rect(50, 50, 100, 100));
+  UpdateAndExpandInvalidation(&invalidate_rect, tiling_size(), tiling_rect());
+  EXPECT_EQ(gfx::Rect(50, 50, 100, 100).ToString(), invalidate_rect.ToString());
+
+  EXPECT_EQ(1, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(1, pile_->tiling().num_tiles_y());
+
+  TestPicturePile::PictureInfo& picture_info =
+      pile_->picture_map().find(TestPicturePile::PictureMapKey(0, 0))->second;
+  EXPECT_TRUE(!!picture_info.GetPicture());
+
+  int expected_inflation = pile_->buffer_pixels();
+
+  // The recorded picture covers the tiling inset outward by the border.
+  const Picture* base_picture = picture_info.GetPicture();
+  gfx::Rect base_picture_rect(pile_->tiling_size());
+  base_picture_rect.Inset(-expected_inflation, -expected_inflation);
+  EXPECT_EQ(base_picture_rect.ToString(),
+            base_picture->LayerRect().ToString());
+}
+
+// An invalidation straddling a tile border must bump the invalidation
+// frequency of every tile it touches, including the neighbor it only grazes.
+TEST_F(PicturePileTest, InvalidateOnTileBoundaryInflated) {
+  gfx::Size new_tiling_size =
+      gfx::ToCeiledSize(gfx::ScaleSize(pile_->tiling_size(), 2.f));
+  // This creates initial pictures.
+  SetTilingSize(new_tiling_size);
+
+  // Due to border pixels, we should have 3 tiles.
+  EXPECT_EQ(3, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(3, pile_->tiling().num_tiles_y());
+
+  // We should have 1/.125 - 1 = 7 border pixels.
+  EXPECT_EQ(7, pile_->buffer_pixels());
+  EXPECT_EQ(7, pile_->tiling().border_texels());
+
+  // Invalidate everything to have a non-zero invalidation frequency.
+  UpdateWholePile();
+
+  // Invalidate something just over a tile boundary by a single pixel.
+  // This will invalidate the tile (1, 1), as well as 1 row of pixels in (1, 0).
+  Region invalidate_rect(
+      gfx::Rect(pile_->tiling().TileBoundsWithBorder(0, 0).right(),
+                pile_->tiling().TileBoundsWithBorder(0, 0).bottom() - 1,
+                50,
+                50));
+  Region expected_invalidation = invalidate_rect;
+  UpdateAndExpandInvalidation(&invalidate_rect, tiling_size(), tiling_rect());
+  EXPECT_EQ(expected_invalidation.ToString(), invalidate_rect.ToString());
+
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureInfo& picture_info =
+          pile_->picture_map()
+              .find(TestPicturePile::PictureMapKey(i, j))
+              ->second;
+
+      // Expect (1, 1) and (1, 0) to be invalidated once more
+      // than the rest of the tiles.
+      if (i == 1 && (j == 0 || j == 1)) {
+        EXPECT_FLOAT_EQ(
+            2.0f / TestPicturePile::PictureInfo::INVALIDATION_FRAMES_TRACKED,
+            picture_info.GetInvalidationFrequencyForTesting());
+      } else {
+        EXPECT_FLOAT_EQ(
+            1.0f / TestPicturePile::PictureInfo::INVALIDATION_FRAMES_TRACKED,
+            picture_info.GetInvalidationFrequencyForTesting());
+      }
+    }
+  }
+}
+
+// Frequently-invalidated tiles far from the viewport should stop being
+// re-recorded (dropping their pictures), and invalidations on such tiles
+// should expand to full-tile granularity; once activity calms down and the
+// viewport covers them again, recordings should come back.
+TEST_F(PicturePileTest, StopRecordingOffscreenInvalidations) {
+  gfx::Size new_tiling_size =
+      gfx::ToCeiledSize(gfx::ScaleSize(pile_->tiling_size(), 4.f));
+  SetTilingSize(new_tiling_size);
+
+  // A one-pixel-tall viewport along the top edge of the layer.
+  gfx::Rect viewport(tiling_size().width(), 1);
+
+  // Update the whole pile until the invalidation frequency is high.
+  for (int frame = 0; frame < 33; ++frame) {
+    UpdateWholePile();
+  }
+
+  // Make sure we have a high invalidation frequency.
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureInfo& picture_info =
+          pile_->picture_map()
+              .find(TestPicturePile::PictureMapKey(i, j))
+              ->second;
+      EXPECT_FLOAT_EQ(1.0f, picture_info.GetInvalidationFrequencyForTesting())
+          << "i " << i << " j " << j;
+    }
+  }
+
+  // Update once more with a small viewport.
+  Region invalidation(tiling_rect());
+  UpdateAndExpandInvalidation(&invalidation, tiling_size(), viewport);
+  EXPECT_EQ(tiling_rect().ToString(), invalidation.ToString());
+
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureInfo& picture_info =
+          pile_->picture_map()
+              .find(TestPicturePile::PictureMapKey(i, j))
+              ->second;
+      EXPECT_FLOAT_EQ(1.0f, picture_info.GetInvalidationFrequencyForTesting());
+
+      // If the y far enough away we expect to find no picture (no re-recording
+      // happened). For close y, the picture should change.
+      if (j >= 2)
+        EXPECT_FALSE(picture_info.GetPicture()) << "i " << i << " j " << j;
+      else
+        EXPECT_TRUE(picture_info.GetPicture()) << "i " << i << " j " << j;
+    }
+  }
+
+  // Update a partial tile that doesn't get recorded. We should expand the
+  // invalidation to the entire tiles that overlap it.
+  Region small_invalidation =
+      gfx::Rect(pile_->tiling().TileBounds(3, 4).x(),
+                pile_->tiling().TileBounds(3, 4).y() + 10,
+                1,
+                1);
+  UpdateAndExpandInvalidation(&small_invalidation, tiling_size(), viewport);
+  EXPECT_TRUE(small_invalidation.Contains(gfx::UnionRects(
+      pile_->tiling().TileBounds(2, 4), pile_->tiling().TileBounds(3, 4))))
+      << small_invalidation.ToString();
+
+  // Now update with no invalidation and full viewport
+  Region empty_invalidation;
+  UpdateAndExpandInvalidation(
+      &empty_invalidation, tiling_size(), tiling_rect());
+  EXPECT_EQ(Region().ToString(), empty_invalidation.ToString());
+
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureInfo& picture_info =
+          pile_->picture_map()
+              .find(TestPicturePile::PictureMapKey(i, j))
+              ->second;
+      // Expect the invalidation frequency to be less than 1, since we just
+      // updated with no invalidations.
+      EXPECT_LT(picture_info.GetInvalidationFrequencyForTesting(), 1.f);
+
+      // We expect that there are pictures everywhere now.
+      EXPECT_TRUE(picture_info.GetPicture()) << "i " << i << " j " << j;
+    }
+  }
+}
+
+// Clear() must invalidate both the cached recorded-viewport fast path and
+// the per-tile slow path of CanRaster.
+TEST_F(PicturePileTest, ClearingInvalidatesRecordedRect) {
+  gfx::Rect rect(0, 0, 5, 5);
+  EXPECT_TRUE(pile_->CanRasterLayerRect(rect));
+  EXPECT_TRUE(pile_->CanRasterSlowTileCheck(rect));
+
+  pile_->Clear();
+
+  // Make sure both the cache-aware check (using recorded region) and the normal
+  // check are both false after clearing.
+  EXPECT_FALSE(pile_->CanRasterLayerRect(rect));
+  EXPECT_FALSE(pile_->CanRasterSlowTileCheck(rect));
+}
+
+// This test makes sure that if part of the page is frequently invalidated
+// and doesn't get re-recorded, then CanRaster is not true for any
+// tiles touching it, but is true for adjacent tiles, even if it
+// overlaps on borders (edge case).
+TEST_F(PicturePileTest, FrequentInvalidationCanRaster) {
+  gfx::Size new_tiling_size =
+      gfx::ToCeiledSize(gfx::ScaleSize(pile_->tiling_size(), 4.f));
+  SetTilingSize(new_tiling_size);
+
+  gfx::Rect tile01_borders = pile_->tiling().TileBoundsWithBorder(0, 1);
+  gfx::Rect tile02_borders = pile_->tiling().TileBoundsWithBorder(0, 2);
+  gfx::Rect tile01_noborders = pile_->tiling().TileBounds(0, 1);
+  gfx::Rect tile02_noborders = pile_->tiling().TileBounds(0, 2);
+
+  // Sanity check these two tiles are overlapping with borders, since this is
+  // what the test is trying to repro.
+  EXPECT_TRUE(tile01_borders.Intersects(tile02_borders));
+  EXPECT_FALSE(tile01_noborders.Intersects(tile02_noborders));
+  UpdateWholePile();
+  EXPECT_TRUE(pile_->CanRasterLayerRect(tile01_noborders));
+  EXPECT_TRUE(pile_->CanRasterSlowTileCheck(tile01_noborders));
+  EXPECT_TRUE(pile_->CanRasterLayerRect(tile02_noborders));
+  EXPECT_TRUE(pile_->CanRasterSlowTileCheck(tile02_noborders));
+  // Sanity check that an initial paint goes down the fast path of having
+  // a valid recorded viewport.
+  EXPECT_TRUE(!pile_->recorded_viewport().IsEmpty());
+
+  // Update the whole layer until the invalidation frequency is high.
+  for (int frame = 0; frame < 33; ++frame) {
+    UpdateWholePile();
+  }
+
+  // Update once more with a small viewport, so offscreen tiles stop being
+  // re-recorded.
+  gfx::Rect viewport(tiling_size().width(), 1);
+  Region invalidation(tiling_rect());
+  UpdateAndExpandInvalidation(&invalidation, tiling_size(), viewport);
+  EXPECT_EQ(tiling_rect().ToString(), invalidation.ToString());
+
+  // Sanity check some pictures exist and others don't.
+  EXPECT_TRUE(pile_->picture_map()
+                  .find(TestPicturePile::PictureMapKey(0, 1))
+                  ->second.GetPicture());
+  EXPECT_FALSE(pile_->picture_map()
+                   .find(TestPicturePile::PictureMapKey(0, 2))
+                   ->second.GetPicture());
+
+  // CanRaster must fail only for the tile whose recording was dropped, even
+  // though its border overlaps the still-recorded neighbor.
+  EXPECT_TRUE(pile_->CanRasterLayerRect(tile01_noborders));
+  EXPECT_TRUE(pile_->CanRasterSlowTileCheck(tile01_noborders));
+  EXPECT_FALSE(pile_->CanRasterLayerRect(tile02_noborders));
+  EXPECT_FALSE(pile_->CanRasterSlowTileCheck(tile02_noborders));
+}
+
+// This test validates that the recorded_viewport cache of full tiles
+// is still valid for some use cases. If it's not, it's a performance
+// issue because CanRaster checks will go down the slow path.
+TEST_F(PicturePileTest, NoInvalidationValidViewport) {
+  EXPECT_TRUE(!pile_->recorded_viewport().IsEmpty());
+
+  // No invalidation, same viewport.
+  Region invalidation;
+  UpdateAndExpandInvalidation(&invalidation, tiling_size(), tiling_rect());
+  EXPECT_TRUE(!pile_->recorded_viewport().IsEmpty());
+  EXPECT_EQ(Region().ToString(), invalidation.ToString());
+
+  // Partial invalidation, same viewport.
+  invalidation = gfx::Rect(0, 0, 1, 1);
+  UpdateAndExpandInvalidation(&invalidation, tiling_size(), tiling_rect());
+  EXPECT_TRUE(!pile_->recorded_viewport().IsEmpty());
+  EXPECT_EQ(gfx::Rect(0, 0, 1, 1).ToString(), invalidation.ToString());
+
+  // No invalidation, changing viewport.
+  invalidation = Region();
+  UpdateAndExpandInvalidation(
+      &invalidation, tiling_size(), gfx::Rect(5, 5, 5, 5));
+  EXPECT_TRUE(!pile_->recorded_viewport().IsEmpty());
+  EXPECT_EQ(Region().ToString(), invalidation.ToString());
+}
+
+// On a huge layer (where only an interest rect is recorded), invalidations
+// outside the recorded area must be expanded to whole-tile bounds.
+TEST_F(PicturePileTest, InvalidationOutsideRecordingRect) {
+  gfx::Size huge_layer_size(10000000, 20000000);
+  gfx::Rect viewport(300000, 400000, 5000, 6000);
+
+  // Resize the pile and set up the interest rect.
+  Region invalidation;
+  UpdateAndExpandInvalidation(&invalidation, huge_layer_size, viewport);
+
+  // Invalidation inside the recording rect does not need to be expanded.
+  invalidation = viewport;
+  UpdateAndExpandInvalidation(&invalidation, huge_layer_size, viewport);
+  EXPECT_EQ(viewport.ToString(), invalidation.ToString());
+
+  // Invalidation outside the recording rect should expand to the tiles it
+  // covers.
+  gfx::Rect recorded_over_tiles =
+      pile_->tiling().ExpandRectToTileBounds(pile_->recorded_viewport());
+  gfx::Rect invalidation_outside(
+      recorded_over_tiles.right(), recorded_over_tiles.y(), 30, 30);
+  invalidation = invalidation_outside;
+  UpdateAndExpandInvalidation(&invalidation, huge_layer_size, viewport);
+  // NOTE(review): expanded_recorded_viewport below appears unused — looks
+  // safe to remove; confirm it wasn't meant to feed an assertion.
+  gfx::Rect expanded_recorded_viewport =
+      pile_->tiling().ExpandRectToTileBounds(pile_->recorded_viewport());
+  Region expected_invalidation =
+      pile_->tiling().ExpandRectToTileBounds(invalidation_outside);
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+}
+
+// Which corner of the layer the (tiny) interest rect is pinned to in the
+// parameterized resize tests below.
+enum Corner {
+  TOP_LEFT,
+  TOP_RIGHT,
+  BOTTOM_LEFT,
+  BOTTOM_RIGHT,
+};
+
+// Parameterized fixture that runs each resize test with the interest rect
+// pinned to each of the four layer corners.
+class PicturePileResizeCornerTest : public PicturePileTestBase,
+                                    public testing::TestWithParam<Corner> {
+ protected:
+  virtual void SetUp() OVERRIDE { InitializeData(); }
+
+  // Returns a 1x1 rect at the given corner of a layer of size |s|.
+  static gfx::Rect CornerSinglePixelRect(Corner corner, const gfx::Size& s) {
+    switch (corner) {
+      case TOP_LEFT:
+        return gfx::Rect(0, 0, 1, 1);
+      case TOP_RIGHT:
+        return gfx::Rect(s.width() - 1, 0, 1, 1);
+      case BOTTOM_LEFT:
+        return gfx::Rect(0, s.height() - 1, 1, 1);
+      case BOTTOM_RIGHT:
+        return gfx::Rect(s.width() - 1, s.height() - 1, 1, 1);
+    }
+    NOTREACHED();
+    return gfx::Rect();
+  }
+};
+
+// Grows the pile by whole tiles (down, right, then both) and shrinks back,
+// with the interest rect pinned at one corner far from the resized edges.
+// Verifies which recordings are dropped and that invalidation covers the new
+// pixels plus the entire old edge tiles, while shrinking invalidates nothing.
+TEST_P(PicturePileResizeCornerTest, ResizePileOutsideInterestRect) {
+  Corner corner = GetParam();
+
+  // This size chosen to be larger than the interest rect size, which is
+  // at least kPixelDistanceToRecord * 2 in each dimension.
+  int tile_size = 100000;
+  // The small number subtracted keeps the last tile in each axis larger than
+  // the interest rect also.
+  int offset = -100;
+  gfx::Size base_tiling_size(6 * tile_size + offset, 6 * tile_size + offset);
+  gfx::Size grow_down_tiling_size(6 * tile_size + offset,
+                                  8 * tile_size + offset);
+  gfx::Size grow_right_tiling_size(8 * tile_size + offset,
+                                   6 * tile_size + offset);
+  gfx::Size grow_both_tiling_size(8 * tile_size + offset,
+                                  8 * tile_size + offset);
+
+  Region invalidation;
+  Region expected_invalidation;
+
+  pile_->tiling().SetMaxTextureSize(gfx::Size(tile_size, tile_size));
+  SetTilingSize(base_tiling_size);
+
+  // We should have a recording for every tile.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  UpdateAndExpandInvalidation(
+      &invalidation,
+      grow_down_tiling_size,
+      CornerSinglePixelRect(corner, grow_down_tiling_size));
+
+  // We should have lost the recordings in the bottom row.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(8, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < 6; ++i) {
+    for (int j = 0; j < 6; ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_EQ(j < 5, it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // We invalidated all new pixels in the recording.
+  expected_invalidation = SubtractRegions(gfx::Rect(grow_down_tiling_size),
+                                          gfx::Rect(base_tiling_size));
+  // But the new pixels don't cover the whole bottom row.
+  gfx::Rect bottom_row = gfx::UnionRects(pile_->tiling().TileBounds(0, 5),
+                                         pile_->tiling().TileBounds(5, 5));
+  EXPECT_FALSE(expected_invalidation.Contains(bottom_row));
+  // We invalidated the entire old bottom row.
+  expected_invalidation.Union(bottom_row);
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(&invalidation,
+                              base_tiling_size,
+                              CornerSinglePixelRect(corner, base_tiling_size));
+
+  // We should have lost the recordings that are now outside the tiling only.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_EQ(j < 6, it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // No invalidation when shrinking.
+  expected_invalidation.Clear();
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(
+      &invalidation,
+      grow_right_tiling_size,
+      CornerSinglePixelRect(corner, grow_right_tiling_size));
+
+  // We should have lost the recordings in the right column.
+  EXPECT_EQ(8, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < 6; ++i) {
+    for (int j = 0; j < 6; ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_EQ(i < 5, it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // We invalidated all new pixels in the recording.
+  expected_invalidation = SubtractRegions(gfx::Rect(grow_right_tiling_size),
+                                          gfx::Rect(base_tiling_size));
+  // But the new pixels don't cover the whole right column.
+  gfx::Rect right_column = gfx::UnionRects(pile_->tiling().TileBounds(5, 0),
+                                           pile_->tiling().TileBounds(5, 5));
+  EXPECT_FALSE(expected_invalidation.Contains(right_column));
+  // We invalidated the entire old right column.
+  expected_invalidation.Union(right_column);
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(&invalidation,
+                              base_tiling_size,
+                              CornerSinglePixelRect(corner, base_tiling_size));
+
+  // We should have lost the recordings that are now outside the tiling only.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_EQ(i < 6, it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // No invalidation when shrinking.
+  expected_invalidation.Clear();
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(
+      &invalidation,
+      grow_both_tiling_size,
+      CornerSinglePixelRect(corner, grow_both_tiling_size));
+
+  // We should have lost the recordings in the right column and bottom row.
+  EXPECT_EQ(8, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(8, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < 6; ++i) {
+    for (int j = 0; j < 6; ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_EQ(i < 5 && j < 5, it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // We invalidated all new pixels in the recording.
+  expected_invalidation = SubtractRegions(gfx::Rect(grow_both_tiling_size),
+                                          gfx::Rect(base_tiling_size));
+  // But the new pixels don't cover the whole right column and bottom row.
+  Region right_column_and_bottom_row =
+      UnionRegions(gfx::UnionRects(pile_->tiling().TileBounds(5, 0),
+                                   pile_->tiling().TileBounds(5, 5)),
+                   gfx::UnionRects(pile_->tiling().TileBounds(0, 5),
+                                   pile_->tiling().TileBounds(5, 5)));
+  EXPECT_FALSE(expected_invalidation.Contains(right_column_and_bottom_row));
+  // We invalidated the entire old right column and the old bottom row.
+  expected_invalidation.Union(right_column_and_bottom_row);
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(&invalidation, base_tiling_size, gfx::Rect());
+
+  // We should have lost the recordings that are now outside the tiling only.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_EQ(i < 6 && j < 6, it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // No invalidation when shrinking.
+  expected_invalidation.Clear();
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+}
+
+// Same as ResizePileOutsideInterestRect, but grows by only a few pixels so
+// the tile count is unchanged. Edge tiles that intersect the corner interest
+// rect keep their recording and are invalidated only on the new pixels; the
+// rest of the edge tiles lose their recording and are invalidated entirely.
+TEST_P(PicturePileResizeCornerTest, SmallResizePileOutsideInterestRect) {
+  Corner corner = GetParam();
+
+  // This size chosen to be larger than the interest rect size, which is
+  // at least kPixelDistanceToRecord * 2 in each dimension.
+  int tile_size = 100000;
+  // The small number subtracted keeps the last tile in each axis larger than
+  // the interest rect also.
+  int offset = -100;
+  gfx::Size base_tiling_size(6 * tile_size + offset, 6 * tile_size + offset);
+  gfx::Size grow_down_tiling_size(6 * tile_size + offset,
+                                  6 * tile_size + offset + 5);
+  gfx::Size grow_right_tiling_size(6 * tile_size + offset + 5,
+                                   6 * tile_size + offset);
+  gfx::Size grow_both_tiling_size(6 * tile_size + offset + 5,
+                                  6 * tile_size + offset + 5);
+
+  Region invalidation;
+  Region expected_invalidation;
+
+  pile_->tiling().SetMaxTextureSize(gfx::Size(tile_size, tile_size));
+  SetTilingSize(base_tiling_size);
+
+  // We should have a recording for every tile.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  UpdateAndExpandInvalidation(
+      &invalidation,
+      grow_down_tiling_size,
+      CornerSinglePixelRect(corner, grow_down_tiling_size));
+
+  // We should have lost the recordings in the bottom row that do not intersect
+  // the interest rect.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      bool expect_tile;
+      switch (corner) {
+        case TOP_LEFT:
+        case TOP_RIGHT:
+          expect_tile = j < 5;
+          break;
+        case BOTTOM_LEFT:
+          // The interest rect in the bottom left tile means we'll record it.
+          expect_tile = j < 5 || (j == 5 && i == 0);
+          break;
+        case BOTTOM_RIGHT:
+          // The interest rect in the bottom right tile means we'll record it.
+          expect_tile = j < 5 || (j == 5 && i == 5);
+          break;
+      }
+      EXPECT_EQ(expect_tile, it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // We invalidated the bottom row outside the new interest rect. The tile that
+  // intersects the interest rect is invalidated only on its new pixels.
+  switch (corner) {
+    case TOP_LEFT:
+    case TOP_RIGHT:
+      expected_invalidation = gfx::UnionRects(pile_->tiling().TileBounds(0, 5),
+                                              pile_->tiling().TileBounds(5, 5));
+      break;
+    case BOTTOM_LEFT:
+      expected_invalidation = gfx::UnionRects(pile_->tiling().TileBounds(1, 5),
+                                              pile_->tiling().TileBounds(5, 5));
+      expected_invalidation.Union(SubtractRects(
+          pile_->tiling().TileBounds(0, 5), gfx::Rect(base_tiling_size)));
+      break;
+    case BOTTOM_RIGHT:
+      expected_invalidation = gfx::UnionRects(pile_->tiling().TileBounds(0, 5),
+                                              pile_->tiling().TileBounds(4, 5));
+      expected_invalidation.Union(SubtractRects(
+          pile_->tiling().TileBounds(5, 5), gfx::Rect(base_tiling_size)));
+      break;
+  }
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(&invalidation,
+                              base_tiling_size,
+                              CornerSinglePixelRect(corner, base_tiling_size));
+
+  // We should have lost nothing.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // We invalidated nothing.
+  expected_invalidation.Clear();
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(
+      &invalidation,
+      grow_right_tiling_size,
+      CornerSinglePixelRect(corner, grow_right_tiling_size));
+
+  // We should have lost the recordings in the right column.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      bool expect_tile;
+      switch (corner) {
+        case TOP_LEFT:
+        case BOTTOM_LEFT:
+          expect_tile = i < 5;
+          break;
+        case TOP_RIGHT:
+          // The interest rect in the top right tile means we'll record it.
+          expect_tile = i < 5 || (j == 0 && i == 5);
+          break;
+        case BOTTOM_RIGHT:
+          // The interest rect in the bottom right tile means we'll record it.
+          expect_tile = i < 5 || (j == 5 && i == 5);
+          break;
+      }
+      EXPECT_EQ(expect_tile, it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // We invalidated the right column outside the new interest rect. The tile
+  // that intersects the interest rect is invalidated only on its new pixels.
+  switch (corner) {
+    case TOP_LEFT:
+    case BOTTOM_LEFT:
+      expected_invalidation = gfx::UnionRects(pile_->tiling().TileBounds(5, 0),
+                                              pile_->tiling().TileBounds(5, 5));
+      break;
+    case TOP_RIGHT:
+      expected_invalidation = gfx::UnionRects(pile_->tiling().TileBounds(5, 1),
+                                              pile_->tiling().TileBounds(5, 5));
+      expected_invalidation.Union(SubtractRects(
+          pile_->tiling().TileBounds(5, 0), gfx::Rect(base_tiling_size)));
+      break;
+    case BOTTOM_RIGHT:
+      expected_invalidation = gfx::UnionRects(pile_->tiling().TileBounds(5, 0),
+                                              pile_->tiling().TileBounds(5, 4));
+      expected_invalidation.Union(SubtractRects(
+          pile_->tiling().TileBounds(5, 5), gfx::Rect(base_tiling_size)));
+      break;
+  }
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(&invalidation,
+                              base_tiling_size,
+                              CornerSinglePixelRect(corner, base_tiling_size));
+
+  // We should have lost nothing.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // We invalidated nothing.
+  expected_invalidation.Clear();
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(
+      &invalidation,
+      grow_both_tiling_size,
+      CornerSinglePixelRect(corner, grow_both_tiling_size));
+
+  // We should have lost the recordings in the right column and bottom row. The
+  // tile that intersects the interest rect is invalidated only on its new
+  // pixels.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      bool expect_tile;
+      switch (corner) {
+        case TOP_LEFT:
+          expect_tile = i < 5 && j < 5;
+          break;
+        case TOP_RIGHT:
+          // The interest rect in the top right tile means we'll record it.
+          expect_tile = (i < 5 && j < 5) || (j == 0 && i == 5);
+          break;
+        case BOTTOM_LEFT:
+          // The interest rect in the bottom left tile means we'll record it.
+          expect_tile = (i < 5 && j < 5) || (j == 5 && i == 0);
+          break;
+        case BOTTOM_RIGHT:
+          // The interest rect in the bottom right tile means we'll record it.
+          expect_tile = (i < 5 && j < 5) || (j == 5 && i == 5);
+          break;
+      }
+      EXPECT_EQ(expect_tile, it != map.end() && it->second.GetPicture())
+          << i << "," << j;
+    }
+  }
+
+  // We invalidated the right column and the bottom row outside the new
+  // interest rect. The tile that intersects the interest rect is invalidated
+  // only on its new pixels.
+  switch (corner) {
+    case TOP_LEFT:
+      expected_invalidation = gfx::UnionRects(pile_->tiling().TileBounds(5, 0),
+                                              pile_->tiling().TileBounds(5, 5));
+      expected_invalidation.Union(gfx::UnionRects(
+          pile_->tiling().TileBounds(0, 5), pile_->tiling().TileBounds(5, 5)));
+      break;
+    case TOP_RIGHT:
+      expected_invalidation = gfx::UnionRects(pile_->tiling().TileBounds(5, 1),
+                                              pile_->tiling().TileBounds(5, 5));
+      expected_invalidation.Union(gfx::UnionRects(
+          pile_->tiling().TileBounds(0, 5), pile_->tiling().TileBounds(5, 5)));
+      expected_invalidation.Union(SubtractRects(
+          pile_->tiling().TileBounds(5, 0), gfx::Rect(base_tiling_size)));
+      break;
+    case BOTTOM_LEFT:
+      expected_invalidation = gfx::UnionRects(pile_->tiling().TileBounds(5, 0),
+                                              pile_->tiling().TileBounds(5, 5));
+      expected_invalidation.Union(gfx::UnionRects(
+          pile_->tiling().TileBounds(1, 5), pile_->tiling().TileBounds(5, 5)));
+      expected_invalidation.Union(SubtractRects(
+          pile_->tiling().TileBounds(0, 5), gfx::Rect(base_tiling_size)));
+      break;
+    case BOTTOM_RIGHT:
+      expected_invalidation = gfx::UnionRects(pile_->tiling().TileBounds(5, 0),
+                                              pile_->tiling().TileBounds(5, 4));
+      expected_invalidation.Union(gfx::UnionRects(
+          pile_->tiling().TileBounds(0, 5), pile_->tiling().TileBounds(4, 5)));
+      expected_invalidation.Union(SubtractRegions(
+          pile_->tiling().TileBounds(5, 5), gfx::Rect(base_tiling_size)));
+      break;
+  }
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(&invalidation,
+                              base_tiling_size,
+                              CornerSinglePixelRect(corner, base_tiling_size));
+
+  // We should have lost nothing.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // We invalidated nothing.
+  expected_invalidation.Clear();
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+}
+
+// Instantiate the parameterized corner-resize tests once for each of the
+// four corners of the pile.
+INSTANTIATE_TEST_CASE_P(
+    PicturePileResizeCornerTests,
+    PicturePileResizeCornerTest,
+    ::testing::Values(TOP_LEFT, TOP_RIGHT, BOTTOM_LEFT, BOTTOM_RIGHT));
+
+// Resizes the pile in each direction (down, right, both) and back while the
+// resize stays inside the interest rect, verifying that every tile keeps its
+// recording and that only newly exposed pixels are invalidated.
+TEST_F(PicturePileTest, ResizePileInsideInterestRect) {
+  // This size is chosen to be small enough that all the rects below fit inside
+  // the interest rect, so they are smaller than kPixelDistanceToRecord in each
+  // dimension.
+  int tile_size = 100;
+  gfx::Size base_tiling_size(5 * tile_size, 5 * tile_size);
+  gfx::Size grow_down_tiling_size(5 * tile_size, 7 * tile_size);
+  gfx::Size grow_right_tiling_size(7 * tile_size, 5 * tile_size);
+  gfx::Size grow_both_tiling_size(7 * tile_size, 7 * tile_size);
+
+  Region invalidation;
+  Region expected_invalidation;
+
+  pile_->tiling().SetMaxTextureSize(gfx::Size(tile_size, tile_size));
+  SetTilingSize(base_tiling_size);
+
+  // We should have a recording for every tile.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  UpdateAndExpandInvalidation(
+      &invalidation, grow_down_tiling_size, gfx::Rect(1, 1));
+
+  // We should have a recording for every tile.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(8, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // We invalidated the newly exposed pixels on the bottom row of tiles.
+  expected_invalidation = SubtractRegions(gfx::Rect(grow_down_tiling_size),
+                                          gfx::Rect(base_tiling_size));
+  Region bottom_row_new_pixels =
+      SubtractRegions(gfx::UnionRects(pile_->tiling().TileBounds(0, 5),
+                                      pile_->tiling().TileBounds(5, 5)),
+                      gfx::Rect(base_tiling_size));
+  EXPECT_TRUE(expected_invalidation.Contains(bottom_row_new_pixels));
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(&invalidation, base_tiling_size, gfx::Rect(1, 1));
+
+  // We should have a recording for every tile.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // No invalidation when shrinking.
+  EXPECT_EQ(Region().ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(
+      &invalidation, grow_right_tiling_size, gfx::Rect(1, 1));
+
+  // We should have a recording for every tile.
+  EXPECT_EQ(8, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // We invalidated the newly exposed pixels on the right column of tiles.
+  expected_invalidation = SubtractRegions(gfx::Rect(grow_right_tiling_size),
+                                          gfx::Rect(base_tiling_size));
+  Region right_column_new_pixels =
+      SubtractRegions(gfx::UnionRects(pile_->tiling().TileBounds(5, 0),
+                                      pile_->tiling().TileBounds(5, 5)),
+                      gfx::Rect(base_tiling_size));
+  EXPECT_TRUE(expected_invalidation.Contains(right_column_new_pixels));
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(&invalidation, base_tiling_size, gfx::Rect(1, 1));
+
+  // We should have lost the recordings that are now outside the tiling only.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // No invalidation when shrinking.
+  EXPECT_EQ(Region().ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(
+      &invalidation, grow_both_tiling_size, gfx::Rect(1, 1));
+
+  // We should have a recording for every tile.
+  EXPECT_EQ(8, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(8, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // We invalidated the newly exposed pixels on the bottom row and right column
+  // of tiles.
+  expected_invalidation = SubtractRegions(gfx::Rect(grow_both_tiling_size),
+                                          gfx::Rect(base_tiling_size));
+  Region bottom_row_and_right_column_new_pixels = SubtractRegions(
+      UnionRegions(gfx::UnionRects(pile_->tiling().TileBounds(0, 5),
+                                   pile_->tiling().TileBounds(5, 5)),
+                   gfx::UnionRects(pile_->tiling().TileBounds(5, 0),
+                                   pile_->tiling().TileBounds(5, 5))),
+      gfx::Rect(base_tiling_size));
+  EXPECT_TRUE(
+      expected_invalidation.Contains(bottom_row_and_right_column_new_pixels));
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(&invalidation, base_tiling_size, gfx::Rect());
+
+  // We should have lost the recordings that are now outside the tiling only.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // No invalidation when shrinking.
+  EXPECT_EQ(Region().ToString(), invalidation.ToString());
+  invalidation.Clear();
+}
+
+// Same as ResizePileInsideInterestRect but with resizes of only a few pixels,
+// small enough that the tile counts never change; only the newly exposed
+// pixels should be invalidated and no recordings should be lost.
+TEST_F(PicturePileTest, SmallResizePileInsideInterestRect) {
+  // This size is chosen to be small enough that all the rects below fit inside
+  // the interest rect, so they are smaller than kPixelDistanceToRecord in each
+  // dimension.
+  int tile_size = 100;
+  gfx::Size base_tiling_size(5 * tile_size, 5 * tile_size);
+  gfx::Size grow_down_tiling_size(5 * tile_size, 5 * tile_size + 5);
+  gfx::Size grow_right_tiling_size(5 * tile_size + 5, 5 * tile_size);
+  gfx::Size grow_both_tiling_size(5 * tile_size + 5, 5 * tile_size + 5);
+
+  Region invalidation;
+  Region expected_invalidation;
+
+  pile_->tiling().SetMaxTextureSize(gfx::Size(tile_size, tile_size));
+  SetTilingSize(base_tiling_size);
+
+  // We should have a recording for every tile.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  UpdateAndExpandInvalidation(
+      &invalidation, grow_down_tiling_size, gfx::Rect(1, 1));
+
+  // We should have a recording for every tile.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // We invalidated the newly exposed pixels.
+  expected_invalidation = SubtractRegions(gfx::Rect(grow_down_tiling_size),
+                                          gfx::Rect(base_tiling_size));
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(&invalidation, base_tiling_size, gfx::Rect(1, 1));
+
+  // We should have a recording for every tile.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // No invalidation when shrinking.
+  EXPECT_EQ(Region().ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(
+      &invalidation, grow_right_tiling_size, gfx::Rect(1, 1));
+
+  // We should have a recording for every tile.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // We invalidated the newly exposed pixels.
+  expected_invalidation = SubtractRegions(gfx::Rect(grow_right_tiling_size),
+                                          gfx::Rect(base_tiling_size));
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(&invalidation, base_tiling_size, gfx::Rect(1, 1));
+
+  // We should have lost the recordings that are now outside the tiling only.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // No invalidation when shrinking.
+  EXPECT_EQ(Region().ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(
+      &invalidation, grow_both_tiling_size, gfx::Rect(1, 1));
+
+  // We should have a recording for every tile.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // We invalidated the newly exposed pixels.
+  expected_invalidation = SubtractRegions(gfx::Rect(grow_both_tiling_size),
+                                          gfx::Rect(base_tiling_size));
+  EXPECT_EQ(expected_invalidation.ToString(), invalidation.ToString());
+  invalidation.Clear();
+
+  UpdateWholePile();
+  UpdateAndExpandInvalidation(&invalidation, base_tiling_size, gfx::Rect());
+
+  // We should have lost the recordings that are now outside the tiling only.
+  EXPECT_EQ(6, pile_->tiling().num_tiles_x());
+  EXPECT_EQ(6, pile_->tiling().num_tiles_y());
+  for (int i = 0; i < pile_->tiling().num_tiles_x(); ++i) {
+    for (int j = 0; j < pile_->tiling().num_tiles_y(); ++j) {
+      TestPicturePile::PictureMapKey key(i, j);
+      TestPicturePile::PictureMap& map = pile_->picture_map();
+      TestPicturePile::PictureMap::iterator it = map.find(key);
+      EXPECT_TRUE(it != map.end() && it->second.GetPicture());
+    }
+  }
+
+  // No invalidation when shrinking.
+  EXPECT_EQ(Region().ToString(), invalidation.ToString());
+  invalidation.Clear();
+}
+
+// Verifies the solid-color analysis of the pile: empty content and a single
+// covering rect are solid; partial coverage is not; too many draw ops skip
+// the analysis entirely.
+TEST_F(PicturePileTest, SolidRectangleIsSolid) {
+  // If the client has no contents, the solid state will be true.
+  Region invalidation1(tiling_rect());
+  UpdateAndExpandInvalidation(&invalidation1, tiling_size(), tiling_rect());
+  EXPECT_TRUE(pile_->is_solid_color());
+  EXPECT_EQ(static_cast<SkColor>(SK_ColorTRANSPARENT), pile_->solid_color());
+
+  // If there is a single rect that covers the view, the solid
+  // state will be true.
+  SkPaint paint;
+  paint.setColor(SK_ColorCYAN);
+  client_.add_draw_rect(tiling_rect(), paint);
+  Region invalidation2(tiling_rect());
+  UpdateAndExpandInvalidation(&invalidation2, tiling_size(), tiling_rect());
+  EXPECT_TRUE(pile_->is_solid_color());
+  EXPECT_EQ(SK_ColorCYAN, pile_->solid_color());
+
+  // If a second smaller rect is drawn that doesn't cover the viewport
+  // completely, the solid state will be false.
+  gfx::Rect small_rect = tiling_rect();
+  small_rect.Inset(10, 10, 10, 10);
+  client_.add_draw_rect(small_rect, paint);
+  Region invalidation3(tiling_rect());
+  UpdateAndExpandInvalidation(&invalidation3, tiling_size(), tiling_rect());
+  EXPECT_FALSE(pile_->is_solid_color());
+
+  // If a third rect is drawn over everything, we should be solid again.
+  paint.setColor(SK_ColorRED);
+  client_.add_draw_rect(tiling_rect(), paint);
+  Region invalidation4(tiling_rect());
+  UpdateAndExpandInvalidation(&invalidation4, tiling_size(), tiling_rect());
+  EXPECT_TRUE(pile_->is_solid_color());
+  EXPECT_EQ(SK_ColorRED, pile_->solid_color());
+
+  // If we draw too many, we don't bother doing the analysis and we should no
+  // longer be in a solid state. There are 8 rects, two clips and a translate.
+  client_.add_draw_rect(tiling_rect(), paint);
+  client_.add_draw_rect(tiling_rect(), paint);
+  client_.add_draw_rect(tiling_rect(), paint);
+  client_.add_draw_rect(tiling_rect(), paint);
+  client_.add_draw_rect(tiling_rect(), paint);
+  Region invalidation5(tiling_rect());
+  UpdateAndExpandInvalidation(&invalidation5, tiling_size(), tiling_rect());
+  EXPECT_FALSE(pile_->is_solid_color());
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/picture_unittest.cc b/cc/resources/picture_unittest.cc
new file mode 100644
index 0000000..fc25989
--- /dev/null
+++ b/cc/resources/picture_unittest.cc
@@ -0,0 +1,494 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/picture.h"
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/values.h"
+#include "cc/test/fake_content_layer_client.h"
+#include "cc/test/skia_common.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/skia/include/core/SkBBHFactory.h"
+#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkGraphics.h"
+#include "third_party/skia/include/core/SkPixelRef.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/skia_util.h"
+
+namespace cc {
+namespace {
+
+// Serializes pictures to base::Value (base64) and reconstructs them, then
+// rasters both original and round-tripped pictures and compares the pixels.
+// Exercises both RECORD_NORMALLY and RECORD_WITH_SKRECORD recording modes.
+TEST(PictureTest, AsBase64String) {
+  SkGraphics::Init();
+
+  gfx::Rect layer_rect(100, 100);
+
+  SkTileGridFactory::TileGridInfo tile_grid_info;
+  tile_grid_info.fTileInterval = SkISize::Make(100, 100);
+  tile_grid_info.fMargin.setEmpty();
+  tile_grid_info.fOffset.setZero();
+
+  FakeContentLayerClient content_layer_client;
+
+  scoped_ptr<base::Value> tmp;
+
+  SkPaint red_paint;
+  red_paint.setColor(SkColorSetARGB(255, 255, 0, 0));
+  SkPaint green_paint;
+  green_paint.setColor(SkColorSetARGB(255, 0, 255, 0));
+
+  // Invalid picture (not a dict).
+  tmp.reset(new base::StringValue("abc!@#$%"));
+  scoped_refptr<Picture> invalid_picture =
+      Picture::CreateFromValue(tmp.get());
+  EXPECT_FALSE(invalid_picture.get());
+
+  Picture::RecordingMode kRecordingModes[] = {Picture::RECORD_NORMALLY,
+                                              Picture::RECORD_WITH_SKRECORD};
+
+  // Single full-size rect picture.
+  content_layer_client.add_draw_rect(layer_rect, red_paint);
+
+  for (size_t i = 0; i < arraysize(kRecordingModes); ++i) {
+    scoped_refptr<Picture> one_rect_picture =
+        Picture::Create(layer_rect,
+                        &content_layer_client,
+                        tile_grid_info,
+                        false,
+                        kRecordingModes[i]);
+    scoped_ptr<base::Value> serialized_one_rect(one_rect_picture->AsValue());
+
+    // Reconstruct the picture.
+    scoped_refptr<Picture> one_rect_picture_check =
+        Picture::CreateFromValue(serialized_one_rect.get());
+    EXPECT_TRUE(!!one_rect_picture_check.get());
+
+    // Check for equivalence.
+    unsigned char one_rect_buffer[4 * 100 * 100] = {0};
+    DrawPicture(one_rect_buffer, layer_rect, one_rect_picture);
+    unsigned char one_rect_buffer_check[4 * 100 * 100] = {0};
+    DrawPicture(one_rect_buffer_check, layer_rect, one_rect_picture_check);
+
+    EXPECT_EQ(one_rect_picture->LayerRect(),
+              one_rect_picture_check->LayerRect());
+    EXPECT_EQ(0, memcmp(one_rect_buffer, one_rect_buffer_check, 4 * 100 * 100));
+  }
+
+  // Two rect picture.
+  content_layer_client.add_draw_rect(gfx::Rect(25, 25, 50, 50), green_paint);
+
+  for (size_t i = 0; i < arraysize(kRecordingModes); ++i) {
+    // Record with the mode under test (previously this always used
+    // RECORD_NORMALLY, leaving the loop index unused and the SKRECORD mode
+    // untested for the two-rect picture).
+    scoped_refptr<Picture> two_rect_picture =
+        Picture::Create(layer_rect,
+                        &content_layer_client,
+                        tile_grid_info,
+                        false,
+                        kRecordingModes[i]);
+
+    scoped_ptr<base::Value> serialized_two_rect(two_rect_picture->AsValue());
+
+    // Reconstruct the picture.
+    scoped_refptr<Picture> two_rect_picture_check =
+        Picture::CreateFromValue(serialized_two_rect.get());
+    EXPECT_TRUE(!!two_rect_picture_check.get());
+
+    // Check for equivalence.
+    unsigned char two_rect_buffer[4 * 100 * 100] = {0};
+    DrawPicture(two_rect_buffer, layer_rect, two_rect_picture);
+    unsigned char two_rect_buffer_check[4 * 100 * 100] = {0};
+    DrawPicture(two_rect_buffer_check, layer_rect, two_rect_picture_check);
+
+    EXPECT_EQ(two_rect_picture->LayerRect(),
+              two_rect_picture_check->LayerRect());
+    EXPECT_EQ(0, memcmp(two_rect_buffer, two_rect_buffer_check, 4 * 100 * 100));
+  }
+}
+
+// Verifies that PixelRefIterator returns exactly the discardable pixel refs
+// whose bitmaps intersect the query rect, in grid order, and that a copied
+// iterator advances independently of the original.
+TEST(PictureTest, PixelRefIterator) {
+  gfx::Rect layer_rect(2048, 2048);
+
+  SkTileGridFactory::TileGridInfo tile_grid_info;
+  tile_grid_info.fTileInterval = SkISize::Make(512, 512);
+  tile_grid_info.fMargin.setEmpty();
+  tile_grid_info.fOffset.setZero();
+
+  FakeContentLayerClient content_layer_client;
+
+  // Discardable pixel refs are found in the following grids:
+  // |---|---|---|---|
+  // |   | x |   | x |
+  // |---|---|---|---|
+  // | x |   | x |   |
+  // |---|---|---|---|
+  // |   | x |   | x |
+  // |---|---|---|---|
+  // | x |   | x |   |
+  // |---|---|---|---|
+  SkBitmap discardable_bitmap[4][4];
+  for (int y = 0; y < 4; ++y) {
+    for (int x = 0; x < 4; ++x) {
+      if ((x + y) & 1) {
+        CreateBitmap(
+            gfx::Size(500, 500), "discardable", &discardable_bitmap[y][x]);
+        SkPaint paint;
+        content_layer_client.add_draw_bitmap(
+            discardable_bitmap[y][x],
+            gfx::Point(x * 512 + 6, y * 512 + 6), paint);
+      }
+    }
+  }
+
+  scoped_refptr<Picture> picture = Picture::Create(layer_rect,
+                                                   &content_layer_client,
+                                                   tile_grid_info,
+                                                   true,
+                                                   Picture::RECORD_NORMALLY);
+
+  // Default iterator does not have any pixel refs
+  {
+    Picture::PixelRefIterator iterator;
+    EXPECT_FALSE(iterator);
+  }
+  // Each single-tile query should yield exactly the one pixel ref drawn in
+  // that tile (or none for untouched tiles).
+  for (int y = 0; y < 4; ++y) {
+    for (int x = 0; x < 4; ++x) {
+      Picture::PixelRefIterator iterator(gfx::Rect(x * 512, y * 512, 500, 500),
+                                         picture.get());
+      if ((x + y) & 1) {
+        EXPECT_TRUE(iterator) << x << " " << y;
+        EXPECT_TRUE(*iterator == discardable_bitmap[y][x].pixelRef()) << x <<
+            " " << y;
+        EXPECT_FALSE(++iterator) << x << " " << y;
+      } else {
+        EXPECT_FALSE(iterator) << x << " " << y;
+      }
+    }
+  }
+  // Capture 4 pixel refs.
+  {
+    Picture::PixelRefIterator iterator(gfx::Rect(512, 512, 2048, 2048),
+                                       picture.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][2].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[2][1].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[2][3].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[3][2].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+
+  // Copy test.
+  Picture::PixelRefIterator iterator(gfx::Rect(512, 512, 2048, 2048),
+                                     picture.get());
+  EXPECT_TRUE(iterator);
+  EXPECT_TRUE(*iterator == discardable_bitmap[1][2].pixelRef());
+  EXPECT_TRUE(++iterator);
+  EXPECT_TRUE(*iterator == discardable_bitmap[2][1].pixelRef());
+
+  // copy now points to the same spot as iterator,
+  // but both can be incremented independently.
+  Picture::PixelRefIterator copy = iterator;
+  EXPECT_TRUE(++iterator);
+  EXPECT_TRUE(*iterator == discardable_bitmap[2][3].pixelRef());
+  EXPECT_TRUE(++iterator);
+  EXPECT_TRUE(*iterator == discardable_bitmap[3][2].pixelRef());
+  EXPECT_FALSE(++iterator);
+
+  EXPECT_TRUE(copy);
+  EXPECT_TRUE(*copy == discardable_bitmap[2][1].pixelRef());
+  EXPECT_TRUE(++copy);
+  EXPECT_TRUE(*copy == discardable_bitmap[2][3].pixelRef());
+  EXPECT_TRUE(++copy);
+  EXPECT_TRUE(*copy == discardable_bitmap[3][2].pixelRef());
+  EXPECT_FALSE(++copy);
+}
+
+// Same as PixelRefIterator but with a layer rect whose origin is offset
+// (1024, 0), verifying that queries are interpreted in layer space and that
+// rects outside the layer yield no pixel refs.
+TEST(PictureTest, PixelRefIteratorNonZeroLayer) {
+  gfx::Rect layer_rect(1024, 0, 2048, 2048);
+
+  SkTileGridFactory::TileGridInfo tile_grid_info;
+  tile_grid_info.fTileInterval = SkISize::Make(512, 512);
+  tile_grid_info.fMargin.setEmpty();
+  tile_grid_info.fOffset.setZero();
+
+  FakeContentLayerClient content_layer_client;
+
+  // Discardable pixel refs are found in the following grids:
+  // |---|---|---|---|
+  // |   | x |   | x |
+  // |---|---|---|---|
+  // | x |   | x |   |
+  // |---|---|---|---|
+  // |   | x |   | x |
+  // |---|---|---|---|
+  // | x |   | x |   |
+  // |---|---|---|---|
+  SkBitmap discardable_bitmap[4][4];
+  for (int y = 0; y < 4; ++y) {
+    for (int x = 0; x < 4; ++x) {
+      if ((x + y) & 1) {
+        CreateBitmap(
+            gfx::Size(500, 500), "discardable", &discardable_bitmap[y][x]);
+        SkPaint paint;
+        content_layer_client.add_draw_bitmap(
+            discardable_bitmap[y][x],
+            gfx::Point(1024 + x * 512 + 6, y * 512 + 6), paint);
+      }
+    }
+  }
+
+  scoped_refptr<Picture> picture = Picture::Create(layer_rect,
+                                                   &content_layer_client,
+                                                   tile_grid_info,
+                                                   true,
+                                                   Picture::RECORD_NORMALLY);
+
+  // Default iterator does not have any pixel refs
+  {
+    Picture::PixelRefIterator iterator;
+    EXPECT_FALSE(iterator);
+  }
+  for (int y = 0; y < 4; ++y) {
+    for (int x = 0; x < 4; ++x) {
+      Picture::PixelRefIterator iterator(
+          gfx::Rect(1024 + x * 512, y * 512, 500, 500), picture.get());
+      if ((x + y) & 1) {
+        EXPECT_TRUE(iterator) << x << " " << y;
+        EXPECT_TRUE(*iterator == discardable_bitmap[y][x].pixelRef());
+        EXPECT_FALSE(++iterator) << x << " " << y;
+      } else {
+        EXPECT_FALSE(iterator) << x << " " << y;
+      }
+    }
+  }
+  // Capture 4 pixel refs.
+  {
+    Picture::PixelRefIterator iterator(gfx::Rect(1024 + 512, 512, 2048, 2048),
+                                       picture.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][2].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[2][1].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[2][3].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[3][2].pixelRef());
+    EXPECT_FALSE(++iterator);
+  }
+
+  // Copy test.
+  {
+    Picture::PixelRefIterator iterator(gfx::Rect(1024 + 512, 512, 2048, 2048),
+                                       picture.get());
+    EXPECT_TRUE(iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[1][2].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[2][1].pixelRef());
+
+    // copy now points to the same spot as iterator,
+    // but both can be incremented independently.
+    Picture::PixelRefIterator copy = iterator;
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[2][3].pixelRef());
+    EXPECT_TRUE(++iterator);
+    EXPECT_TRUE(*iterator == discardable_bitmap[3][2].pixelRef());
+    EXPECT_FALSE(++iterator);
+
+    EXPECT_TRUE(copy);
+    EXPECT_TRUE(*copy == discardable_bitmap[2][1].pixelRef());
+    EXPECT_TRUE(++copy);
+    EXPECT_TRUE(*copy == discardable_bitmap[2][3].pixelRef());
+    EXPECT_TRUE(++copy);
+    EXPECT_TRUE(*copy == discardable_bitmap[3][2].pixelRef());
+    EXPECT_FALSE(++copy);
+  }
+
+  // Non intersecting rects
+  {
+    Picture::PixelRefIterator iterator(gfx::Rect(0, 0, 1000, 1000),
+                                       picture.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    Picture::PixelRefIterator iterator(gfx::Rect(3500, 0, 1000, 1000),
+                                       picture.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    Picture::PixelRefIterator iterator(gfx::Rect(0, 1100, 1000, 1000),
+                                       picture.get());
+    EXPECT_FALSE(iterator);
+  }
+  {
+    Picture::PixelRefIterator iterator(gfx::Rect(3500, 1100, 1000, 1000),
+                                       picture.get());
+    EXPECT_FALSE(iterator);
+  }
+}
+
+// Verifies that a 1x1 query rect is enough to find the pixel ref of the
+// bitmap covering that pixel.
+TEST(PictureTest, PixelRefIteratorOnePixelQuery) {
+  gfx::Rect layer_rect(2048, 2048);
+
+  SkTileGridFactory::TileGridInfo tile_grid_info;
+  tile_grid_info.fTileInterval = SkISize::Make(512, 512);
+  tile_grid_info.fMargin.setEmpty();
+  tile_grid_info.fOffset.setZero();
+
+  FakeContentLayerClient content_layer_client;
+
+  // Discardable pixel refs are found in the following grids:
+  // |---|---|---|---|
+  // |   | x |   | x |
+  // |---|---|---|---|
+  // | x |   | x |   |
+  // |---|---|---|---|
+  // |   | x |   | x |
+  // |---|---|---|---|
+  // | x |   | x |   |
+  // |---|---|---|---|
+  SkBitmap discardable_bitmap[4][4];
+  for (int y = 0; y < 4; ++y) {
+    for (int x = 0; x < 4; ++x) {
+      if ((x + y) & 1) {
+        CreateBitmap(
+            gfx::Size(500, 500), "discardable", &discardable_bitmap[y][x]);
+        SkPaint paint;
+        content_layer_client.add_draw_bitmap(
+            discardable_bitmap[y][x],
+            gfx::Point(x * 512 + 6, y * 512 + 6), paint);
+      }
+    }
+  }
+
+  scoped_refptr<Picture> picture = Picture::Create(layer_rect,
+                                                   &content_layer_client,
+                                                   tile_grid_info,
+                                                   true,
+                                                   Picture::RECORD_NORMALLY);
+
+  // Query a single pixel in the vertical middle of each tile column.
+  for (int y = 0; y < 4; ++y) {
+    for (int x = 0; x < 4; ++x) {
+      Picture::PixelRefIterator iterator(
+          gfx::Rect(x * 512, y * 512 + 256, 1, 1), picture.get());
+      if ((x + y) & 1) {
+        EXPECT_TRUE(iterator) << x << " " << y;
+        EXPECT_TRUE(*iterator == discardable_bitmap[y][x].pixelRef());
+        EXPECT_FALSE(++iterator) << x << " " << y;
+      } else {
+        EXPECT_FALSE(iterator) << x << " " << y;
+      }
+    }
+  }
+}
+
+// Verifies that a picture can be reconstructed from just the base64-encoded
+// "skp64" entry of its serialized dictionary, and that invalid input yields
+// a NULL picture.
+TEST(PictureTest, CreateFromSkpValue) {
+  SkGraphics::Init();
+
+  gfx::Rect layer_rect(100, 200);
+
+  SkTileGridFactory::TileGridInfo tile_grid_info;
+  tile_grid_info.fTileInterval = SkISize::Make(100, 200);
+  tile_grid_info.fMargin.setEmpty();
+  tile_grid_info.fOffset.setZero();
+
+  FakeContentLayerClient content_layer_client;
+
+  scoped_ptr<base::Value> tmp;
+
+  SkPaint red_paint;
+  red_paint.setColor(SkColorSetARGB(255, 255, 0, 0));
+  SkPaint green_paint;
+  green_paint.setColor(SkColorSetARGB(255, 0, 255, 0));
+
+  // Invalid picture (not a dict).
+  tmp.reset(new base::StringValue("abc!@#$%"));
+  scoped_refptr<Picture> invalid_picture =
+      Picture::CreateFromSkpValue(tmp.get());
+  EXPECT_TRUE(!invalid_picture.get());
+
+  // Single full-size rect picture.
+  content_layer_client.add_draw_rect(layer_rect, red_paint);
+  scoped_refptr<Picture> one_rect_picture =
+      Picture::Create(layer_rect,
+                      &content_layer_client,
+                      tile_grid_info,
+                      false,
+                      Picture::RECORD_NORMALLY);
+  scoped_ptr<base::Value> serialized_one_rect(
+      one_rect_picture->AsValue());
+
+  const base::DictionaryValue* value = NULL;
+  EXPECT_TRUE(serialized_one_rect->GetAsDictionary(&value));
+
+  // Decode the picture from base64.
+  const base::Value* skp_value;
+  EXPECT_TRUE(value->Get("skp64", &skp_value));
+
+  // Reconstruct the picture.
+  scoped_refptr<Picture> one_rect_picture_check =
+      Picture::CreateFromSkpValue(skp_value);
+  EXPECT_TRUE(!!one_rect_picture_check.get());
+
+  // The skp-only round trip preserves the layer rect dimensions.
+  EXPECT_EQ(100, one_rect_picture_check->LayerRect().width());
+  EXPECT_EQ(200, one_rect_picture_check->LayerRect().height());
+}
+
+// Exercises every Picture::RecordingMode, verifying that each hands the
+// client a canvas and reports the expected graphics-context status. The
+// final EXPECT_EQ guards against new modes being added without extending
+// this test.
+TEST(PictureTest, RecordingModes) {
+  SkGraphics::Init();
+
+  gfx::Rect layer_rect(100, 200);
+
+  SkTileGridFactory::TileGridInfo tile_grid_info;
+  tile_grid_info.fTileInterval = SkISize::Make(100, 200);
+  tile_grid_info.fMargin.setEmpty();
+  tile_grid_info.fOffset.setZero();
+
+  FakeContentLayerClient content_layer_client;
+  EXPECT_EQ(NULL, content_layer_client.last_canvas());
+
+  scoped_refptr<Picture> picture = Picture::Create(layer_rect,
+                                                   &content_layer_client,
+                                                   tile_grid_info,
+                                                   false,
+                                                   Picture::RECORD_NORMALLY);
+  EXPECT_TRUE(content_layer_client.last_canvas() != NULL);
+  EXPECT_EQ(ContentLayerClient::GRAPHICS_CONTEXT_ENABLED,
+            content_layer_client.last_context_status());
+  EXPECT_TRUE(picture.get());
+
+  picture = Picture::Create(layer_rect,
+                            &content_layer_client,
+                            tile_grid_info,
+                            false,
+                            Picture::RECORD_WITH_SK_NULL_CANVAS);
+  EXPECT_TRUE(content_layer_client.last_canvas() != NULL);
+  EXPECT_EQ(ContentLayerClient::GRAPHICS_CONTEXT_ENABLED,
+            content_layer_client.last_context_status());
+  EXPECT_TRUE(picture.get());
+
+  picture = Picture::Create(layer_rect,
+                            &content_layer_client,
+                            tile_grid_info,
+                            false,
+                            Picture::RECORD_WITH_PAINTING_DISABLED);
+  EXPECT_TRUE(content_layer_client.last_canvas() != NULL);
+  EXPECT_EQ(ContentLayerClient::GRAPHICS_CONTEXT_DISABLED,
+            content_layer_client.last_context_status());
+  EXPECT_TRUE(picture.get());
+
+  picture = Picture::Create(layer_rect,
+                            &content_layer_client,
+                            tile_grid_info,
+                            false,
+                            Picture::RECORD_WITH_SKRECORD);
+  EXPECT_TRUE(content_layer_client.last_canvas() != NULL);
+  EXPECT_TRUE(picture.get());
+
+  EXPECT_EQ(4, Picture::RECORDING_MODE_COUNT);
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/pixel_buffer_raster_worker_pool.cc b/cc/resources/pixel_buffer_raster_worker_pool.cc
new file mode 100644
index 0000000..b1011fc
--- /dev/null
+++ b/cc/resources/pixel_buffer_raster_worker_pool.cc
@@ -0,0 +1,761 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/pixel_buffer_raster_worker_pool.h"
+
+#include <algorithm>
+
+#include "base/containers/stack_container.h"
+#include "base/debug/trace_event.h"
+#include "base/debug/trace_event_argument.h"
+#include "base/strings/stringprintf.h"
+#include "cc/debug/traced_value.h"
+#include "cc/resources/raster_buffer.h"
+#include "cc/resources/resource.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "third_party/skia/include/utils/SkNullCanvas.h"
+
+namespace cc {
namespace {

// RAII adapter that exposes a resource's pixel buffer as an SkCanvas for
// raster work. The pixel buffer is acquired and mapped on construction and
// released on destruction.
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(ResourceProvider* resource_provider,
                   const Resource* resource)
      : resource_provider_(resource_provider),
        resource_(resource),
        buffer_(NULL),
        stride_(0) {
    resource_provider_->AcquirePixelBuffer(resource_->id());
    buffer_ = resource_provider_->MapPixelBuffer(resource_->id(), &stride_);
  }

  virtual ~RasterBufferImpl() {
    resource_provider_->ReleasePixelBuffer(resource_->id());
  }

  // Overridden from RasterBuffer:
  virtual skia::RefPtr<SkCanvas> AcquireSkCanvas() OVERRIDE {
    // Mapping can fail; fall back to a null canvas so raster work becomes a
    // no-op instead of dereferencing a NULL buffer.
    if (!buffer_)
      return skia::AdoptRef(SkCreateNullCanvas());

    RasterWorkerPool::AcquireBitmapForBuffer(
        &bitmap_, buffer_, resource_->format(), resource_->size(), stride_);
    return skia::AdoptRef(new SkCanvas(bitmap_));
  }
  virtual void ReleaseSkCanvas(const skia::RefPtr<SkCanvas>& canvas) OVERRIDE {
    if (!buffer_)
      return;

    RasterWorkerPool::ReleaseBitmapForBuffer(
        &bitmap_, buffer_, resource_->format());
  }

 private:
  ResourceProvider* resource_provider_;
  const Resource* resource_;
  uint8_t* buffer_;  // Mapped pixel buffer; NULL if mapping failed.
  int stride_;       // Row stride of |buffer_| in bytes.
  SkBitmap bitmap_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

// Delay between polls for completed raster tasks/uploads.
const int kCheckForCompletedRasterTasksDelayMs = 6;

// At most this many raster tasks are scheduled at once; the rest are
// throttled until a later scheduling pass.
const size_t kMaxScheduledRasterTasks = 48;

typedef base::StackVector<RasterTask*, kMaxScheduledRasterTasks>
    RasterTaskVector;

// Returns the collection of task sets that have a non-zero count in
// |task_counts| (array indexed by TaskSet).
TaskSetCollection NonEmptyTaskSetsFromTaskCounts(const size_t* task_counts) {
  TaskSetCollection task_sets;
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    if (task_counts[task_set])
      task_sets[task_set] = true;
  }
  return task_sets;
}

// Increments |task_counts| for every task set present in |task_sets|.
void AddTaskSetsToTaskCounts(size_t* task_counts,
                             const TaskSetCollection& task_sets) {
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    if (task_sets[task_set])
      task_counts[task_set]++;
  }
}

// Decrements |task_counts| for every task set present in |task_sets|.
void RemoveTaskSetsFromTaskCounts(size_t* task_counts,
                                  const TaskSetCollection& task_sets) {
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    if (task_sets[task_set])
      task_counts[task_set]--;
  }
}

}  // namespace
+
// A newly tracked task always starts out UNSCHEDULED; ScheduleMoreTasks()
// and the completion checks move it through SCHEDULED/UPLOADING/COMPLETED.
PixelBufferRasterWorkerPool::RasterTaskState::RasterTaskState(
    RasterTask* task,
    const TaskSetCollection& task_sets)
    : type(UNSCHEDULED), task(task), task_sets(task_sets) {
}
+
// static
// Creates a pixel-buffer-backed raster worker pool. In-flight uploads are
// throttled to at most |max_transfer_buffer_usage_bytes|.
scoped_ptr<RasterWorkerPool> PixelBufferRasterWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    size_t max_transfer_buffer_usage_bytes) {
  return make_scoped_ptr<RasterWorkerPool>(
      new PixelBufferRasterWorkerPool(task_runner,
                                      task_graph_runner,
                                      context_provider,
                                      resource_provider,
                                      max_transfer_buffer_usage_bytes));
}
+
PixelBufferRasterWorkerPool::PixelBufferRasterWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    size_t max_transfer_buffer_usage_bytes)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      context_provider_(context_provider),
      resource_provider_(resource_provider),
      shutdown_(false),
      scheduled_raster_task_count_(0u),
      bytes_pending_upload_(0u),
      max_bytes_pending_upload_(max_transfer_buffer_usage_bytes),
      has_performed_uploads_since_last_flush_(false),
      // Periodic notifier that drives CheckForCompletedRasterTasks() while
      // work is pending.
      check_for_completed_raster_task_notifier_(
          task_runner,
          base::Bind(&PixelBufferRasterWorkerPool::CheckForCompletedRasterTasks,
                     base::Unretained(this)),
          base::TimeDelta::FromMilliseconds(
              kCheckForCompletedRasterTasksDelayMs)),
      raster_finished_weak_ptr_factory_(this) {
  DCHECK(context_provider_);
  // No tasks tracked yet in any task set.
  std::fill(task_counts_, task_counts_ + kNumberOfTaskSets, 0);
}
+
// All task state must have been drained (via Shutdown() followed by
// CheckForCompletedTasks()) before destruction.
PixelBufferRasterWorkerPool::~PixelBufferRasterWorkerPool() {
  DCHECK_EQ(0u, raster_task_states_.size());
  DCHECK_EQ(0u, raster_tasks_with_pending_upload_.size());
  DCHECK_EQ(0u, completed_raster_tasks_.size());
  DCHECK_EQ(0u, completed_image_decode_tasks_.size());
  DCHECK(NonEmptyTaskSetsFromTaskCounts(task_counts_).none());
}
+
+Rasterizer* PixelBufferRasterWorkerPool::AsRasterizer() { return this; }
+
// Sets the client notified via DidFinishRunningTasks() and queried for
// TasksThatShouldBeForcedToComplete(). Not owned.
void PixelBufferRasterWorkerPool::SetClient(RasterizerClient* client) {
  client_ = client;
}
+
// Cancels all remaining work: unscheduled tasks are marked completed, running
// tasks are waited for, and pending uploads are forced to finish (the
// |shutdown_| flag makes CheckForCompletedUploads() force-complete them).
void PixelBufferRasterWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "PixelBufferRasterWorkerPool::Shutdown");

  shutdown_ = true;

  // Scheduling an empty graph cancels tasks that have not started yet;
  // WaitForTasksToFinishRunning() lets already-running tasks drain.
  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);

  CheckForCompletedRasterizerTasks();
  CheckForCompletedUploads();

  check_for_completed_raster_task_notifier_.Cancel();

  for (RasterTaskState::Vector::iterator it = raster_task_states_.begin();
       it != raster_task_states_.end();
       ++it) {
    RasterTaskState& state = *it;

    // All unscheduled tasks need to be canceled.
    if (state.type == RasterTaskState::UNSCHEDULED) {
      completed_raster_tasks_.push_back(state.task);
      state.type = RasterTaskState::COMPLETED;
    }
  }
  // Every tracked task must now be in the completed list; a later
  // CheckForCompletedTasks() runs the reply callbacks and empties both.
  DCHECK_EQ(completed_raster_tasks_.size(), raster_task_states_.size());
}
+
// Replaces the current raster task queue with |queue|: carries state over for
// tasks present in both queues, cancels unscheduled tasks that dropped out of
// the queue, and then schedules as many tasks as throttling permits.
void PixelBufferRasterWorkerPool::ScheduleTasks(RasterTaskQueue* queue) {
  TRACE_EVENT0("cc", "PixelBufferRasterWorkerPool::ScheduleTasks");

  if (should_notify_client_if_no_tasks_are_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Every task set owes the client a fresh notification; |task_counts_| is
  // rebuilt from scratch below.
  should_notify_client_if_no_tasks_are_pending_.set();
  std::fill(task_counts_, task_counts_ + kNumberOfTaskSets, 0);

  // Update raster task state and remove items from old queue.
  for (RasterTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end();
       ++it) {
    const RasterTaskQueue::Item& item = *it;
    RasterTask* task = item.task;

    // Remove any old items that are associated with this task. The result is
    // that the old queue is left with all items not present in this queue,
    // which we use below to determine what tasks need to be canceled.
    RasterTaskQueue::Item::Vector::iterator old_it =
        std::find_if(raster_tasks_.items.begin(),
                     raster_tasks_.items.end(),
                     RasterTaskQueue::Item::TaskComparator(task));
    if (old_it != raster_tasks_.items.end()) {
      std::swap(*old_it, raster_tasks_.items.back());
      raster_tasks_.items.pop_back();
    }

    RasterTaskState::Vector::iterator state_it =
        std::find_if(raster_task_states_.begin(),
                     raster_task_states_.end(),
                     RasterTaskState::TaskComparator(task));
    if (state_it != raster_task_states_.end()) {
      RasterTaskState& state = *state_it;

      state.task_sets = item.task_sets;
      // |task_counts_| accounts for all tasks that need to complete before we
      // can send a "no tasks pending" signal for a task set. Tasks that have
      // already completed should not be part of this count.
      if (state.type != RasterTaskState::COMPLETED)
        AddTaskSetsToTaskCounts(task_counts_, item.task_sets);

      continue;
    }

    // First time we see this task; start tracking it as unscheduled.
    DCHECK(!task->HasBeenScheduled());
    raster_task_states_.push_back(RasterTaskState(task, item.task_sets));
    AddTaskSetsToTaskCounts(task_counts_, item.task_sets);
  }

  // Determine what tasks in old queue need to be canceled.
  for (RasterTaskQueue::Item::Vector::const_iterator it =
           raster_tasks_.items.begin();
       it != raster_tasks_.items.end();
       ++it) {
    const RasterTaskQueue::Item& item = *it;
    RasterTask* task = item.task;

    RasterTaskState::Vector::iterator state_it =
        std::find_if(raster_task_states_.begin(),
                     raster_task_states_.end(),
                     RasterTaskState::TaskComparator(task));
    // We've already processed completion if we can't find a RasterTaskState
    // for this task.
    if (state_it == raster_task_states_.end())
      continue;

    RasterTaskState& state = *state_it;

    // Unscheduled task can be canceled.
    if (state.type == RasterTaskState::UNSCHEDULED) {
      DCHECK(!task->HasBeenScheduled());
      DCHECK(std::find(completed_raster_tasks_.begin(),
                       completed_raster_tasks_.end(),
                       task) == completed_raster_tasks_.end());
      completed_raster_tasks_.push_back(task);
      state.type = RasterTaskState::COMPLETED;
    }

    // No longer in any task set.
    state.task_sets.reset();
  }

  raster_tasks_.Swap(queue);

  // Check for completed tasks when ScheduleTasks() is called as
  // priorities might have changed and this maximizes the number
  // of top priority tasks that are scheduled.
  CheckForCompletedRasterizerTasks();
  CheckForCompletedUploads();
  FlushUploads();

  // Schedule new tasks.
  ScheduleMoreTasks();

  // Reschedule check for completed raster tasks.
  check_for_completed_raster_task_notifier_.Schedule();

  TRACE_EVENT_ASYNC_STEP_INTO1(
      "cc", "ScheduledTasks", this, StateName(), "state", StateAsValue());
}
+
// Collects finished rasterizer tasks and uploads, then runs the reply
// callbacks for all completed image decode and raster tasks on the origin
// thread and stops tracking them.
void PixelBufferRasterWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "PixelBufferRasterWorkerPool::CheckForCompletedTasks");

  CheckForCompletedRasterizerTasks();
  CheckForCompletedUploads();
  FlushUploads();

  for (RasterizerTask::Vector::const_iterator it =
           completed_image_decode_tasks_.begin();
       it != completed_image_decode_tasks_.end();
       ++it) {
    RasterizerTask* task = it->get();
    task->RunReplyOnOriginThread();
  }
  completed_image_decode_tasks_.clear();

  for (RasterTask::Vector::const_iterator it = completed_raster_tasks_.begin();
       it != completed_raster_tasks_.end();
       ++it) {
    RasterTask* task = it->get();
    RasterTaskState::Vector::iterator state_it =
        std::find_if(raster_task_states_.begin(),
                     raster_task_states_.end(),
                     RasterTaskState::TaskComparator(task));
    DCHECK(state_it != raster_task_states_.end());
    DCHECK_EQ(RasterTaskState::COMPLETED, state_it->type);

    // Swap-and-pop: order of |raster_task_states_| is not significant.
    std::swap(*state_it, raster_task_states_.back());
    raster_task_states_.pop_back();

    task->RunReplyOnOriginThread();
  }
  completed_raster_tasks_.clear();
}
+
// Returns a RasterBuffer backed by |resource|'s mapped pixel buffer.
scoped_ptr<RasterBuffer> PixelBufferRasterWorkerPool::AcquireBufferForRaster(
    const Resource* resource) {
  return make_scoped_ptr<RasterBuffer>(
      new RasterBufferImpl(resource_provider_, resource));
}
+
void PixelBufferRasterWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}
+
// Invoked (via weak pointer) when the "raster finished" marker task for
// |task_set| has run; triggers an immediate completion check so the client
// can be notified without waiting for the periodic poll.
void PixelBufferRasterWorkerPool::OnRasterFinished(TaskSet task_set) {
  TRACE_EVENT2("cc",
               "PixelBufferRasterWorkerPool::OnRasterFinished",
               "task_set",
               task_set,
               "should_notify_client_if_no_tasks_are_pending",
               should_notify_client_if_no_tasks_are_pending_[task_set]);

  // There's no need to call CheckForCompletedRasterTasks() if the client has
  // already been notified.
  if (!should_notify_client_if_no_tasks_are_pending_[task_set])
    return;
  raster_finished_tasks_pending_[task_set] = false;

  // This reduces latency between the time when all tasks required for
  // activation have finished running and the time when the client is
  // notified.
  CheckForCompletedRasterTasks();
}
+
// Issues a shallow GL flush if any upload commands were issued since the
// last flush, so queued async set-pixels work actually reaches the GPU
// process.
void PixelBufferRasterWorkerPool::FlushUploads() {
  if (!has_performed_uploads_since_last_flush_)
    return;

  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
  has_performed_uploads_since_last_flush_ = false;
}
+
// Moves tasks whose async uploads have finished (or were forced to finish)
// from the pending-upload queue to |completed_raster_tasks_|, running their
// CompleteOnOriginThread() and releasing the uploaded bytes from the
// transfer-buffer budget.
void PixelBufferRasterWorkerPool::CheckForCompletedUploads() {
  RasterTask::Vector tasks_with_completed_uploads;

  // First check if any have completed.
  while (!raster_tasks_with_pending_upload_.empty()) {
    RasterTask* task = raster_tasks_with_pending_upload_.front().get();
    DCHECK(std::find_if(raster_task_states_.begin(),
                        raster_task_states_.end(),
                        RasterTaskState::TaskComparator(task)) !=
           raster_task_states_.end());
    DCHECK_EQ(RasterTaskState::UPLOADING,
              std::find_if(raster_task_states_.begin(),
                           raster_task_states_.end(),
                           RasterTaskState::TaskComparator(task))->type);

    // Uploads complete in the order they are issued.
    if (!resource_provider_->DidSetPixelsComplete(task->resource()->id()))
      break;

    tasks_with_completed_uploads.push_back(task);
    raster_tasks_with_pending_upload_.pop_front();
  }

  DCHECK(client_);
  TaskSetCollection tasks_that_should_be_forced_to_complete =
      client_->TasksThatShouldBeForcedToComplete();
  bool should_force_some_uploads_to_complete =
      shutdown_ || tasks_that_should_be_forced_to_complete.any();

  if (should_force_some_uploads_to_complete) {
    RasterTask::Vector tasks_with_uploads_to_force;
    RasterTaskDeque::iterator it = raster_tasks_with_pending_upload_.begin();
    while (it != raster_tasks_with_pending_upload_.end()) {
      RasterTask* task = it->get();
      RasterTaskState::Vector::const_iterator state_it =
          std::find_if(raster_task_states_.begin(),
                       raster_task_states_.end(),
                       RasterTaskState::TaskComparator(task));
      DCHECK(state_it != raster_task_states_.end());
      const RasterTaskState& state = *state_it;

      // Force all uploads to complete for which the client requests to do so.
      // During shutdown, force all pending uploads to complete.
      if (shutdown_ ||
          (state.task_sets & tasks_that_should_be_forced_to_complete).any()) {
        tasks_with_uploads_to_force.push_back(task);
        tasks_with_completed_uploads.push_back(task);
        it = raster_tasks_with_pending_upload_.erase(it);
        continue;
      }

      ++it;
    }

    // Force uploads in reverse order. Since forcing can cause a wait on
    // all previous uploads, we would rather wait only once downstream.
    for (RasterTask::Vector::reverse_iterator it =
             tasks_with_uploads_to_force.rbegin();
         it != tasks_with_uploads_to_force.rend();
         ++it) {
      RasterTask* task = it->get();

      resource_provider_->ForceSetPixelsToComplete(task->resource()->id());
      has_performed_uploads_since_last_flush_ = true;
    }
  }

  // Release shared memory and move tasks with completed uploads
  // to |completed_raster_tasks_|.
  for (RasterTask::Vector::const_iterator it =
           tasks_with_completed_uploads.begin();
       it != tasks_with_completed_uploads.end();
       ++it) {
    RasterTask* task = it->get();
    RasterTaskState::Vector::iterator state_it =
        std::find_if(raster_task_states_.begin(),
                     raster_task_states_.end(),
                     RasterTaskState::TaskComparator(task));
    DCHECK(state_it != raster_task_states_.end());
    RasterTaskState& state = *state_it;

    // The task's bytes no longer count against the transfer-buffer budget.
    bytes_pending_upload_ -= task->resource()->bytes();

    task->WillComplete();
    task->CompleteOnOriginThread(this);
    task->DidComplete();

    // Async set pixels commands are not necessarily processed in-sequence with
    // drawing commands. Read lock fences are required to ensure that async
    // commands don't access the resource while used for drawing.
    resource_provider_->EnableReadLockFences(task->resource()->id());

    DCHECK(std::find(completed_raster_tasks_.begin(),
                     completed_raster_tasks_.end(),
                     task) == completed_raster_tasks_.end());
    completed_raster_tasks_.push_back(task);
    state.type = RasterTaskState::COMPLETED;
    // Triggers if the current task belongs to a set that should be empty.
    DCHECK((state.task_sets & ~NonEmptyTaskSetsFromTaskCounts(task_counts_))
               .none());
    RemoveTaskSetsFromTaskCounts(task_counts_, state.task_sets);
  }
}
+
// Periodic (and on-demand) completion pass: collects finished raster work and
// uploads, schedules more tasks if any are still pending, reschedules the
// poll, and fires DidFinishRunningTasks() for task sets that just drained.
void PixelBufferRasterWorkerPool::CheckForCompletedRasterTasks() {
  TRACE_EVENT0("cc",
               "PixelBufferRasterWorkerPool::CheckForCompletedRasterTasks");

  // Since this function can be called directly, cancel any pending checks.
  check_for_completed_raster_task_notifier_.Cancel();

  DCHECK(should_notify_client_if_no_tasks_are_pending_.any());

  CheckForCompletedRasterizerTasks();
  CheckForCompletedUploads();
  FlushUploads();

  // Determine what client notifications to generate. A set is notified only
  // when its notification is still owed, its "raster finished" marker task
  // has run, and no tasks in the set remain pending.
  TaskSetCollection will_notify_client_that_no_tasks_are_pending =
      should_notify_client_if_no_tasks_are_pending_ &
      ~raster_finished_tasks_pending_ & ~PendingTasks();

  // Adjust the need to generate notifications before scheduling more tasks.
  should_notify_client_if_no_tasks_are_pending_ &=
      ~will_notify_client_that_no_tasks_are_pending;

  scheduled_raster_task_count_ = 0;
  if (PendingRasterTaskCount())
    ScheduleMoreTasks();

  TRACE_EVENT_ASYNC_STEP_INTO1(
      "cc", "ScheduledTasks", this, StateName(), "state", StateAsValue());

  // Schedule another check for completed raster tasks while there are
  // pending raster tasks or pending uploads.
  if (PendingTasks().any())
    check_for_completed_raster_task_notifier_.Schedule();

  if (should_notify_client_if_no_tasks_are_pending_.none())
    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);

  // Generate client notifications.
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    if (will_notify_client_that_no_tasks_are_pending[task_set]) {
      DCHECK(!PendingTasks()[task_set]);
      client_->DidFinishRunningTasks(task_set);
    }
  }
}
+
// Builds and schedules a task graph from the highest-priority raster tasks,
// throttled by pending-upload bytes and kMaxScheduledRasterTasks. Also
// inserts per-task-set "raster finished" marker tasks whose completion
// triggers OnRasterFinished().
void PixelBufferRasterWorkerPool::ScheduleMoreTasks() {
  TRACE_EVENT0("cc", "PixelBufferRasterWorkerPool::ScheduleMoreTasks");

  RasterTaskVector tasks[kNumberOfTaskSets];

  unsigned priority = kRasterTaskPriorityBase;

  graph_.Reset();

  size_t bytes_pending_upload = bytes_pending_upload_;
  TaskSetCollection did_throttle_raster_tasks;
  size_t scheduled_raster_task_count = 0;

  // |raster_tasks_.items| is in priority order, so earlier items get lower
  // (more urgent) priority values.
  for (RasterTaskQueue::Item::Vector::const_iterator it =
           raster_tasks_.items.begin();
       it != raster_tasks_.items.end();
       ++it) {
    const RasterTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(item.task_sets.any());

    // |raster_task_states_| contains the state of all tasks that we have not
    // yet run reply callbacks for.
    RasterTaskState::Vector::iterator state_it =
        std::find_if(raster_task_states_.begin(),
                     raster_task_states_.end(),
                     RasterTaskState::TaskComparator(task));
    if (state_it == raster_task_states_.end())
      continue;

    RasterTaskState& state = *state_it;

    // Skip task if completed.
    if (state.type == RasterTaskState::COMPLETED) {
      DCHECK(std::find(completed_raster_tasks_.begin(),
                       completed_raster_tasks_.end(),
                       task) != completed_raster_tasks_.end());
      continue;
    }

    // All raster tasks need to be throttled by bytes of pending uploads,
    // but if it's the only task allow it to complete no matter what its size,
    // to prevent starvation of the task queue.
    size_t new_bytes_pending_upload = bytes_pending_upload;
    new_bytes_pending_upload += task->resource()->bytes();
    if (new_bytes_pending_upload > max_bytes_pending_upload_ &&
        bytes_pending_upload) {
      did_throttle_raster_tasks |= item.task_sets;
      continue;
    }

    // If raster has finished, just update |bytes_pending_upload|.
    if (state.type == RasterTaskState::UPLOADING) {
      DCHECK(!task->HasCompleted());
      bytes_pending_upload = new_bytes_pending_upload;
      continue;
    }

    // Throttle raster tasks based on kMaxScheduledRasterTasks.
    if (scheduled_raster_task_count >= kMaxScheduledRasterTasks) {
      did_throttle_raster_tasks |= item.task_sets;
      continue;
    }

    // Update |bytes_pending_upload| now that task has cleared all
    // throttling limits.
    bytes_pending_upload = new_bytes_pending_upload;

    DCHECK(state.type == RasterTaskState::UNSCHEDULED ||
           state.type == RasterTaskState::SCHEDULED);
    state.type = RasterTaskState::SCHEDULED;

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);

    ++scheduled_raster_task_count;
    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (item.task_sets[task_set])
        tasks[task_set].container().push_back(task);
    }
  }

  // Cancel existing OnRasterFinished callbacks.
  raster_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<RasterizerTask> new_raster_finished_tasks[kNumberOfTaskSets];
  size_t scheduled_task_counts[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    scheduled_task_counts[task_set] = tasks[task_set].container().size();
    DCHECK_LE(scheduled_task_counts[task_set], task_counts_[task_set]);
    // Schedule OnRasterFinished call for task set only when notification is
    // pending and throttling is not preventing all pending tasks in the set
    // from being scheduled.
    if (!did_throttle_raster_tasks[task_set] &&
        should_notify_client_if_no_tasks_are_pending_[task_set]) {
      new_raster_finished_tasks[task_set] = CreateRasterFinishedTask(
          task_runner_.get(),
          base::Bind(&PixelBufferRasterWorkerPool::OnRasterFinished,
                     raster_finished_weak_ptr_factory_.GetWeakPtr(),
                     task_set));
      raster_finished_tasks_pending_[task_set] = true;
      InsertNodeForTask(&graph_,
                        new_raster_finished_tasks[task_set].get(),
                        kRasterFinishedTaskPriority,
                        scheduled_task_counts[task_set]);
      // The marker task depends on every scheduled task in the set.
      for (RasterTaskVector::ContainerType::const_iterator it =
               tasks[task_set].container().begin();
           it != tasks[task_set].container().end();
           ++it) {
        graph_.edges.push_back(
            TaskGraph::Edge(*it, new_raster_finished_tasks[task_set].get()));
      }
    }
  }

  DCHECK_LE(scheduled_raster_task_count, PendingRasterTaskCount());

  ScheduleTasksOnOriginThread(this, &graph_);
  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  scheduled_raster_task_count_ = scheduled_raster_task_count;

  // Keep the new marker tasks alive until the next scheduling pass.
  std::copy(new_raster_finished_tasks,
            new_raster_finished_tasks + kNumberOfTaskSets,
            raster_finished_tasks_);
}
+
// Number of tracked raster tasks that still need to run — i.e. tasks that
// are neither waiting on an upload nor fully completed.
unsigned PixelBufferRasterWorkerPool::PendingRasterTaskCount() const {
  // Tasks that are uploading or completed no longer count as pending raster
  // work.
  unsigned num_completed_raster_tasks =
      raster_tasks_with_pending_upload_.size() + completed_raster_tasks_.size();
  DCHECK_GE(raster_task_states_.size(), num_completed_raster_tasks);
  return raster_task_states_.size() - num_completed_raster_tasks;
}
+
// Returns the task sets that still have at least one incomplete task.
TaskSetCollection PixelBufferRasterWorkerPool::PendingTasks() const {
  return NonEmptyTaskSetsFromTaskCounts(task_counts_);
}
+
// Human-readable state label used for the "ScheduledTasks" trace event.
const char* PixelBufferRasterWorkerPool::StateName() const {
  if (scheduled_raster_task_count_)
    return "rasterizing";
  if (PendingRasterTaskCount())
    return "throttled";
  if (!raster_tasks_with_pending_upload_.empty())
    return "waiting_for_uploads";

  return "finishing";
}
+
// Collects tasks that finished on the task graph runner. Image decode tasks
// are completed immediately; raster tasks either begin their async pixel
// upload (UPLOADING) or, if they were canceled, are completed or returned to
// UNSCHEDULED depending on whether they are still queued.
void PixelBufferRasterWorkerPool::CheckForCompletedRasterizerTasks() {
  TRACE_EVENT0("cc",
               "PixelBufferRasterWorkerPool::CheckForCompletedRasterizerTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);
  for (Task::Vector::const_iterator it = completed_tasks_.begin();
       it != completed_tasks_.end();
       ++it) {
    RasterizerTask* task = static_cast<RasterizerTask*>(it->get());

    // Non-raster tasks (image decodes) complete right away.
    RasterTask* raster_task = task->AsRasterTask();
    if (!raster_task) {
      task->WillComplete();
      task->CompleteOnOriginThread(this);
      task->DidComplete();

      completed_image_decode_tasks_.push_back(task);
      continue;
    }

    RasterTaskState::Vector::iterator state_it =
        std::find_if(raster_task_states_.begin(),
                     raster_task_states_.end(),
                     RasterTaskState::TaskComparator(raster_task));
    DCHECK(state_it != raster_task_states_.end());

    RasterTaskState& state = *state_it;
    DCHECK_EQ(RasterTaskState::SCHEDULED, state.type);

    resource_provider_->UnmapPixelBuffer(raster_task->resource()->id());

    if (!raster_task->HasFinishedRunning()) {
      // When priorities change, a raster task can be canceled as a result of
      // no longer being of high enough priority to fit in our throttled
      // raster task budget. The task has not yet completed in this case.
      raster_task->WillComplete();
      raster_task->CompleteOnOriginThread(this);
      raster_task->DidComplete();

      // If the task is still in the queue it may be scheduled again later;
      // otherwise it is done for good.
      RasterTaskQueue::Item::Vector::const_iterator item_it =
          std::find_if(raster_tasks_.items.begin(),
                       raster_tasks_.items.end(),
                       RasterTaskQueue::Item::TaskComparator(raster_task));
      if (item_it != raster_tasks_.items.end()) {
        state.type = RasterTaskState::UNSCHEDULED;
        continue;
      }

      DCHECK(std::find(completed_raster_tasks_.begin(),
                       completed_raster_tasks_.end(),
                       raster_task) == completed_raster_tasks_.end());
      completed_raster_tasks_.push_back(raster_task);
      state.type = RasterTaskState::COMPLETED;
      // Triggers if the current task belongs to a set that should be empty.
      DCHECK((state.task_sets & ~NonEmptyTaskSetsFromTaskCounts(task_counts_))
                 .none());
      RemoveTaskSetsFromTaskCounts(task_counts_, state.task_sets);
      continue;
    }

    // Raster succeeded: kick off the async upload and track its bytes against
    // the transfer-buffer budget.
    resource_provider_->BeginSetPixels(raster_task->resource()->id());
    has_performed_uploads_since_last_flush_ = true;

    bytes_pending_upload_ += raster_task->resource()->bytes();
    raster_tasks_with_pending_upload_.push_back(raster_task);
    state.type = RasterTaskState::UPLOADING;
  }
  completed_tasks_.clear();
}
+
// Snapshot of pool state for tracing ("ScheduledTasks" async trace event).
scoped_refptr<base::debug::ConvertableToTraceFormat>
PixelBufferRasterWorkerPool::StateAsValue() const {
  scoped_refptr<base::debug::TracedValue> state =
      new base::debug::TracedValue();
  state->SetInteger("completed_count", completed_raster_tasks_.size());
  // One pending count per task set.
  state->BeginArray("pending_count");
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
    state->AppendInteger(task_counts_[task_set]);
  state->EndArray();
  state->SetInteger("pending_upload_count",
                    raster_tasks_with_pending_upload_.size());
  state->BeginDictionary("throttle_state");
  ThrottleStateAsValueInto(state.get());
  state->EndDictionary();
  return state;
}
+
// Writes the upload-throttling counters into |throttle_state| for tracing.
void PixelBufferRasterWorkerPool::ThrottleStateAsValueInto(
    base::debug::TracedValue* throttle_state) const {
  throttle_state->SetInteger("bytes_available_for_upload",
                             max_bytes_pending_upload_ - bytes_pending_upload_);
  throttle_state->SetInteger("bytes_pending_upload", bytes_pending_upload_);
  throttle_state->SetInteger("scheduled_raster_task_count",
                             scheduled_raster_task_count_);
}
+
+} // namespace cc
diff --git a/cc/resources/pixel_buffer_raster_worker_pool.h b/cc/resources/pixel_buffer_raster_worker_pool.h
new file mode 100644
index 0000000..a2bed33
--- /dev/null
+++ b/cc/resources/pixel_buffer_raster_worker_pool.h
@@ -0,0 +1,140 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_PIXEL_BUFFER_RASTER_WORKER_POOL_H_
+#define CC_RESOURCES_PIXEL_BUFFER_RASTER_WORKER_POOL_H_
+
+#include <deque>
+#include <vector>
+
+#include "base/memory/weak_ptr.h"
+#include "base/values.h"
+#include "cc/base/delayed_unique_notifier.h"
+#include "cc/output/context_provider.h"
+#include "cc/resources/raster_worker_pool.h"
+#include "cc/resources/rasterizer.h"
+
+namespace base {
+namespace debug {
+class ConvertableToTraceFormat;
+class TracedValue;
+}
+}
+
+namespace cc {
+class ResourceProvider;
+
// RasterWorkerPool that rasters into mapped pixel buffers and uploads the
// results asynchronously, throttled by total bytes of in-flight uploads and
// by a maximum number of concurrently scheduled raster tasks.
class CC_EXPORT PixelBufferRasterWorkerPool : public RasterWorkerPool,
                                              public Rasterizer,
                                              public RasterizerTaskClient {
 public:
  virtual ~PixelBufferRasterWorkerPool();

  static scoped_ptr<RasterWorkerPool> Create(
      base::SequencedTaskRunner* task_runner,
      TaskGraphRunner* task_graph_runner,
      ContextProvider* context_provider,
      ResourceProvider* resource_provider,
      size_t max_transfer_buffer_usage_bytes);

  // Overridden from RasterWorkerPool:
  virtual Rasterizer* AsRasterizer() OVERRIDE;

  // Overridden from Rasterizer:
  virtual void SetClient(RasterizerClient* client) OVERRIDE;
  virtual void Shutdown() OVERRIDE;
  virtual void ScheduleTasks(RasterTaskQueue* queue) OVERRIDE;
  virtual void CheckForCompletedTasks() OVERRIDE;

  // Overridden from RasterizerTaskClient:
  virtual scoped_ptr<RasterBuffer> AcquireBufferForRaster(
      const Resource* resource) OVERRIDE;
  virtual void ReleaseBufferForRaster(scoped_ptr<RasterBuffer> buffer) OVERRIDE;

 private:
  // Per-task bookkeeping: lifecycle stage plus the task sets the task
  // belongs to.
  struct RasterTaskState {
    // Predicate for std::find_if that matches a state by its task pointer.
    class TaskComparator {
     public:
      explicit TaskComparator(const RasterTask* task) : task_(task) {}

      bool operator()(const RasterTaskState& state) const {
        return state.task == task_;
      }

     private:
      const RasterTask* task_;
    };

    typedef std::vector<RasterTaskState> Vector;

    RasterTaskState(RasterTask* task, const TaskSetCollection& task_sets);

    enum { UNSCHEDULED, SCHEDULED, UPLOADING, COMPLETED } type;
    RasterTask* task;
    TaskSetCollection task_sets;
  };

  typedef std::deque<scoped_refptr<RasterTask> > RasterTaskDeque;

  PixelBufferRasterWorkerPool(base::SequencedTaskRunner* task_runner,
                              TaskGraphRunner* task_graph_runner,
                              ContextProvider* context_provider,
                              ResourceProvider* resource_provider,
                              size_t max_transfer_buffer_usage_bytes);

  // Callback from the per-task-set "raster finished" marker task.
  void OnRasterFinished(TaskSet task_set);
  void FlushUploads();
  void CheckForCompletedUploads();
  void CheckForCompletedRasterTasks();
  void ScheduleMoreTasks();
  unsigned PendingRasterTaskCount() const;
  TaskSetCollection PendingTasks() const;
  void CheckForCompletedRasterizerTasks();

  // Tracing helpers.
  const char* StateName() const;
  scoped_refptr<base::debug::ConvertableToTraceFormat> StateAsValue() const;
  void ThrottleStateAsValueInto(base::debug::TracedValue* throttle_state) const;

  scoped_refptr<base::SequencedTaskRunner> task_runner_;
  TaskGraphRunner* task_graph_runner_;
  const NamespaceToken namespace_token_;
  RasterizerClient* client_;
  ContextProvider* context_provider_;
  ResourceProvider* resource_provider_;

  // Set by Shutdown(); forces pending uploads to complete.
  bool shutdown_;

  // Current queue of raster work and per-task state/bookkeeping.
  RasterTaskQueue raster_tasks_;
  RasterTaskState::Vector raster_task_states_;
  // Tasks whose async uploads have been issued, in issue order.
  RasterTaskDeque raster_tasks_with_pending_upload_;
  RasterTask::Vector completed_raster_tasks_;
  RasterizerTask::Vector completed_image_decode_tasks_;

  size_t scheduled_raster_task_count_;
  // Number of incomplete tasks per task set.
  size_t task_counts_[kNumberOfTaskSets];
  // Upload throttling: bytes currently in flight and the allowed maximum.
  size_t bytes_pending_upload_;
  size_t max_bytes_pending_upload_;
  bool has_performed_uploads_since_last_flush_;

  TaskSetCollection should_notify_client_if_no_tasks_are_pending_;
  TaskSetCollection raster_finished_tasks_pending_;

  // Drives the periodic CheckForCompletedRasterTasks() poll.
  DelayedUniqueNotifier check_for_completed_raster_task_notifier_;

  scoped_refptr<RasterizerTask> raster_finished_tasks_[kNumberOfTaskSets];

  // Task graph used when scheduling tasks and vector used to gather
  // completed tasks.
  TaskGraph graph_;
  Task::Vector completed_tasks_;

  // Invalidated each scheduling pass to cancel stale OnRasterFinished
  // callbacks.
  base::WeakPtrFactory<PixelBufferRasterWorkerPool>
      raster_finished_weak_ptr_factory_;

  DISALLOW_COPY_AND_ASSIGN(PixelBufferRasterWorkerPool);
};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_PIXEL_BUFFER_RASTER_WORKER_POOL_H_
diff --git a/cc/resources/platform_color.h b/cc/resources/platform_color.h
new file mode 100644
index 0000000..4945dcc
--- /dev/null
+++ b/cc/resources/platform_color.h
@@ -0,0 +1,59 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_PLATFORM_COLOR_H_
+#define CC_RESOURCES_PLATFORM_COLOR_H_
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "cc/resources/resource_format.h"
+#include "third_party/khronos/GLES2/gl2.h"
+#include "third_party/khronos/GLES2/gl2ext.h"
+#include "third_party/skia/include/core/SkTypes.h"
+
+namespace cc {
+
// Static helpers describing the platform's native pixel component order
// (decided at compile time by Skia's SK_B32_SHIFT) and how it maps onto
// texture resource formats.
class PlatformColor {
 public:
  enum SourceDataFormat {
    SOURCE_FORMAT_RGBA8,
    SOURCE_FORMAT_BGRA8
  };

  // SK_B32_SHIFT is nonzero when blue is not the low byte, i.e. RGBA order.
  static SourceDataFormat Format() {
    return SK_B32_SHIFT ? SOURCE_FORMAT_RGBA8 : SOURCE_FORMAT_BGRA8;
  }

  // Returns the most efficient texture format for this platform.
  static ResourceFormat BestTextureFormat(bool supports_bgra8888) {
    switch (Format()) {
      case SOURCE_FORMAT_BGRA8:
        // BGRA platforms fall back to RGBA when the GL implementation lacks
        // BGRA texture support.
        return (supports_bgra8888) ? BGRA_8888 : RGBA_8888;
      case SOURCE_FORMAT_RGBA8:
        return RGBA_8888;
    }
    NOTREACHED();
    return RGBA_8888;
  }

  // Return true if the given texture format has the same component order
  // as the color on this platform. Note that RGBA_4444 is accepted for
  // either source format.
  static bool SameComponentOrder(ResourceFormat format) {
    switch (Format()) {
      case SOURCE_FORMAT_RGBA8:
        return format == RGBA_8888 || format == RGBA_4444;
      case SOURCE_FORMAT_BGRA8:
        return format == BGRA_8888 || format == RGBA_4444;
    }
    NOTREACHED();
    return false;
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(PlatformColor);
};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_PLATFORM_COLOR_H_
diff --git a/cc/resources/prioritized_resource.cc b/cc/resources/prioritized_resource.cc
new file mode 100644
index 0000000..462f7f1
--- /dev/null
+++ b/cc/resources/prioritized_resource.cc
@@ -0,0 +1,203 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/prioritized_resource.h"
+
+#include <algorithm>
+
+#include "cc/resources/platform_color.h"
+#include "cc/resources/prioritized_resource_manager.h"
+#include "cc/resources/priority_calculator.h"
+#include "cc/trees/proxy.h"
+
+namespace cc {
+
+// Creates an unbacked texture and registers it with |manager| (may be NULL).
+// manager_ is deliberately initialized to NULL here: RegisterTexture() fills
+// it in via set_manager_internal().
+PrioritizedResource::PrioritizedResource(PrioritizedResourceManager* manager,
+                                         const gfx::Size& size,
+                                         ResourceFormat format)
+    : size_(size),
+      format_(format),
+      bytes_(0),
+      contents_swizzled_(false),
+      priority_(PriorityCalculator::LowestPriority()),
+      is_above_priority_cutoff_(false),
+      is_self_managed_(false),
+      backing_(NULL),
+      manager_(NULL) {
+  // Memory footprint is derived from the dimensions and format.
+  bytes_ = Resource::MemorySizeBytes(size, format);
+  if (manager)
+    manager->RegisterTexture(this);
+}
+
+// Unregistering also returns any backing to the manager (see
+// PrioritizedResourceManager::UnregisterTexture).
+PrioritizedResource::~PrioritizedResource() {
+  if (manager_)
+    manager_->UnregisterTexture(this);
+}
+
+// Moves this texture between managers. Note that manager_ itself is updated
+// as a side effect of Unregister/RegisterTexture (via set_manager_internal),
+// not assigned directly here.
+void PrioritizedResource::SetTextureManager(
+    PrioritizedResourceManager* manager) {
+  if (manager_ == manager)
+    return;
+  if (manager_)
+    manager_->UnregisterTexture(this);
+  if (manager)
+    manager->RegisterTexture(this);
+}
+
+// Changing size or format invalidates the current backing: the texture drops
+// below the cutoff and its backing (if any) is returned to the manager.
+// Setting identical dimensions is a no-op.
+void PrioritizedResource::SetDimensions(const gfx::Size& size,
+                                        ResourceFormat format) {
+  if (format_ != format || size_ != size) {
+    is_above_priority_cutoff_ = false;
+    format_ = format;
+    size_ = size;
+    bytes_ = Resource::MemorySizeBytes(size, format);
+    // A backing without a manager to return it to would leak.
+    DCHECK(manager_ || !backing_);
+    if (manager_)
+      manager_->ReturnBackingTexture(this);
+  }
+}
+
+// Forwards the late-allocation request to the manager; unmanaged textures
+// can never be granted memory late.
+bool PrioritizedResource::RequestLate() {
+  return manager_ ? manager_->RequestLate(this) : false;
+}
+
+// True only when a backing exists and its underlying GL resource has
+// already been deleted by the impl thread.
+bool PrioritizedResource::BackingResourceWasEvicted() const {
+  if (!backing_)
+    return false;
+  return backing_->ResourceHasBeenDeleted();
+}
+
+// Acquires (recycles or allocates) a backing for this texture. Callers must
+// have won the priority cutoff; in release builds a below-cutoff call is
+// silently ignored rather than allocating over budget.
+void PrioritizedResource::AcquireBackingTexture(
+    ResourceProvider* resource_provider) {
+  DCHECK(is_above_priority_cutoff_);
+  if (is_above_priority_cutoff_)
+    manager_->AcquireBackingTextureIfNeeded(this, resource_provider);
+}
+
+// Uploads |source_rect| of |image| (whose full bounds are |image_rect|) into
+// the backing at |dest_offset|, acquiring the backing first if needed.
+void PrioritizedResource::SetPixels(ResourceProvider* resource_provider,
+                                    const uint8_t* image,
+                                    const gfx::Rect& image_rect,
+                                    const gfx::Rect& source_rect,
+                                    const gfx::Vector2d& dest_offset) {
+  DCHECK(is_above_priority_cutoff_);
+  if (is_above_priority_cutoff_)
+    AcquireBackingTexture(resource_provider);
+  DCHECK(backing_);
+  resource_provider->SetPixels(
+      resource_id(), image, image_rect, source_rect, dest_offset);
+
+  // The component order may be bgra if we uploaded bgra pixels to rgba
+  // texture. Mark contents as swizzled if image component order is
+  // different than texture format.
+  contents_swizzled_ = !PlatformColor::SameComponentOrder(format_);
+}
+
+// Establishes the doubly-linked texture<->backing relationship. Both sides
+// must be unlinked beforehand.
+void PrioritizedResource::Link(Backing* backing) {
+  DCHECK(backing);
+  DCHECK(!backing->owner_);
+  DCHECK(!backing_);
+
+  backing_ = backing;
+  backing_->owner_ = this;
+}
+
+// Severs the texture<->backing link from both sides; the backing itself
+// survives (it may be recycled or evicted by the manager later).
+void PrioritizedResource::Unlink() {
+  DCHECK(backing_);
+  DCHECK(backing_->owner_ == this);
+
+  backing_->owner_ = NULL;
+  backing_ = NULL;
+}
+
+// Turns this texture into a memory-accounting placeholder of |bytes| bytes.
+// Order matters: SetDimensions() recomputes bytes_ (and drops any backing),
+// so bytes_ must be overwritten afterwards.
+void PrioritizedResource::SetToSelfManagedMemoryPlaceholder(size_t bytes) {
+  SetDimensions(gfx::Size(), RGBA_8888);
+  set_is_self_managed(true);
+  bytes_ = bytes;
+}
+
+// Wraps an already-created resource |id|. The resource provider pointer is
+// retained only in DCHECK builds, purely to assert that DeleteResource() is
+// later called with the same provider.
+PrioritizedResource::Backing::Backing(unsigned id,
+                                      ResourceProvider* resource_provider,
+                                      const gfx::Size& size,
+                                      ResourceFormat format)
+    : Resource(id, size, format),
+      owner_(NULL),
+      priority_at_last_priority_update_(PriorityCalculator::LowestPriority()),
+      was_above_priority_cutoff_at_last_priority_update_(false),
+      in_drawing_impl_tree_(false),
+      in_parent_compositor_(false),
+#if !DCHECK_IS_ON
+      resource_has_been_deleted_(false) {
+#else
+      resource_has_been_deleted_(false),
+      resource_provider_(resource_provider) {
+#endif
+}
+
+// A backing may only be destroyed after it has been unlinked from its owner
+// and its GL resource has been deleted (see EvictFirstBackingResource /
+// UnlinkAndClearEvictedBackings for the two-step protocol).
+PrioritizedResource::Backing::~Backing() {
+  DCHECK(!owner_);
+  DCHECK(resource_has_been_deleted_);
+}
+
+// Deletes the underlying GL resource (impl thread only). The Backing object
+// itself stays alive so the main thread can unlink it later.
+void PrioritizedResource::Backing::DeleteResource(
+    ResourceProvider* resource_provider) {
+  DCHECK(!proxy() || proxy()->IsImplThread());
+  DCHECK(!resource_has_been_deleted_);
+#if DCHECK_IS_ON
+  // Must be the same provider the resource was created with.
+  DCHECK(resource_provider == resource_provider_);
+#endif
+
+  resource_provider->DeleteResource(id());
+  set_id(0);
+  resource_has_been_deleted_ = true;
+}
+
+// Impl-thread-only query: whether DeleteResource() has already run.
+bool PrioritizedResource::Backing::ResourceHasBeenDeleted() const {
+  DCHECK(!proxy() || proxy()->IsImplThread());
+  return resource_has_been_deleted_;
+}
+
+// Recyclable means: below the priority cutoff at the last priority push and
+// not referenced by the currently-drawing impl tree. External (parent
+// compositor) use is checked separately by callers.
+bool PrioritizedResource::Backing::CanBeRecycledIfNotInExternalUse() const {
+  DCHECK(!proxy() || proxy()->IsImplThread());
+  return !was_above_priority_cutoff_at_last_priority_update_ &&
+         !in_drawing_impl_tree_;
+}
+
+// Snapshots the owner's priority state into this backing (impl thread with
+// main thread blocked). Orphaned backings drop to the lowest priority so
+// they sort first in eviction order.
+void PrioritizedResource::Backing::UpdatePriority() {
+  DCHECK(!proxy() ||
+         (proxy()->IsImplThread() && proxy()->IsMainThreadBlocked()));
+  if (owner_) {
+    priority_at_last_priority_update_ = owner_->request_priority();
+    was_above_priority_cutoff_at_last_priority_update_ =
+        owner_->is_above_priority_cutoff();
+  } else {
+    priority_at_last_priority_update_ = PriorityCalculator::LowestPriority();
+    was_above_priority_cutoff_at_last_priority_update_ = false;
+  }
+}
+
+// Refreshes the cached drawing/consumer state of this backing (impl thread
+// with main thread blocked).
+void PrioritizedResource::Backing::UpdateState(
+    ResourceProvider* resource_provider) {
+  DCHECK(!proxy() ||
+         (proxy()->IsImplThread() && proxy()->IsMainThreadBlocked()));
+  in_drawing_impl_tree_ = !!owner();
+  in_parent_compositor_ = resource_provider->InUseByConsumer(id());
+  // An ownerless backing should already have been demoted to the lowest
+  // priority by UpdatePriority().
+  if (!in_drawing_impl_tree_) {
+    DCHECK_EQ(priority_at_last_priority_update_,
+              PriorityCalculator::LowestPriority());
+  }
+}
+
+// Gives the backing (if any) back to the manager for recycling.
+void PrioritizedResource::ReturnBackingTexture() {
+  // A backing with no manager to receive it would be unreachable.
+  DCHECK(manager_ || !backing_);
+  if (!manager_)
+    return;
+  manager_->ReturnBackingTexture(this);
+}
+
+// Debug-only accessor for the owning manager's proxy; NULL when this
+// backing is orphaned or its owner has no manager.
+const Proxy* PrioritizedResource::Backing::proxy() const {
+  PrioritizedResourceManager* manager =
+      owner_ ? owner_->resource_manager() : NULL;
+  return manager ? manager->ProxyForDebug() : NULL;
+}
+
+} // namespace cc
diff --git a/cc/resources/prioritized_resource.h b/cc/resources/prioritized_resource.h
new file mode 100644
index 0000000..7920f81
--- /dev/null
+++ b/cc/resources/prioritized_resource.h
@@ -0,0 +1,185 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_PRIORITIZED_RESOURCE_H_
+#define CC_RESOURCES_PRIORITIZED_RESOURCE_H_
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "cc/base/cc_export.h"
+#include "cc/resources/priority_calculator.h"
+#include "cc/resources/resource.h"
+#include "cc/resources/resource_provider.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/size.h"
+#include "ui/gfx/vector2d.h"
+
+namespace cc {
+
+class PrioritizedResourceManager;
+class Proxy;
+
+// A texture managed by a PrioritizedResourceManager. The texture requests a
+// priority each frame; the manager decides which textures are "above the
+// priority cutoff" and only those may acquire an actual backing (a GL
+// resource wrapped in the inner Backing class, which lives on the impl
+// thread's eviction list).
+class CC_EXPORT PrioritizedResource {
+ public:
+  static scoped_ptr<PrioritizedResource> Create(
+      PrioritizedResourceManager* manager,
+      const gfx::Size& size,
+      ResourceFormat format) {
+    return make_scoped_ptr(new PrioritizedResource(manager, size, format));
+  }
+  static scoped_ptr<PrioritizedResource> Create(
+      PrioritizedResourceManager* manager) {
+    return make_scoped_ptr(
+        new PrioritizedResource(manager, gfx::Size(), RGBA_8888));
+  }
+  ~PrioritizedResource();
+
+  // Texture properties. Changing these causes the backing texture to be lost.
+  // Setting these to the same value is a no-op.
+  void SetTextureManager(PrioritizedResourceManager* manager);
+  PrioritizedResourceManager* resource_manager() { return manager_; }
+  void SetDimensions(const gfx::Size& size, ResourceFormat format);
+  ResourceFormat format() const { return format_; }
+  gfx::Size size() const { return size_; }
+  size_t bytes() const { return bytes_; }
+  bool contents_swizzled() const { return contents_swizzled_; }
+
+  // Set priority for the requested texture.
+  void set_request_priority(int priority) { priority_ = priority; }
+  int request_priority() const { return priority_; }
+
+  // After PrioritizedResource::PrioritizeTextures() is called, this returns
+  // if the request succeeded and this texture can be acquired for use.
+  bool can_acquire_backing_texture() const { return is_above_priority_cutoff_; }
+
+  // This returns whether we still have a backing texture. This can continue
+  // to be true even after CanAcquireBackingTexture() becomes false. In this
+  // case the texture can be used but shouldn't be updated since it will get
+  // taken away "soon".
+  bool have_backing_texture() const { return !!backing(); }
+
+  bool BackingResourceWasEvicted() const;
+
+  // If CanAcquireBackingTexture() is true AcquireBackingTexture() will acquire
+  // a backing texture for use. Call this whenever the texture is actually
+  // needed.
+  void AcquireBackingTexture(ResourceProvider* resource_provider);
+
+  // TODO(epenner): Request late is really a hack for when we are totally out of
+  // memory (all textures are visible) but we can still squeeze into the limit
+  // by not painting occluded textures. In this case the manager refuses all
+  // visible textures and RequestLate() will enable CanAcquireBackingTexture()
+  // on a call-order basis. We might want to just remove this in the future
+  // (carefully) and just make sure we don't regress OOMs situations.
+  bool RequestLate();
+
+  // Update pixels of backing resource from image. This functions will acquire
+  // the backing if needed.
+  void SetPixels(ResourceProvider* resource_provider,
+                 const uint8_t* image,
+                 const gfx::Rect& image_rect,
+                 const gfx::Rect& source_rect,
+                 const gfx::Vector2d& dest_offset);
+
+  // 0 when there is no backing (or its resource was deleted).
+  ResourceProvider::ResourceId resource_id() const {
+    return backing_ ? backing_->id() : 0;
+  }
+
+  // Self-managed textures are accounted for when prioritizing other textures,
+  // but they are not allocated/recycled/deleted, so this needs to be done
+  // externally. CanAcquireBackingTexture() indicates if the texture would have
+  // been allowed given its priority.
+  void set_is_self_managed(bool is_self_managed) {
+    is_self_managed_ = is_self_managed;
+  }
+  bool is_self_managed() { return is_self_managed_; }
+  void SetToSelfManagedMemoryPlaceholder(size_t bytes);
+
+  void ReturnBackingTexture();
+
+ private:
+  friend class PrioritizedResourceManager;
+  friend class PrioritizedResourceTest;
+
+  // The impl-thread-side GL resource behind a PrioritizedResource, tracked
+  // by the manager in a priority-sorted eviction list.
+  class Backing : public Resource {
+   public:
+    Backing(unsigned id,
+            ResourceProvider* resource_provider,
+            const gfx::Size& size,
+            ResourceFormat format);
+    ~Backing();
+    void UpdatePriority();
+    void UpdateState(ResourceProvider* resource_provider);
+
+    PrioritizedResource* owner() { return owner_; }
+    bool CanBeRecycledIfNotInExternalUse() const;
+    int request_priority_at_last_priority_update() const {
+      return priority_at_last_priority_update_;
+    }
+    bool was_above_priority_cutoff_at_last_priority_update() const {
+      return was_above_priority_cutoff_at_last_priority_update_;
+    }
+    bool in_drawing_impl_tree() const { return in_drawing_impl_tree_; }
+    bool in_parent_compositor() const { return in_parent_compositor_; }
+
+    void DeleteResource(ResourceProvider* resource_provider);
+    bool ResourceHasBeenDeleted() const;
+
+   private:
+    const Proxy* proxy() const;
+
+    friend class PrioritizedResource;
+    friend class PrioritizedResourceManager;
+    PrioritizedResource* owner_;
+    int priority_at_last_priority_update_;
+    bool was_above_priority_cutoff_at_last_priority_update_;
+
+    // Set if this is currently-drawing impl tree.
+    bool in_drawing_impl_tree_;
+    // Set if this is in the parent compositor.
+    bool in_parent_compositor_;
+
+    bool resource_has_been_deleted_;
+
+#if DCHECK_IS_ON
+    // Kept only to assert DeleteResource() uses the creating provider.
+    ResourceProvider* resource_provider_;
+#endif
+    DISALLOW_COPY_AND_ASSIGN(Backing);
+  };
+
+  PrioritizedResource(PrioritizedResourceManager* resource_manager,
+                      const gfx::Size& size,
+                      ResourceFormat format);
+
+  bool is_above_priority_cutoff() { return is_above_priority_cutoff_; }
+  void set_above_priority_cutoff(bool is_above_priority_cutoff) {
+    is_above_priority_cutoff_ = is_above_priority_cutoff;
+  }
+  void set_manager_internal(PrioritizedResourceManager* manager) {
+    manager_ = manager;
+  }
+
+  Backing* backing() const { return backing_; }
+  void Link(Backing* backing);
+  void Unlink();
+
+  gfx::Size size_;
+  ResourceFormat format_;
+  size_t bytes_;
+  bool contents_swizzled_;
+
+  int priority_;
+  bool is_above_priority_cutoff_;
+  bool is_self_managed_;
+
+  Backing* backing_;
+  PrioritizedResourceManager* manager_;
+
+  DISALLOW_COPY_AND_ASSIGN(PrioritizedResource);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_PRIORITIZED_RESOURCE_H_
diff --git a/cc/resources/prioritized_resource_manager.cc b/cc/resources/prioritized_resource_manager.cc
new file mode 100644
index 0000000..54ed308
--- /dev/null
+++ b/cc/resources/prioritized_resource_manager.cc
@@ -0,0 +1,551 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/prioritized_resource_manager.h"
+
+#include <algorithm>
+
+#include "base/debug/trace_event.h"
+#include "base/stl_util.h"
+#include "cc/resources/prioritized_resource.h"
+#include "cc/resources/priority_calculator.h"
+#include "cc/trees/proxy.h"
+
+namespace cc {
+
+// Starts with the default 64MB budget and an "allow everything" external
+// cutoff until the GPU memory manager pushes real limits.
+PrioritizedResourceManager::PrioritizedResourceManager(const Proxy* proxy)
+    : max_memory_limit_bytes_(DefaultMemoryAllocationLimit()),
+      external_priority_cutoff_(PriorityCalculator::AllowEverythingCutoff()),
+      memory_use_bytes_(0),
+      memory_above_cutoff_bytes_(0),
+      max_memory_needed_bytes_(0),
+      memory_available_bytes_(0),
+      proxy_(proxy),
+      backings_tail_not_sorted_(false),
+      memory_visible_bytes_(0),
+      memory_visible_and_nearby_bytes_(0),
+      memory_visible_last_pushed_bytes_(0),
+      memory_visible_and_nearby_last_pushed_bytes_(0) {}
+
+// Unregisters every remaining texture (which returns their backings), then
+// frees evicted backings. Anything still in backings_ would be a leaked GL
+// texture.
+PrioritizedResourceManager::~PrioritizedResourceManager() {
+  while (textures_.size() > 0)
+    UnregisterTexture(*textures_.begin());
+
+  UnlinkAndClearEvictedBackings();
+  DCHECK(evicted_backings_.empty());
+
+  // Each remaining backing is a leaked opengl texture. There should be none.
+  DCHECK(backings_.empty());
+}
+
+// Impl-thread view of the visible-memory requirement, as of the last
+// PushTexturePrioritiesToBackings().
+size_t PrioritizedResourceManager::MemoryVisibleBytes() const {
+  DCHECK(proxy_->IsImplThread());
+  return memory_visible_last_pushed_bytes_;
+}
+
+// Impl-thread view of the visible-and-nearby memory requirement, as of the
+// last PushTexturePrioritiesToBackings().
+size_t PrioritizedResourceManager::MemoryVisibleAndNearbyBytes() const {
+  DCHECK(proxy_->IsImplThread());
+  return memory_visible_and_nearby_last_pushed_bytes_;
+}
+
+// Main-thread pass that decides which textures may have backings this frame:
+// sorts all textures by priority, walks them accumulating bytes until the
+// budget is exhausted (that priority becomes priority_cutoff_), and marks
+// each texture above/below the cutoff. Also gathers the visible /
+// visible-and-nearby memory statistics pushed to the impl thread later.
+void PrioritizedResourceManager::PrioritizeTextures() {
+  TRACE_EVENT0("cc", "PrioritizedResourceManager::PrioritizeTextures");
+  DCHECK(proxy_->IsMainThread());
+
+  // Sorting textures in this function could be replaced by a slightly
+  // modified O(n) quick-select to partition textures rather than
+  // sort them (if performance of the sort becomes an issue).
+
+  TextureVector& sorted_textures = temp_texture_vector_;
+  sorted_textures.clear();
+
+  // Copy all textures into a vector, sort them, and collect memory requirements
+  // statistics.
+  memory_visible_bytes_ = 0;
+  memory_visible_and_nearby_bytes_ = 0;
+  for (TextureSet::iterator it = textures_.begin(); it != textures_.end();
+       ++it) {
+    PrioritizedResource* texture = (*it);
+    sorted_textures.push_back(texture);
+    if (PriorityCalculator::priority_is_higher(
+            texture->request_priority(),
+            PriorityCalculator::AllowVisibleOnlyCutoff()))
+      memory_visible_bytes_ += texture->bytes();
+    if (PriorityCalculator::priority_is_higher(
+            texture->request_priority(),
+            PriorityCalculator::AllowVisibleAndNearbyCutoff()))
+      memory_visible_and_nearby_bytes_ += texture->bytes();
+  }
+  std::sort(sorted_textures.begin(), sorted_textures.end(), CompareTextures);
+
+  // Compute a priority cutoff based on memory pressure
+  memory_available_bytes_ = max_memory_limit_bytes_;
+  priority_cutoff_ = external_priority_cutoff_;
+  size_t memory_bytes = 0;
+  for (TextureVector::iterator it = sorted_textures.begin();
+       it != sorted_textures.end();
+       ++it) {
+    if ((*it)->is_self_managed()) {
+      // Account for self-managed memory immediately by reducing the memory
+      // available (since it never gets acquired).
+      size_t new_memory_bytes = memory_bytes + (*it)->bytes();
+      if (new_memory_bytes > memory_available_bytes_) {
+        priority_cutoff_ = (*it)->request_priority();
+        // Freeze the budget at what is actually consumed so far.
+        memory_available_bytes_ = memory_bytes;
+        break;
+      }
+      // Self-managed bytes shrink the budget instead of counting as use.
+      memory_available_bytes_ -= (*it)->bytes();
+    } else {
+      size_t new_memory_bytes = memory_bytes + (*it)->bytes();
+      if (new_memory_bytes > memory_available_bytes_) {
+        priority_cutoff_ = (*it)->request_priority();
+        break;
+      }
+      memory_bytes = new_memory_bytes;
+    }
+  }
+
+  // Disallow any textures with priority below the external cutoff to have
+  // backings.
+  for (TextureVector::iterator it = sorted_textures.begin();
+       it != sorted_textures.end();
+       ++it) {
+    PrioritizedResource* texture = (*it);
+    if (!PriorityCalculator::priority_is_higher(texture->request_priority(),
+                                                external_priority_cutoff_) &&
+        texture->have_backing_texture())
+      texture->Unlink();
+  }
+
+  // Only allow textures if they are higher than the cutoff. All textures
+  // of the same priority are accepted or rejected together, rather than
+  // being partially allowed randomly.
+  max_memory_needed_bytes_ = 0;
+  memory_above_cutoff_bytes_ = 0;
+  for (TextureVector::iterator it = sorted_textures.begin();
+       it != sorted_textures.end();
+       ++it) {
+    PrioritizedResource* resource = *it;
+    bool is_above_priority_cutoff = PriorityCalculator::priority_is_higher(
+        resource->request_priority(), priority_cutoff_);
+    resource->set_above_priority_cutoff(is_above_priority_cutoff);
+    if (!resource->is_self_managed()) {
+      max_memory_needed_bytes_ += resource->bytes();
+      if (is_above_priority_cutoff)
+        memory_above_cutoff_bytes_ += resource->bytes();
+    }
+  }
+  sorted_textures.clear();
+
+  DCHECK_LE(memory_above_cutoff_bytes_, memory_available_bytes_);
+  DCHECK_LE(MemoryAboveCutoffBytes(), MaxMemoryLimitBytes());
+}
+
+// Commit-time step (impl thread, main thread blocked): snapshot each
+// backing's owner priority, re-sort the eviction list, and publish the
+// memory statistics computed by PrioritizeTextures() to the impl thread.
+void PrioritizedResourceManager::PushTexturePrioritiesToBackings() {
+  TRACE_EVENT0("cc",
+               "PrioritizedResourceManager::PushTexturePrioritiesToBackings");
+  DCHECK(proxy_->IsImplThread() && proxy_->IsMainThreadBlocked());
+
+  AssertInvariants();
+  for (BackingList::iterator it = backings_.begin(); it != backings_.end();
+       ++it)
+    (*it)->UpdatePriority();
+  SortBackings();
+  AssertInvariants();
+
+  // Push memory requirements to the impl thread structure.
+  memory_visible_last_pushed_bytes_ = memory_visible_bytes_;
+  memory_visible_and_nearby_last_pushed_bytes_ =
+      memory_visible_and_nearby_bytes_;
+}
+
+// Refreshes each backing's cached drawing/consumer state and restores the
+// eviction ordering, which depends on that state. Runs on the impl thread
+// with the main thread blocked.
+void PrioritizedResourceManager::UpdateBackingsState(
+    ResourceProvider* resource_provider) {
+  // Use the function's actual name in the trace label; the old label
+  // ("UpdateBackingsInDrawingImplTree") was a stale pre-rename name that
+  // made this event impossible to find in traces.
+  TRACE_EVENT0("cc",
+               "PrioritizedResourceManager::UpdateBackingsState");
+  DCHECK(proxy_->IsImplThread() && proxy_->IsMainThreadBlocked());
+
+  AssertInvariants();
+  for (BackingList::iterator it = backings_.begin(); it != backings_.end();
+       ++it) {
+    PrioritizedResource::Backing* backing = (*it);
+    backing->UpdateState(resource_provider);
+  }
+  SortBackings();
+  AssertInvariants();
+}
+
+// Re-sorts backings_ into eviction/recycling order (front = evict first)
+// and clears the "tail unsorted" flag set when backings are appended during
+// an upload.
+void PrioritizedResourceManager::SortBackings() {
+  TRACE_EVENT0("cc", "PrioritizedResourceManager::SortBackings");
+  DCHECK(proxy_->IsImplThread());
+
+  // Put backings in eviction/recycling order.
+  backings_.sort(CompareBackings);
+  backings_tail_not_sorted_ = false;
+}
+
+// Decays every texture's priority toward "lingering" rather than resetting
+// it outright, so recently-useful textures still sort ahead of stale ones.
+void PrioritizedResourceManager::ClearPriorities() {
+  DCHECK(proxy_->IsMainThread());
+  for (TextureSet::iterator it = textures_.begin(); it != textures_.end();
+       ++it) {
+    // TODO(reveman): We should remove this and just set all priorities to
+    // PriorityCalculator::lowestPriority() once we have priorities for all
+    // textures (we can't currently calculate distances for off-screen
+    // textures).
+    (*it)->set_request_priority(
+        PriorityCalculator::LingeringPriority((*it)->request_priority()));
+  }
+}
+
+// Late-allocation escape hatch: grants |texture| above-cutoff status after
+// PrioritizeTextures() has run, on a first-come basis, if its priority is at
+// least the cutoff (strictly above the external cutoff) and the extra bytes
+// still fit in the budget.
+bool PrioritizedResourceManager::RequestLate(PrioritizedResource* texture) {
+  DCHECK(proxy_->IsMainThread());
+
+  // This is already above cutoff, so don't double count it's memory below.
+  if (texture->is_above_priority_cutoff())
+    return true;
+
+  // Allow textures that have priority equal to the cutoff, but not strictly
+  // lower.
+  if (PriorityCalculator::priority_is_lower(texture->request_priority(),
+                                            priority_cutoff_))
+    return false;
+
+  // Disallow textures that do not have a priority strictly higher than the
+  // external cutoff.
+  if (!PriorityCalculator::priority_is_higher(texture->request_priority(),
+                                              external_priority_cutoff_))
+    return false;
+
+  size_t new_memory_bytes = memory_above_cutoff_bytes_ + texture->bytes();
+  if (new_memory_bytes > memory_available_bytes_)
+    return false;
+
+  memory_above_cutoff_bytes_ = new_memory_bytes;
+  texture->set_above_priority_cutoff(true);
+  return true;
+}
+
+// Gives |texture| a backing: first tries to recycle an existing backing of
+// matching size/format from the recyclable front of the eviction list;
+// otherwise evicts recyclables to make room and allocates a fresh one. The
+// chosen backing is moved to the (unsorted) tail of the eviction list.
+void PrioritizedResourceManager::AcquireBackingTextureIfNeeded(
+    PrioritizedResource* texture,
+    ResourceProvider* resource_provider) {
+  DCHECK(proxy_->IsImplThread() && proxy_->IsMainThreadBlocked());
+  DCHECK(!texture->is_self_managed());
+  DCHECK(texture->is_above_priority_cutoff());
+  if (texture->backing() || !texture->is_above_priority_cutoff())
+    return;
+
+  // Find a backing below, by either recycling or allocating.
+  PrioritizedResource::Backing* backing = NULL;
+
+  // First try to recycle
+  for (BackingList::iterator it = backings_.begin(); it != backings_.end();
+       ++it) {
+    // Recyclable backings are sorted to the front; stop at the first
+    // non-recyclable one.
+    if (!(*it)->CanBeRecycledIfNotInExternalUse())
+      break;
+    if (resource_provider->InUseByConsumer((*it)->id()))
+      continue;
+    if ((*it)->size() == texture->size() &&
+        (*it)->format() == texture->format()) {
+      backing = (*it);
+      backings_.erase(it);
+      break;
+    }
+  }
+
+  // Otherwise reduce memory and just allocate a new backing texures.
+  if (!backing) {
+    // NOTE(review): if texture->bytes() > memory_available_bytes_ this
+    // size_t subtraction wraps to a huge limit and nothing is evicted;
+    // presumably PrioritizeTextures()/RequestLate() guarantee the texture
+    // fits the budget — confirm.
+    EvictBackingsToReduceMemory(memory_available_bytes_ - texture->bytes(),
+                                PriorityCalculator::AllowEverythingCutoff(),
+                                EVICT_ONLY_RECYCLABLE,
+                                DO_NOT_UNLINK_BACKINGS,
+                                resource_provider);
+    backing =
+        CreateBacking(texture->size(), texture->format(), resource_provider);
+  }
+
+  // Move the used backing to the end of the eviction list, and note that
+  // the tail is not sorted.
+  if (backing->owner())
+    backing->owner()->Unlink();
+  texture->Link(backing);
+  backings_.push_back(backing);
+  backings_tail_not_sorted_ = true;
+
+  // Update the backing's priority from its new owner.
+  backing->UpdatePriority();
+}
+
+// Evicts backings from the front of the (priority-sorted) eviction list
+// until memory use drops to |limit_bytes| and every remaining backing is
+// above |priority_cutoff|. |eviction_policy| can restrict eviction to
+// recyclable backings; |unlink_policy| controls whether owners are unlinked
+// (only legal while the main thread is blocked). Returns whether anything
+// was evicted.
+bool PrioritizedResourceManager::EvictBackingsToReduceMemory(
+    size_t limit_bytes,
+    int priority_cutoff,
+    EvictionPolicy eviction_policy,
+    UnlinkPolicy unlink_policy,
+    ResourceProvider* resource_provider) {
+  DCHECK(proxy_->IsImplThread());
+  if (unlink_policy == UNLINK_BACKINGS)
+    DCHECK(proxy_->IsMainThreadBlocked());
+  // Fast path: already under the limit and no priority-based eviction asked.
+  if (MemoryUseBytes() <= limit_bytes &&
+      PriorityCalculator::AllowEverythingCutoff() == priority_cutoff)
+    return false;
+
+  // Destroy backings until we are below the limit,
+  // or until all backings remaining are above the cutoff.
+  bool evicted_anything = false;
+  while (backings_.size() > 0) {
+    PrioritizedResource::Backing* backing = backings_.front();
+    if (MemoryUseBytes() <= limit_bytes &&
+        PriorityCalculator::priority_is_higher(
+            backing->request_priority_at_last_priority_update(),
+            priority_cutoff))
+      break;
+    if (eviction_policy == EVICT_ONLY_RECYCLABLE &&
+        !backing->CanBeRecycledIfNotInExternalUse())
+      break;
+    if (unlink_policy == UNLINK_BACKINGS && backing->owner())
+      backing->owner()->Unlink();
+    EvictFirstBackingResource(resource_provider);
+    evicted_anything = true;
+  }
+  return evicted_anything;
+}
+
+// Trims unowned ("wasted") backings kept around for recycling once they
+// exceed 10% of the budget — or entirely, when the external cutoff says
+// unused memory must be freed.
+void PrioritizedResourceManager::ReduceWastedMemory(
+    ResourceProvider* resource_provider) {
+  // We currently collect backings from deleted textures for later recycling.
+  // However, if we do that forever we will always use the max limit even if
+  // we really need very little memory. This should probably be solved by
+  // reducing the limit externally, but until then this just does some "clean
+  // up" of unused backing textures (any more than 10%).
+  size_t wasted_memory = 0;
+  for (BackingList::iterator it = backings_.begin(); it != backings_.end();
+       ++it) {
+    // Owned backings sort after unowned ones, so stop at the first owner.
+    if ((*it)->owner())
+      break;
+    // Backings still referenced by the parent compositor are not wasted.
+    if ((*it)->in_parent_compositor())
+      continue;
+    wasted_memory += (*it)->bytes();
+  }
+  size_t wasted_memory_to_allow = memory_available_bytes_ / 10;
+  // If the external priority cutoff indicates that unused memory should be
+  // freed, then do not allow any memory for texture recycling.
+  if (external_priority_cutoff_ != PriorityCalculator::AllowEverythingCutoff())
+    wasted_memory_to_allow = 0;
+  if (wasted_memory > wasted_memory_to_allow)
+    EvictBackingsToReduceMemory(MemoryUseBytes() -
+                                (wasted_memory - wasted_memory_to_allow),
+                                PriorityCalculator::AllowEverythingCutoff(),
+                                EVICT_ONLY_RECYCLABLE,
+                                DO_NOT_UNLINK_BACKINGS,
+                                resource_provider);
+}
+
+// Brings memory use down to the current budget (unlinking owners as needed,
+// hence main thread must be blocked), then trims recycling overhead.
+void PrioritizedResourceManager::ReduceMemory(
+    ResourceProvider* resource_provider) {
+  DCHECK(proxy_->IsImplThread() && proxy_->IsMainThreadBlocked());
+  EvictBackingsToReduceMemory(memory_available_bytes_,
+                              PriorityCalculator::AllowEverythingCutoff(),
+                              EVICT_ANYTHING,
+                              UNLINK_BACKINGS,
+                              resource_provider);
+  DCHECK_LE(MemoryUseBytes(), memory_available_bytes_);
+
+  ReduceWastedMemory(resource_provider);
+}
+
+// Evicts every backing (limit 0). Owners are not unlinked here; they are
+// cleaned up later by UnlinkAndClearEvictedBackings() on the main thread.
+// A NULL provider is only legal when there is nothing to free.
+void PrioritizedResourceManager::ClearAllMemory(
+    ResourceProvider* resource_provider) {
+  DCHECK(proxy_->IsImplThread() && proxy_->IsMainThreadBlocked());
+  if (!resource_provider) {
+    DCHECK(backings_.empty());
+    return;
+  }
+  EvictBackingsToReduceMemory(0,
+                              PriorityCalculator::AllowEverythingCutoff(),
+                              EVICT_ANYTHING,
+                              DO_NOT_UNLINK_BACKINGS,
+                              resource_provider);
+}
+
+// Impl-thread-initiated eviction (e.g. from the GPU memory manager) that
+// runs without blocking the main thread, so backings are never unlinked.
+// Returns whether anything was evicted.
+bool PrioritizedResourceManager::ReduceMemoryOnImplThread(
+    size_t limit_bytes,
+    int priority_cutoff,
+    ResourceProvider* resource_provider) {
+  DCHECK(proxy_->IsImplThread());
+  DCHECK(resource_provider);
+
+  // If we are in the process of uploading a new frame then the backings at the
+  // very end of the list are not sorted by priority. Sort them before doing the
+  // eviction.
+  if (backings_tail_not_sorted_)
+    SortBackings();
+  return EvictBackingsToReduceMemory(limit_bytes,
+                                     priority_cutoff,
+                                     EVICT_ANYTHING,
+                                     DO_NOT_UNLINK_BACKINGS,
+                                     resource_provider);
+}
+
+// Main-thread half of the two-step eviction protocol: unlink evicted
+// backings from their owners and free the Backing objects (their GL
+// resources were already deleted on the impl thread).
+void PrioritizedResourceManager::UnlinkAndClearEvictedBackings() {
+  DCHECK(proxy_->IsMainThread());
+  base::AutoLock scoped_lock(evicted_backings_lock_);
+  for (BackingList::const_iterator it = evicted_backings_.begin();
+       it != evicted_backings_.end();
+       ++it) {
+    PrioritizedResource::Backing* backing = (*it);
+    if (backing->owner())
+      backing->owner()->Unlink();
+    delete backing;
+  }
+  evicted_backings_.clear();
+}
+
+// Whether any evicted backing still has an owner, i.e. the main thread has
+// not yet run UnlinkAndClearEvictedBackings().
+bool PrioritizedResourceManager::LinkedEvictedBackingsExist() const {
+  DCHECK(proxy_->IsImplThread() && proxy_->IsMainThreadBlocked());
+  base::AutoLock scoped_lock(evicted_backings_lock_);
+  for (BackingList::const_iterator it = evicted_backings_.begin();
+       it != evicted_backings_.end();
+       ++it) {
+    if ((*it)->owner())
+      return true;
+  }
+  return false;
+}
+
+// Adds an unmanaged, unbacked texture to this manager and points its
+// manager_ back at us.
+void PrioritizedResourceManager::RegisterTexture(PrioritizedResource* texture) {
+  DCHECK(proxy_->IsMainThread());
+  DCHECK(texture);
+  DCHECK(!texture->resource_manager());
+  DCHECK(!texture->backing());
+  DCHECK(!ContainsKey(textures_, texture));
+
+  texture->set_manager_internal(this);
+  textures_.insert(texture);
+}
+
+// Removes a texture from this manager, returning its backing and clearing
+// its manager pointer and cutoff status.
+void PrioritizedResourceManager::UnregisterTexture(
+    PrioritizedResource* texture) {
+  DCHECK(proxy_->IsMainThread() ||
+         (proxy_->IsImplThread() && proxy_->IsMainThreadBlocked()));
+  DCHECK(texture);
+  DCHECK(ContainsKey(textures_, texture));
+
+  ReturnBackingTexture(texture);
+  texture->set_manager_internal(NULL);
+  textures_.erase(texture);
+  texture->set_above_priority_cutoff(false);
+}
+
+// Detaches |texture| from its backing (if any); the backing stays in the
+// eviction list for recycling.
+void PrioritizedResourceManager::ReturnBackingTexture(
+    PrioritizedResource* texture) {
+  DCHECK(proxy_->IsMainThread() ||
+         (proxy_->IsImplThread() && proxy_->IsMainThreadBlocked()));
+  if (texture->backing())
+    texture->Unlink();
+}
+
+// Allocates a new managed GL resource and wraps it in a Backing; memory
+// accounting is updated here (the caller inserts it into backings_).
+PrioritizedResource::Backing* PrioritizedResourceManager::CreateBacking(
+    const gfx::Size& size,
+    ResourceFormat format,
+    ResourceProvider* resource_provider) {
+  DCHECK(proxy_->IsImplThread() && proxy_->IsMainThreadBlocked());
+  DCHECK(resource_provider);
+  ResourceProvider::ResourceId resource_id =
+      resource_provider->CreateManagedResource(
+          size,
+          GL_TEXTURE_2D,
+          GL_CLAMP_TO_EDGE,
+          ResourceProvider::TextureHintImmutable,
+          format);
+  PrioritizedResource::Backing* backing = new PrioritizedResource::Backing(
+      resource_id, resource_provider, size, format);
+  memory_use_bytes_ += backing->bytes();
+  return backing;
+}
+
+// Impl-thread half of eviction: frees the GL resource of the front (lowest
+// priority) backing and moves the Backing object onto the evicted list for
+// the main thread to unlink and delete later.
+void PrioritizedResourceManager::EvictFirstBackingResource(
+    ResourceProvider* resource_provider) {
+  DCHECK(proxy_->IsImplThread());
+  DCHECK(resource_provider);
+  DCHECK(!backings_.empty());
+  PrioritizedResource::Backing* backing = backings_.front();
+
+  // Note that we create a backing and its resource at the same time, but we
+  // delete the backing structure and its resource in two steps. This is because
+  // we can delete the resource while the main thread is running, but we cannot
+  // unlink backings while the main thread is running.
+  backing->DeleteResource(resource_provider);
+  memory_use_bytes_ -= backing->bytes();
+  backings_.pop_front();
+  base::AutoLock scoped_lock(evicted_backings_lock_);
+  evicted_backings_.push_back(backing);
+}
+
+// Debug-only consistency check of the texture/backing links and of the
+// eviction-list ordering. Compiles to nothing when DCHECKs are off.
+void PrioritizedResourceManager::AssertInvariants() {
+#if DCHECK_IS_ON
+  DCHECK(proxy_->IsImplThread() && proxy_->IsMainThreadBlocked());
+
+  // If we hit any of these asserts, there is a bug in this class. To see
+  // where the bug is, call this function at the beginning and end of
+  // every public function.
+
+  // Backings/textures must be doubly-linked and only to other backings/textures
+  // in this manager.
+  for (BackingList::iterator it = backings_.begin(); it != backings_.end();
+       ++it) {
+    if ((*it)->owner()) {
+      DCHECK(ContainsKey(textures_, (*it)->owner()));
+      DCHECK((*it)->owner()->backing() == (*it));
+    }
+  }
+  for (TextureSet::iterator it = textures_.begin(); it != textures_.end();
+       ++it) {
+    PrioritizedResource* texture = (*it);
+    PrioritizedResource::Backing* backing = texture->backing();
+    base::AutoLock scoped_lock(evicted_backings_lock_);
+    if (backing) {
+      // A deleted-resource backing must live (only) on the evicted list;
+      // a live one must live (only) on the main backing list.
+      if (backing->ResourceHasBeenDeleted()) {
+        DCHECK(std::find(backings_.begin(), backings_.end(), backing) ==
+               backings_.end());
+        DCHECK(std::find(evicted_backings_.begin(),
+                         evicted_backings_.end(),
+                         backing) != evicted_backings_.end());
+      } else {
+        DCHECK(std::find(backings_.begin(), backings_.end(), backing) !=
+               backings_.end());
+        DCHECK(std::find(evicted_backings_.begin(),
+                         evicted_backings_.end(),
+                         backing) == evicted_backings_.end());
+      }
+      DCHECK(backing->owner() == texture);
+    }
+  }
+
+  // At all times, backings that can be evicted must always come before
+  // backings that can't be evicted in the backing texture list (otherwise
+  // ReduceMemory will not find all textures available for eviction/recycling).
+  bool reached_unrecyclable = false;
+  PrioritizedResource::Backing* previous_backing = NULL;
+  for (BackingList::iterator it = backings_.begin(); it != backings_.end();
+       ++it) {
+    PrioritizedResource::Backing* backing = *it;
+    // The unsorted tail (freshly-acquired, above-cutoff backings) is exempt
+    // from the ordering check.
+    if (previous_backing &&
+        (!backings_tail_not_sorted_ ||
+         !backing->was_above_priority_cutoff_at_last_priority_update()))
+      DCHECK(CompareBackings(previous_backing, backing));
+    if (!backing->CanBeRecycledIfNotInExternalUse())
+      reached_unrecyclable = true;
+    if (reached_unrecyclable)
+      DCHECK(!backing->CanBeRecycledIfNotInExternalUse());
+    else
+      DCHECK(backing->CanBeRecycledIfNotInExternalUse());
+    previous_backing = backing;
+  }
+#endif  // DCHECK_IS_ON
+}
+
+// Exposes the proxy for thread-checking DCHECKs only (see Backing::proxy()).
+const Proxy* PrioritizedResourceManager::ProxyForDebug() const {
+  return proxy_;
+}
+
+} // namespace cc
diff --git a/cc/resources/prioritized_resource_manager.h b/cc/resources/prioritized_resource_manager.h
new file mode 100644
index 0000000..092b1d7
--- /dev/null
+++ b/cc/resources/prioritized_resource_manager.h
@@ -0,0 +1,247 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_PRIORITIZED_RESOURCE_MANAGER_H_
+#define CC_RESOURCES_PRIORITIZED_RESOURCE_MANAGER_H_
+
+#include <list>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+#include "cc/base/cc_export.h"
+#include "cc/resources/prioritized_resource.h"
+#include "cc/resources/priority_calculator.h"
+#include "cc/resources/resource.h"
+#include "cc/trees/proxy.h"
+#include "ui/gfx/size.h"
+
+#if defined(COMPILER_GCC)
+namespace BASE_HASH_NAMESPACE {
+// GCC's hash containers require an explicit hash specialization for raw
+// pointer keys; hash the pointer's numeric value directly.
+template <> struct hash<cc::PrioritizedResource*> {
+  size_t operator()(cc::PrioritizedResource* ptr) const {
+    return hash<size_t>()(reinterpret_cast<size_t>(ptr));
+  }
+};
+} // namespace BASE_HASH_NAMESPACE
+#endif // defined(COMPILER_GCC)
+
+namespace cc {
+
+class PriorityCalculator;
+class Proxy;
+
+// Manages a pool of PrioritizedResource textures against a global memory
+// budget. Textures are registered and prioritized on the main thread, while
+// their backings can be evicted or recycled on the impl thread (see
+// ReduceMemoryOnImplThread and evicted_backings_lock_ below).
+class CC_EXPORT PrioritizedResourceManager {
+ public:
+  static scoped_ptr<PrioritizedResourceManager> Create(const Proxy* proxy) {
+    return make_scoped_ptr(new PrioritizedResourceManager(proxy));
+  }
+  scoped_ptr<PrioritizedResource> CreateTexture(
+      const gfx::Size& size, ResourceFormat format) {
+    return make_scoped_ptr(new PrioritizedResource(this, size, format));
+  }
+  ~PrioritizedResourceManager();
+
+  typedef std::list<PrioritizedResource::Backing*> BackingList;
+
+  // TODO(epenner): (http://crbug.com/137094) This 64MB default is a straggler
+  // from the old texture manager and is just to give us a default memory
+  // allocation before we get a callback from the GPU memory manager. We
+  // should probably either:
+  // - wait for the callback before rendering anything instead
+  // - push this into the GPU memory manager somehow.
+  static size_t DefaultMemoryAllocationLimit() { return 64 * 1024 * 1024; }
+
+  // MemoryUseBytes() describes the number of bytes used by existing allocated
+  // textures.
+  size_t MemoryUseBytes() const { return memory_use_bytes_; }
+  // MemoryAboveCutoffBytes() describes the number of bytes that
+  // would be used if all textures that are above the cutoff were allocated.
+  // MemoryUseBytes() <= MemoryAboveCutoffBytes() should always be true.
+  size_t MemoryAboveCutoffBytes() const { return memory_above_cutoff_bytes_; }
+  // MaxMemoryNeededBytes() describes the number of bytes that would be used
+  // by textures if there were no limit on memory usage.
+  size_t MaxMemoryNeededBytes() const { return max_memory_needed_bytes_; }
+  size_t MemoryForSelfManagedTextures() const {
+    return max_memory_limit_bytes_ - memory_available_bytes_;
+  }
+
+  void SetMaxMemoryLimitBytes(size_t bytes) { max_memory_limit_bytes_ = bytes; }
+  size_t MaxMemoryLimitBytes() const { return max_memory_limit_bytes_; }
+
+  // Specify an external priority cutoff. Only textures that have a strictly
+  // higher priority than this cutoff will be allowed.
+  void SetExternalPriorityCutoff(int priority_cutoff) {
+    external_priority_cutoff_ = priority_cutoff;
+  }
+  int ExternalPriorityCutoff() const {
+    return external_priority_cutoff_;
+  }
+
+  // Return the amount of texture memory required at particular cutoffs.
+  size_t MemoryVisibleBytes() const;
+  size_t MemoryVisibleAndNearbyBytes() const;
+
+  void PrioritizeTextures();
+  void ClearPriorities();
+
+  // Delete contents textures' backing resources until they use only
+  // limit_bytes bytes. This may be called on the impl thread while the main
+  // thread is running. Returns true if resources are indeed evicted as a
+  // result of this call.
+  bool ReduceMemoryOnImplThread(size_t limit_bytes,
+                                int priority_cutoff,
+                                ResourceProvider* resource_provider);
+
+  // Returns true if there exist any textures that are linked to backings that
+  // have had their resources evicted. Only when we commit a tree that has no
+  // textures linked to evicted backings may we allow drawing. After an
+  // eviction, this will not become true until UnlinkAndClearEvictedBackings
+  // is called.
+  bool LinkedEvictedBackingsExist() const;
+
+  // Unlink the list of contents textures' backings from their owning textures
+  // and delete the evicted backings' structures. This is called just before
+  // updating layers, and is only ever called on the main thread.
+  void UnlinkAndClearEvictedBackings();
+
+  bool RequestLate(PrioritizedResource* texture);
+
+  void ReduceWastedMemory(ResourceProvider* resource_provider);
+  void ReduceMemory(ResourceProvider* resource_provider);
+  void ClearAllMemory(ResourceProvider* resource_provider);
+
+  void AcquireBackingTextureIfNeeded(PrioritizedResource* texture,
+                                     ResourceProvider* resource_provider);
+
+  void RegisterTexture(PrioritizedResource* texture);
+  void UnregisterTexture(PrioritizedResource* texture);
+  void ReturnBackingTexture(PrioritizedResource* texture);
+
+  // Update all backings' priorities from their owning texture.
+  void PushTexturePrioritiesToBackings();
+
+  // Mark all textures' backings as being in the drawing impl tree.
+  void UpdateBackingsState(ResourceProvider* resource_provider);
+
+  const Proxy* ProxyForDebug() const;
+
+ private:
+  friend class PrioritizedResourceTest;
+
+  enum EvictionPolicy {
+    EVICT_ONLY_RECYCLABLE,
+    EVICT_ANYTHING,
+  };
+  enum UnlinkPolicy {
+    DO_NOT_UNLINK_BACKINGS,
+    UNLINK_BACKINGS,
+  };
+
+  // Compare textures. Highest priority first.
+  static inline bool CompareTextures(PrioritizedResource* a,
+                                     PrioritizedResource* b) {
+    if (a->request_priority() == b->request_priority())
+      return a < b;
+    return PriorityCalculator::priority_is_higher(a->request_priority(),
+                                                  b->request_priority());
+  }
+  // Compare backings. Lowest priority first.
+  static inline bool CompareBackings(PrioritizedResource::Backing* a,
+                                     PrioritizedResource::Backing* b) {
+    // Make textures that can be recycled appear first.
+    if (a->CanBeRecycledIfNotInExternalUse() !=
+        b->CanBeRecycledIfNotInExternalUse())
+      return (a->CanBeRecycledIfNotInExternalUse() >
+              b->CanBeRecycledIfNotInExternalUse());
+    // Then sort by being above or below the priority cutoff.
+    if (a->was_above_priority_cutoff_at_last_priority_update() !=
+        b->was_above_priority_cutoff_at_last_priority_update())
+      return (a->was_above_priority_cutoff_at_last_priority_update() <
+              b->was_above_priority_cutoff_at_last_priority_update());
+    // Then sort by priority (note that backings that no longer have owners will
+    // always have the lowest priority).
+    if (a->request_priority_at_last_priority_update() !=
+        b->request_priority_at_last_priority_update())
+      return PriorityCalculator::priority_is_lower(
+          a->request_priority_at_last_priority_update(),
+          b->request_priority_at_last_priority_update());
+    // Then sort by being in the impl tree versus being completely
+    // unreferenced.
+    if (a->in_drawing_impl_tree() != b->in_drawing_impl_tree())
+      return (a->in_drawing_impl_tree() < b->in_drawing_impl_tree());
+    // Finally, prefer to evict textures in the parent compositor because
+    // they will otherwise take another roundtrip to the parent compositor
+    // before they are evicted.
+    if (a->in_parent_compositor() != b->in_parent_compositor())
+      return (a->in_parent_compositor() > b->in_parent_compositor());
+    return a < b;
+  }
+
+  explicit PrioritizedResourceManager(const Proxy* proxy);
+
+  bool EvictBackingsToReduceMemory(size_t limit_bytes,
+                                   int priority_cutoff,
+                                   EvictionPolicy eviction_policy,
+                                   UnlinkPolicy unlink_policy,
+                                   ResourceProvider* resource_provider);
+  PrioritizedResource::Backing* CreateBacking(
+      const gfx::Size& size,
+      ResourceFormat format,
+      ResourceProvider* resource_provider);
+  void EvictFirstBackingResource(ResourceProvider* resource_provider);
+  void SortBackings();
+
+  void AssertInvariants();
+
+  size_t max_memory_limit_bytes_;
+  // The priority cutoff based on memory pressure. This is not a strict
+  // cutoff -- RequestLate allows textures with priority equal to this
+  // cutoff to be allowed.
+  int priority_cutoff_;
+  // The priority cutoff based on external memory policy. This is a strict
+  // cutoff -- no textures with priority equal to this cutoff will be allowed.
+  int external_priority_cutoff_;
+  size_t memory_use_bytes_;
+  size_t memory_above_cutoff_bytes_;
+  size_t max_memory_needed_bytes_;
+  size_t memory_available_bytes_;
+
+  typedef base::hash_set<PrioritizedResource*> TextureSet;
+  typedef std::vector<PrioritizedResource*> TextureVector;
+
+  const Proxy* proxy_;
+
+  TextureSet textures_;
+  // This list is always sorted in eviction order, with the exception of the
+  // newly-allocated or recycled textures at the very end of the tail that
+  // are not sorted by priority.
+  BackingList backings_;
+  bool backings_tail_not_sorted_;
+
+  // The list of backings that have been evicted, but may still be linked
+  // to textures. This can be accessed concurrently by the main and impl
+  // threads, and may only be accessed while holding evicted_backings_lock_.
+  mutable base::Lock evicted_backings_lock_;
+  BackingList evicted_backings_;
+
+  TextureVector temp_texture_vector_;
+
+  // Statistics about memory usage at priority cutoffs, computed at
+  // PrioritizeTextures.
+  size_t memory_visible_bytes_;
+  size_t memory_visible_and_nearby_bytes_;
+
+  // Statistics copied at the time of PushTexturePrioritiesToBackings.
+  size_t memory_visible_last_pushed_bytes_;
+  size_t memory_visible_and_nearby_last_pushed_bytes_;
+
+  DISALLOW_COPY_AND_ASSIGN(PrioritizedResourceManager);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_PRIORITIZED_RESOURCE_MANAGER_H_
diff --git a/cc/resources/prioritized_resource_unittest.cc b/cc/resources/prioritized_resource_unittest.cc
new file mode 100644
index 0000000..05e87d9
--- /dev/null
+++ b/cc/resources/prioritized_resource_unittest.cc
@@ -0,0 +1,1117 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/prioritized_resource.h"
+
+#include <vector>
+
+#include "cc/resources/prioritized_resource_manager.h"
+#include "cc/resources/resource.h"
+#include "cc/resources/resource_provider.h"
+#include "cc/test/fake_output_surface.h"
+#include "cc/test/fake_output_surface_client.h"
+#include "cc/test/fake_proxy.h"
+#include "cc/test/test_shared_bitmap_manager.h"
+#include "cc/test/tiled_layer_test_common.h"
+#include "cc/trees/single_thread_proxy.h" // For DebugScopedSetImplThread
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cc {
+
+// Test harness that wires a PrioritizedResourceManager to a FakeProxy, a fake
+// output surface and a real ResourceProvider. It is declared a friend of
+// PrioritizedResourceManager, which lets the tests below peek at private
+// state (evicted_backings_, backings_). The DebugScopedSetImplThread* guards
+// satisfy the manager's thread-affinity checks.
+class PrioritizedResourceTest : public testing::Test {
+ public:
+  PrioritizedResourceTest()
+      : texture_size_(256, 256),
+        texture_format_(RGBA_8888),
+        output_surface_(FakeOutputSurface::Create3d()) {
+    DebugScopedSetImplThread impl_thread(&proxy_);
+    CHECK(output_surface_->BindToClient(&output_surface_client_));
+    shared_bitmap_manager_.reset(new TestSharedBitmapManager());
+    resource_provider_ = ResourceProvider::Create(output_surface_.get(),
+                                                  shared_bitmap_manager_.get(),
+                                                  NULL,
+                                                  0,
+                                                  false,
+                                                  1,
+                                                  false);
+  }
+
+  virtual ~PrioritizedResourceTest() {
+    // The ResourceProvider must be destroyed on the impl thread.
+    DebugScopedSetImplThread impl_thread(&proxy_);
+    resource_provider_ = nullptr;
+  }
+
+  // Total bytes consumed by |texture_count| textures of the fixture's size
+  // and format.
+  size_t TexturesMemorySize(size_t texture_count) {
+    return Resource::MemorySizeBytes(texture_size_, texture_format_) *
+           texture_count;
+  }
+
+  scoped_ptr<PrioritizedResourceManager> CreateManager(size_t max_textures) {
+    scoped_ptr<PrioritizedResourceManager> manager =
+        PrioritizedResourceManager::Create(&proxy_);
+    manager->SetMaxMemoryLimitBytes(TexturesMemorySize(max_textures));
+    return manager.Pass();
+  }
+
+  // Returns whether |texture| could acquire a backing (optionally after a
+  // RequestLate), checking manager invariants before and after.
+  bool ValidateTexture(PrioritizedResource* texture,
+                       bool request_late) {
+    ResourceManagerAssertInvariants(texture->resource_manager());
+    if (request_late)
+      texture->RequestLate();
+    ResourceManagerAssertInvariants(texture->resource_manager());
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    bool success = texture->can_acquire_backing_texture();
+    if (success)
+      texture->AcquireBackingTexture(resource_provider());
+    return success;
+  }
+
+  // Runs a main-thread prioritization pass followed by the impl-side push of
+  // priorities onto backings, mimicking a commit.
+  void PrioritizeTexturesAndBackings(
+      PrioritizedResourceManager* resource_manager) {
+    resource_manager->PrioritizeTextures();
+    ResourceManagerUpdateBackingsPriorities(resource_manager);
+  }
+
+  void ResourceManagerUpdateBackingsPriorities(
+      PrioritizedResourceManager* resource_manager) {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->PushTexturePrioritiesToBackings();
+  }
+
+  ResourceProvider* resource_provider() { return resource_provider_.get(); }
+
+  void ResourceManagerAssertInvariants(
+      PrioritizedResourceManager* resource_manager) {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->AssertInvariants();
+  }
+
+  bool TextureBackingIsAbovePriorityCutoff(PrioritizedResource* texture) {
+    return texture->backing()->
+        was_above_priority_cutoff_at_last_priority_update();
+  }
+
+  // Friend access: number of backings sitting on the evicted list.
+  size_t EvictedBackingCount(PrioritizedResourceManager* resource_manager) {
+    return resource_manager->evicted_backings_.size();
+  }
+
+  // Friend access: resource ids of all live (non-evicted) backings.
+  std::vector<unsigned> BackingResources(
+      PrioritizedResourceManager* resource_manager) {
+    std::vector<unsigned> resources;
+    for (PrioritizedResourceManager::BackingList::iterator it =
+             resource_manager->backings_.begin();
+         it != resource_manager->backings_.end();
+         ++it)
+      resources.push_back((*it)->id());
+    return resources;
+  }
+
+ protected:
+  FakeProxy proxy_;
+  const gfx::Size texture_size_;
+  const ResourceFormat texture_format_;
+  FakeOutputSurfaceClient output_surface_client_;
+  scoped_ptr<OutputSurface> output_surface_;
+  scoped_ptr<SharedBitmapManager> shared_bitmap_manager_;
+  scoped_ptr<ResourceProvider> resource_provider_;
+};
+
+namespace {
+
+// With twice as many textures as the memory limit allows, only the
+// higher-priority half may acquire backings, while MaxMemoryNeededBytes()
+// still reports the demand of all textures.
+TEST_F(PrioritizedResourceTest, RequestTextureExceedingMaxLimit) {
+  const size_t kMaxTextures = 8;
+  scoped_ptr<PrioritizedResourceManager> resource_manager =
+      CreateManager(kMaxTextures);
+
+  // Create textures for double our memory limit.
+  scoped_ptr<PrioritizedResource> textures[kMaxTextures * 2];
+
+  for (size_t i = 0; i < kMaxTextures * 2; ++i)
+    textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+
+  // Set decreasing priorities (lower value = higher priority).
+  for (size_t i = 0; i < kMaxTextures * 2; ++i)
+    textures[i]->set_request_priority(100 + i);
+
+  // Only lower half should be available.
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  EXPECT_TRUE(ValidateTexture(textures[0].get(), false));
+  EXPECT_TRUE(ValidateTexture(textures[7].get(), false));
+  EXPECT_FALSE(ValidateTexture(textures[8].get(), false));
+  EXPECT_FALSE(ValidateTexture(textures[15].get(), false));
+
+  // Set increasing priorities.
+  for (size_t i = 0; i < kMaxTextures * 2; ++i)
+    textures[i]->set_request_priority(100 - i);
+
+  // Only upper half should be available.
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  EXPECT_FALSE(ValidateTexture(textures[0].get(), false));
+  EXPECT_FALSE(ValidateTexture(textures[7].get(), false));
+  EXPECT_TRUE(ValidateTexture(textures[8].get(), false));
+  EXPECT_TRUE(ValidateTexture(textures[15].get(), false));
+
+  EXPECT_EQ(TexturesMemorySize(kMaxTextures),
+            resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_LE(resource_manager->MemoryUseBytes(),
+            resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_EQ(TexturesMemorySize(2*kMaxTextures),
+            resource_manager->MaxMemoryNeededBytes());
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+      impl_thread_and_main_thread_blocked(&proxy_);
+  resource_manager->ClearAllMemory(resource_provider());
+}
+
+// Lowering the memory limit (8 -> 5 -> 4 textures) must shrink the set of
+// validatable textures accordingly, and ReduceMemory must bring actual use
+// back under the cutoff each time.
+TEST_F(PrioritizedResourceTest, ChangeMemoryLimits) {
+  const size_t kMaxTextures = 8;
+  scoped_ptr<PrioritizedResourceManager> resource_manager =
+      CreateManager(kMaxTextures);
+  scoped_ptr<PrioritizedResource> textures[kMaxTextures];
+
+  for (size_t i = 0; i < kMaxTextures; ++i) {
+    textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+  }
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    textures[i]->set_request_priority(100 + i);
+
+  // Set max limit to 8 textures.
+  resource_manager->SetMaxMemoryLimitBytes(TexturesMemorySize(8));
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    ValidateTexture(textures[i].get(), false);
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->ReduceMemory(resource_provider());
+  }
+
+  EXPECT_EQ(TexturesMemorySize(8), resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_LE(resource_manager->MemoryUseBytes(),
+            resource_manager->MemoryAboveCutoffBytes());
+
+  // Set max limit to 5 textures; only the 5 highest-priority fit.
+  resource_manager->SetMaxMemoryLimitBytes(TexturesMemorySize(5));
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    EXPECT_EQ(ValidateTexture(textures[i].get(), false), i < 5);
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->ReduceMemory(resource_provider());
+  }
+
+  EXPECT_EQ(TexturesMemorySize(5), resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_LE(resource_manager->MemoryUseBytes(),
+            resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_EQ(TexturesMemorySize(kMaxTextures),
+            resource_manager->MaxMemoryNeededBytes());
+
+  // Set max limit to 4 textures.
+  resource_manager->SetMaxMemoryLimitBytes(TexturesMemorySize(4));
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    EXPECT_EQ(ValidateTexture(textures[i].get(), false), i < 4);
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->ReduceMemory(resource_provider());
+  }
+
+  EXPECT_EQ(TexturesMemorySize(4), resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_LE(resource_manager->MemoryUseBytes(),
+            resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_EQ(TexturesMemorySize(kMaxTextures),
+            resource_manager->MaxMemoryNeededBytes());
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+      impl_thread_and_main_thread_blocked(&proxy_);
+  resource_manager->ClearAllMemory(resource_provider());
+}
+
+// ReduceWastedMemory should leave backings alone while little is wasted
+// (one orphaned texture), but should free memory once half the textures
+// have been destroyed and their backings sit unused.
+TEST_F(PrioritizedResourceTest, ReduceWastedMemory) {
+  const size_t kMaxTextures = 20;
+  scoped_ptr<PrioritizedResourceManager> resource_manager =
+      CreateManager(kMaxTextures);
+  scoped_ptr<PrioritizedResource> textures[kMaxTextures];
+
+  for (size_t i = 0; i < kMaxTextures; ++i) {
+    textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+  }
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    textures[i]->set_request_priority(100 + i);
+
+  // Set the memory limit to the max number of textures.
+  resource_manager->SetMaxMemoryLimitBytes(TexturesMemorySize(kMaxTextures));
+  PrioritizeTexturesAndBackings(resource_manager.get());
+
+  // Create backings and textures for all of the textures.
+  for (size_t i = 0; i < kMaxTextures; ++i) {
+    ValidateTexture(textures[i].get(), false);
+
+    {
+      DebugScopedSetImplThreadAndMainThreadBlocked
+          impl_thread_and_main_thread_blocked(&proxy_);
+      uint8_t image[4] = {0};
+      textures[i]->SetPixels(resource_provider_.get(),
+                             image,
+                             gfx::Rect(1, 1),
+                             gfx::Rect(1, 1),
+                             gfx::Vector2d());
+    }
+  }
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->ReduceMemory(resource_provider());
+  }
+
+  // 20 textures have backings allocated.
+  EXPECT_EQ(TexturesMemorySize(20), resource_manager->MemoryUseBytes());
+
+  // Destroy one texture, not enough is wasted to cause cleanup.
+  textures[0] = nullptr;
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->UpdateBackingsState(resource_provider());
+    resource_manager->ReduceWastedMemory(resource_provider());
+  }
+  EXPECT_EQ(TexturesMemorySize(20), resource_manager->MemoryUseBytes());
+
+  // Destroy half the textures, leaving behind the backings. Now a cleanup
+  // should happen.
+  for (size_t i = 0; i < kMaxTextures / 2; ++i)
+    textures[i] = nullptr;
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->UpdateBackingsState(resource_provider());
+    resource_manager->ReduceWastedMemory(resource_provider());
+  }
+  EXPECT_GT(TexturesMemorySize(20), resource_manager->MemoryUseBytes());
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+      impl_thread_and_main_thread_blocked(&proxy_);
+  resource_manager->ClearAllMemory(resource_provider());
+}
+
+// Backings whose resources are exported to a parent compositor must not be
+// treated as wasted: ReduceWastedMemory leaves them alone until the parent
+// returns the resources, after which they may be reclaimed.
+TEST_F(PrioritizedResourceTest, InUseNotWastedMemory) {
+  const size_t kMaxTextures = 20;
+  scoped_ptr<PrioritizedResourceManager> resource_manager =
+      CreateManager(kMaxTextures);
+  scoped_ptr<PrioritizedResource> textures[kMaxTextures];
+
+  for (size_t i = 0; i < kMaxTextures; ++i) {
+    textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+  }
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    textures[i]->set_request_priority(100 + i);
+
+  // Set the memory limit to the max number of textures.
+  resource_manager->SetMaxMemoryLimitBytes(TexturesMemorySize(kMaxTextures));
+  PrioritizeTexturesAndBackings(resource_manager.get());
+
+  // Create backings and textures for all of the textures.
+  for (size_t i = 0; i < kMaxTextures; ++i) {
+    ValidateTexture(textures[i].get(), false);
+
+    {
+      DebugScopedSetImplThreadAndMainThreadBlocked
+          impl_thread_and_main_thread_blocked(&proxy_);
+      uint8_t image[4] = {0};
+      textures[i]->SetPixels(resource_provider_.get(),
+                             image,
+                             gfx::Rect(1, 1),
+                             gfx::Rect(1, 1),
+                             gfx::Vector2d());
+    }
+  }
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->ReduceMemory(resource_provider());
+  }
+
+  // 20 textures have backings allocated.
+  EXPECT_EQ(TexturesMemorySize(20), resource_manager->MemoryUseBytes());
+
+  // Send half the textures to a parent compositor.
+  ResourceProvider::ResourceIdArray to_send;
+  TransferableResourceArray transferable;
+  for (size_t i = 0; i < kMaxTextures / 2; ++i)
+    to_send.push_back(textures[i]->resource_id());
+  resource_provider_->PrepareSendToParent(to_send, &transferable);
+
+  // Destroy half the textures, leaving behind the backings. The backings are
+  // sent to a parent compositor though, so they should not be considered wasted
+  // and a cleanup should not happen.
+  for (size_t i = 0; i < kMaxTextures / 2; ++i)
+    textures[i] = nullptr;
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->UpdateBackingsState(resource_provider());
+    resource_manager->ReduceWastedMemory(resource_provider());
+  }
+  EXPECT_EQ(TexturesMemorySize(20), resource_manager->MemoryUseBytes());
+
+  // Receive the textures back from the parent compositor. Now a cleanup should
+  // happen.
+  ReturnedResourceArray returns;
+  TransferableResource::ReturnResources(transferable, &returns);
+  resource_provider_->ReceiveReturnsFromParent(returns);
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->UpdateBackingsState(resource_provider());
+    resource_manager->ReduceWastedMemory(resource_provider());
+  }
+  EXPECT_GT(TexturesMemorySize(20), resource_manager->MemoryUseBytes());
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+      impl_thread_and_main_thread_blocked(&proxy_);
+  resource_manager->ClearAllMemory(resource_provider());
+}
+
+// The external priority cutoff is strict: RequestLate must not admit a
+// texture whose priority equals the cutoff, and ReduceMemoryOnImplThread
+// must honor the cutoff when performing a one-time eviction.
+TEST_F(PrioritizedResourceTest, ChangePriorityCutoff) {
+  const size_t kMaxTextures = 8;
+  scoped_ptr<PrioritizedResourceManager> resource_manager =
+      CreateManager(kMaxTextures);
+  scoped_ptr<PrioritizedResource> textures[kMaxTextures];
+
+  for (size_t i = 0; i < kMaxTextures; ++i) {
+    textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+  }
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    textures[i]->set_request_priority(100 + i);
+
+  // Set the cutoff to drop two textures. Try to request_late on all textures,
+  // and make sure that request_late doesn't work on a texture with equal
+  // priority to the cutoff.
+  resource_manager->SetMaxMemoryLimitBytes(TexturesMemorySize(8));
+  resource_manager->SetExternalPriorityCutoff(106);
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    EXPECT_EQ(ValidateTexture(textures[i].get(), true), i < 6);
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->ReduceMemory(resource_provider());
+  }
+  EXPECT_EQ(TexturesMemorySize(6), resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_LE(resource_manager->MemoryUseBytes(),
+            resource_manager->MemoryAboveCutoffBytes());
+
+  // Set the cutoff to drop two more textures.
+  resource_manager->SetExternalPriorityCutoff(104);
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    EXPECT_EQ(ValidateTexture(textures[i].get(), false), i < 4);
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->ReduceMemory(resource_provider());
+  }
+  EXPECT_EQ(TexturesMemorySize(4), resource_manager->MemoryAboveCutoffBytes());
+
+  // Do a one-time eviction for one more texture based on priority cutoff.
+  resource_manager->UnlinkAndClearEvictedBackings();
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    // Cutoff 104 evicts nothing (priority-103 texture is strictly higher).
+    resource_manager->ReduceMemoryOnImplThread(
+        TexturesMemorySize(8), 104, resource_provider());
+    EXPECT_EQ(0u, EvictedBackingCount(resource_manager.get()));
+    resource_manager->ReduceMemoryOnImplThread(
+        TexturesMemorySize(8), 103, resource_provider());
+    EXPECT_EQ(1u, EvictedBackingCount(resource_manager.get()));
+  }
+  resource_manager->UnlinkAndClearEvictedBackings();
+  EXPECT_EQ(TexturesMemorySize(3), resource_manager->MemoryUseBytes());
+
+  // Re-allocate the texture after the one-time drop.
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    EXPECT_EQ(ValidateTexture(textures[i].get(), false), i < 4);
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->ReduceMemory(resource_provider());
+  }
+  EXPECT_EQ(TexturesMemorySize(4), resource_manager->MemoryAboveCutoffBytes());
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+      impl_thread_and_main_thread_blocked(&proxy_);
+  resource_manager->ClearAllMemory(resource_provider());
+}
+
+// Impl-thread eviction must take lowest-priority backings first, and
+// backings whose resources are exported to a parent compositor must be
+// evicted last (being in the parent breaks priority ties).
+TEST_F(PrioritizedResourceTest, EvictingTexturesInParent) {
+  const size_t kMaxTextures = 8;
+  scoped_ptr<PrioritizedResourceManager> resource_manager =
+      CreateManager(kMaxTextures);
+  scoped_ptr<PrioritizedResource> textures[kMaxTextures];
+  unsigned texture_resource_ids[kMaxTextures];
+
+  for (size_t i = 0; i < kMaxTextures; ++i) {
+    textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+    textures[i]->set_request_priority(100 + i);
+  }
+
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  for (size_t i = 0; i < kMaxTextures; ++i) {
+    EXPECT_TRUE(ValidateTexture(textures[i].get(), true));
+
+    {
+      DebugScopedSetImplThreadAndMainThreadBlocked
+          impl_thread_and_main_thread_blocked(&proxy_);
+      uint8_t image[4] = {0};
+      textures[i]->SetPixels(resource_provider_.get(),
+                             image,
+                             gfx::Rect(1, 1),
+                             gfx::Rect(1, 1),
+                             gfx::Vector2d());
+    }
+  }
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->ReduceMemory(resource_provider());
+  }
+  EXPECT_EQ(TexturesMemorySize(8), resource_manager->MemoryAboveCutoffBytes());
+
+  for (size_t i = 0; i < 8; ++i)
+    texture_resource_ids[i] = textures[i]->resource_id();
+
+  // Evict four textures. It will be the last four.
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->ReduceMemoryOnImplThread(
+        TexturesMemorySize(4), 200, resource_provider());
+
+    EXPECT_EQ(4u, EvictedBackingCount(resource_manager.get()));
+
+    // The last four backings are evicted; the first four remain.
+    std::vector<unsigned> remaining = BackingResources(resource_manager.get());
+    EXPECT_TRUE(std::find(remaining.begin(),
+                          remaining.end(),
+                          texture_resource_ids[0]) != remaining.end());
+    EXPECT_TRUE(std::find(remaining.begin(),
+                          remaining.end(),
+                          texture_resource_ids[1]) != remaining.end());
+    EXPECT_TRUE(std::find(remaining.begin(),
+                          remaining.end(),
+                          texture_resource_ids[2]) != remaining.end());
+    EXPECT_TRUE(std::find(remaining.begin(),
+                          remaining.end(),
+                          texture_resource_ids[3]) != remaining.end());
+  }
+  resource_manager->UnlinkAndClearEvictedBackings();
+  EXPECT_EQ(TexturesMemorySize(4), resource_manager->MemoryUseBytes());
+
+  // Re-allocate the texture after the eviction.
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  for (size_t i = 0; i < kMaxTextures; ++i) {
+    EXPECT_TRUE(ValidateTexture(textures[i].get(), true));
+
+    {
+      DebugScopedSetImplThreadAndMainThreadBlocked
+          impl_thread_and_main_thread_blocked(&proxy_);
+      uint8_t image[4] = {0};
+      textures[i]->SetPixels(resource_provider_.get(),
+                             image,
+                             gfx::Rect(1, 1),
+                             gfx::Rect(1, 1),
+                             gfx::Vector2d());
+    }
+  }
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->ReduceMemory(resource_provider());
+  }
+  EXPECT_EQ(TexturesMemorySize(8), resource_manager->MemoryAboveCutoffBytes());
+
+  // Send the last two of the textures to a parent compositor.
+  ResourceProvider::ResourceIdArray to_send;
+  TransferableResourceArray transferable;
+  for (size_t i = 6; i < 8; ++i)
+    to_send.push_back(textures[i]->resource_id());
+  resource_provider_->PrepareSendToParent(to_send, &transferable);
+
+  // Set the last two textures to be tied for priority with the two
+  // before them. Being sent to the parent will break the tie.
+  textures[4]->set_request_priority(100 + 4);
+  textures[5]->set_request_priority(100 + 5);
+  textures[6]->set_request_priority(100 + 4);
+  textures[7]->set_request_priority(100 + 5);
+
+  for (size_t i = 0; i < 8; ++i)
+    texture_resource_ids[i] = textures[i]->resource_id();
+
+  // Drop all the textures. Now we have backings that can be recycled.
+  // (Fixed: previously reset textures[0] repeatedly, so only one texture
+  // was actually dropped.)
+  for (size_t i = 0; i < 8; ++i)
+    textures[i] = nullptr;
+  PrioritizeTexturesAndBackings(resource_manager.get());
+
+  // The next commit finishes.
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->UpdateBackingsState(resource_provider());
+  }
+
+  // Evict four textures. It would be the last four again, except that 2 of them
+  // are sent to the parent, so they are evicted last.
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->ReduceMemoryOnImplThread(
+        TexturesMemorySize(4), 200, resource_provider());
+
+    EXPECT_EQ(4u, EvictedBackingCount(resource_manager.get()));
+    // The last 2 backings remain this time.
+    std::vector<unsigned> remaining = BackingResources(resource_manager.get());
+    EXPECT_TRUE(std::find(remaining.begin(),
+                          remaining.end(),
+                          texture_resource_ids[6]) == remaining.end());
+    EXPECT_TRUE(std::find(remaining.begin(),
+                          remaining.end(),
+                          texture_resource_ids[7]) == remaining.end());
+  }
+  resource_manager->UnlinkAndClearEvictedBackings();
+  EXPECT_EQ(TexturesMemorySize(4), resource_manager->MemoryUseBytes());
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+      impl_thread_and_main_thread_blocked(&proxy_);
+  resource_manager->ClearAllMemory(resource_provider());
+}
+
+// An initial set of textures acquires backings; a higher-priority
+// replacement set then steals those backings on the next prioritization.
+TEST_F(PrioritizedResourceTest, ResourceManagerPartialUpdateTextures) {
+  const size_t kMaxTextures = 4;
+  const size_t kNumTextures = 4;
+  scoped_ptr<PrioritizedResourceManager> resource_manager =
+      CreateManager(kMaxTextures);
+  scoped_ptr<PrioritizedResource> initial_textures[kNumTextures];
+  scoped_ptr<PrioritizedResource> replacement_textures[kNumTextures];
+
+  for (size_t i = 0; i < kNumTextures; ++i) {
+    initial_textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+    replacement_textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+  }
+
+  // Give the initial set the highest request priorities.
+  for (size_t i = 0; i < kNumTextures; ++i)
+    initial_textures[i]->set_request_priority(200 + i);
+  PrioritizeTexturesAndBackings(resource_manager.get());
+
+  // Every texture in the initial set validates and receives a backing.
+  for (size_t i = 0; i < kNumTextures; ++i)
+    EXPECT_TRUE(ValidateTexture(initial_textures[i].get(), false));
+  for (size_t i = 0; i < kNumTextures; ++i)
+    EXPECT_TRUE(initial_textures[i]->have_backing_texture());
+
+  // Now rank the replacement set above the initial set.
+  for (size_t i = 0; i < kNumTextures; ++i)
+    replacement_textures[i]->set_request_priority(100 + i);
+  PrioritizeTexturesAndBackings(resource_manager.get());
+
+  // The initial textures fall below the cutoff and no longer validate...
+  for (size_t i = 0; i < kNumTextures; ++i)
+    EXPECT_FALSE(ValidateTexture(initial_textures[i].get(), false));
+
+  // ...but they keep their backings until the memory is actually needed.
+  for (size_t i = 0; i < kNumTextures; ++i)
+    EXPECT_TRUE(initial_textures[i]->have_backing_texture());
+
+  // Validating the replacement set claims the memory.
+  for (size_t i = 0; i < kNumTextures; ++i)
+    EXPECT_TRUE(ValidateTexture(replacement_textures[i].get(), false));
+
+  // At which point the initial set has been fully evicted.
+  for (size_t i = 0; i < kNumTextures; ++i)
+    EXPECT_FALSE(initial_textures[i]->have_backing_texture());
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+  impl_thread_and_main_thread_blocked(&proxy_);
+  resource_manager->ClearAllMemory(resource_provider());
+}
+
+// With 14 equal-priority textures and 2 strictly higher, only the higher two
+// validate automatically; RequestLate (reserve=true) then succeeds for the
+// remainder up to the 8-texture memory limit and fails past it.
+TEST_F(PrioritizedResourceTest, ResourceManagerPrioritiesAreEqual) {
+  const size_t kMaxTextures = 16;
+  scoped_ptr<PrioritizedResourceManager> resource_manager =
+      CreateManager(kMaxTextures);
+  scoped_ptr<PrioritizedResource> textures[kMaxTextures];
+
+  for (size_t i = 0; i < kMaxTextures; ++i) {
+    textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+  }
+
+  // All 16 textures have the same priority except 2 higher priority.
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    textures[i]->set_request_priority(100);
+  textures[0]->set_request_priority(99);
+  textures[1]->set_request_priority(99);
+
+  // Set max limit to 8 textures.
+  resource_manager->SetMaxMemoryLimitBytes(TexturesMemorySize(8));
+  PrioritizeTexturesAndBackings(resource_manager.get());
+
+  // The two high priority textures should be available, others should not.
+  for (size_t i = 0; i < 2; ++i)
+    EXPECT_TRUE(ValidateTexture(textures[i].get(), false));
+  for (size_t i = 2; i < kMaxTextures; ++i)
+    EXPECT_FALSE(ValidateTexture(textures[i].get(), false));
+  EXPECT_EQ(TexturesMemorySize(2), resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_LE(resource_manager->MemoryUseBytes(),
+            resource_manager->MemoryAboveCutoffBytes());
+
+  // Manually reserving textures should only succeed on the higher priority
+  // textures, and on remaining textures up to the memory limit.  The first 8
+  // reservations exhaust the budget, so every later request must fail.
+  // BUG FIX: the failure loop previously started at i = 9, skipping
+  // textures[8] — the first texture past the budget boundary.
+  for (size_t i = 0; i < 8; i++)
+    EXPECT_TRUE(ValidateTexture(textures[i].get(), true));
+  for (size_t i = 8; i < kMaxTextures; i++)
+    EXPECT_FALSE(ValidateTexture(textures[i].get(), true));
+  EXPECT_EQ(TexturesMemorySize(8), resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_LE(resource_manager->MemoryUseBytes(),
+            resource_manager->MemoryAboveCutoffBytes());
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+  impl_thread_and_main_thread_blocked(&proxy_);
+  resource_manager->ClearAllMemory(resource_provider());
+}
+
+// A texture that outlives its manager must observe that its backing is gone.
+TEST_F(PrioritizedResourceTest, ResourceManagerDestroyedFirst) {
+  scoped_ptr<PrioritizedResourceManager> resource_manager = CreateManager(1);
+  scoped_ptr<PrioritizedResource> orphaned_texture =
+      resource_manager->CreateTexture(texture_size_, texture_format_);
+
+  // No backing exists until the texture is prioritized and validated.
+  EXPECT_FALSE(orphaned_texture->have_backing_texture());
+
+  orphaned_texture->set_request_priority(100);
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  EXPECT_TRUE(ValidateTexture(orphaned_texture.get(), false));
+  EXPECT_TRUE(orphaned_texture->can_acquire_backing_texture());
+  EXPECT_TRUE(orphaned_texture->have_backing_texture());
+
+  // Tear down the manager while the texture is still alive.
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+    impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->ClearAllMemory(resource_provider());
+  }
+  resource_manager = nullptr;
+
+  // The surviving texture must report that it lost its backing.
+  EXPECT_FALSE(orphaned_texture->can_acquire_backing_texture());
+  EXPECT_FALSE(orphaned_texture->have_backing_texture());
+}
+
+// A texture can be detached from one manager and re-attached to another,
+// losing its backing in between and acquiring a fresh one afterwards.
+TEST_F(PrioritizedResourceTest, TextureMovedToNewManager) {
+  scoped_ptr<PrioritizedResourceManager> first_manager = CreateManager(1);
+  scoped_ptr<PrioritizedResourceManager> second_manager = CreateManager(1);
+  scoped_ptr<PrioritizedResource> moving_texture =
+      first_manager->CreateTexture(texture_size_, texture_format_);
+
+  // Acquire a backing under the first manager.
+  EXPECT_FALSE(moving_texture->have_backing_texture());
+  moving_texture->set_request_priority(100);
+  PrioritizeTexturesAndBackings(first_manager.get());
+  EXPECT_TRUE(ValidateTexture(moving_texture.get(), false));
+  EXPECT_TRUE(moving_texture->can_acquire_backing_texture());
+  EXPECT_TRUE(moving_texture->have_backing_texture());
+
+  // Detach the texture, then destroy the first manager.
+  moving_texture->SetTextureManager(NULL);
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+    impl_thread_and_main_thread_blocked(&proxy_);
+    first_manager->ClearAllMemory(resource_provider());
+  }
+  first_manager = nullptr;
+
+  // While unmanaged, the texture has no backing.
+  EXPECT_FALSE(moving_texture->can_acquire_backing_texture());
+  EXPECT_FALSE(moving_texture->have_backing_texture());
+
+  // Re-attach to the second manager and acquire a fresh backing.
+  moving_texture->SetTextureManager(second_manager.get());
+  PrioritizeTexturesAndBackings(second_manager.get());
+  EXPECT_TRUE(ValidateTexture(moving_texture.get(), false));
+  EXPECT_TRUE(moving_texture->can_acquire_backing_texture());
+  EXPECT_TRUE(moving_texture->have_backing_texture());
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+  impl_thread_and_main_thread_blocked(&proxy_);
+  second_manager->ClearAllMemory(resource_provider());
+}
+
+// Memory reserved for render surfaces (via a self-managed placeholder) is
+// not available to non-root-surface textures: only half the limit remains.
+TEST_F(PrioritizedResourceTest,
+       RenderSurfacesReduceMemoryAvailableOutsideRootSurface) {
+  const size_t kMaxTextures = 8;
+  scoped_ptr<PrioritizedResourceManager> resource_manager =
+      CreateManager(kMaxTextures);
+
+  // Claim half of the memory for render surfaces through a high-priority
+  // self-managed placeholder.
+  scoped_ptr<PrioritizedResource> surface_placeholder =
+      resource_manager->CreateTexture(texture_size_, texture_format_);
+  surface_placeholder->SetToSelfManagedMemoryPlaceholder(
+      TexturesMemorySize(4));
+  surface_placeholder->set_request_priority(
+      PriorityCalculator::RenderSurfacePriority());
+
+  // Create one texture per memory-limit slot.
+  scoped_ptr<PrioritizedResource> textures[kMaxTextures];
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+
+  // With decreasing importance (increasing priority value) outside the root
+  // surface, only the first half fits.  Spot-check each side of the cutoff.
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    textures[i]->set_request_priority(100 + i);
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  EXPECT_TRUE(ValidateTexture(textures[0].get(), false));
+  EXPECT_TRUE(ValidateTexture(textures[3].get(), false));
+  EXPECT_FALSE(ValidateTexture(textures[4].get(), false));
+  EXPECT_FALSE(ValidateTexture(textures[7].get(), false));
+
+  // Reversing the priorities flips which half is available.
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    textures[i]->set_request_priority(100 - i);
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  EXPECT_FALSE(ValidateTexture(textures[0].get(), false));
+  EXPECT_FALSE(ValidateTexture(textures[3].get(), false));
+  EXPECT_TRUE(ValidateTexture(textures[4].get(), false));
+  EXPECT_TRUE(ValidateTexture(textures[7].get(), false));
+
+  // Half the budget is above the cutoff; the other half is self-managed.
+  EXPECT_EQ(TexturesMemorySize(4), resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_EQ(TexturesMemorySize(4),
+            resource_manager->MemoryForSelfManagedTextures());
+  EXPECT_LE(resource_manager->MemoryUseBytes(),
+            resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_EQ(TexturesMemorySize(8),
+            resource_manager->MaxMemoryNeededBytes());
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+  impl_thread_and_main_thread_blocked(&proxy_);
+  resource_manager->ClearAllMemory(resource_provider());
+}
+
+// RequestLate reservations also compete with memory reserved for render
+// surfaces: with half the limit self-managed, only four requests succeed.
+TEST_F(PrioritizedResourceTest,
+       RenderSurfacesReduceMemoryAvailableForRequestLate) {
+  const size_t kMaxTextures = 8;
+  scoped_ptr<PrioritizedResourceManager> resource_manager =
+      CreateManager(kMaxTextures);
+
+  // Claim half of the memory for render surfaces through a high-priority
+  // self-managed placeholder.
+  scoped_ptr<PrioritizedResource> surface_placeholder =
+      resource_manager->CreateTexture(texture_size_, texture_format_);
+  surface_placeholder->SetToSelfManagedMemoryPlaceholder(
+      TexturesMemorySize(4));
+  surface_placeholder->set_request_priority(
+      PriorityCalculator::RenderSurfacePriority());
+
+  // Create one texture per memory-limit slot, all with equal priority.
+  scoped_ptr<PrioritizedResource> textures[kMaxTextures];
+  for (size_t i = 0; i < kMaxTextures; ++i) {
+    textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+    textures[i]->set_request_priority(100);
+  }
+
+  // None validate outright; the first four RequestLate reservations succeed
+  // before the non-surface half of the budget runs out.
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  for (unsigned i = 0; i < kMaxTextures; ++i)
+    EXPECT_FALSE(ValidateTexture(textures[i].get(), false));
+  for (unsigned i = 0; i < kMaxTextures; i += 2)
+    EXPECT_TRUE(ValidateTexture(textures[i].get(), true));
+  for (unsigned i = 1; i < kMaxTextures; i += 2)
+    EXPECT_FALSE(ValidateTexture(textures[i].get(), true));
+
+  // Half the budget is above the cutoff; the other half is self-managed.
+  EXPECT_EQ(TexturesMemorySize(4), resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_EQ(TexturesMemorySize(4),
+            resource_manager->MemoryForSelfManagedTextures());
+  EXPECT_LE(resource_manager->MemoryUseBytes(),
+            resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_EQ(TexturesMemorySize(8),
+            resource_manager->MaxMemoryNeededBytes());
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+  impl_thread_and_main_thread_blocked(&proxy_);
+  resource_manager->ClearAllMemory(resource_provider());
+}
+
+// Textures that live in a child surface cannot be reserved when the surface
+// memory itself does not fit, even via RequestLate.
+TEST_F(PrioritizedResourceTest,
+       WhenRenderSurfaceNotAvailableTexturesAlsoNotAvailable) {
+  const size_t kMaxTextures = 8;
+  scoped_ptr<PrioritizedResourceManager> resource_manager =
+      CreateManager(kMaxTextures);
+
+  // Claim half of the memory for render surfaces through a high-priority
+  // self-managed placeholder.
+  scoped_ptr<PrioritizedResource> surface_placeholder =
+      resource_manager->CreateTexture(texture_size_, texture_format_);
+  surface_placeholder->SetToSelfManagedMemoryPlaceholder(
+      TexturesMemorySize(4));
+  surface_placeholder->set_request_priority(
+      PriorityCalculator::RenderSurfacePriority());
+
+  // Create one texture per memory-limit slot.
+  scoped_ptr<PrioritizedResource> textures[kMaxTextures];
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+
+  // Six textures are visible in the root surface; the last two are visible
+  // in a child (non-root) surface.
+  for (size_t i = 0; i < kMaxTextures; ++i) {
+    const bool in_root_surface = i < 6;
+    textures[i]->set_request_priority(
+        PriorityCalculator::VisiblePriority(in_root_surface));
+  }
+
+  PrioritizeTexturesAndBackings(resource_manager.get());
+
+  // The child-surface textures cannot be reserved, even via RequestLate.
+  EXPECT_FALSE(ValidateTexture(textures[6].get(), true));
+  EXPECT_FALSE(ValidateTexture(textures[7].get(), true));
+
+  // The root-surface textures all validate.
+  for (size_t i = 0; i < 6; ++i)
+    EXPECT_TRUE(ValidateTexture(textures[i].get(), false));
+
+  EXPECT_EQ(TexturesMemorySize(6), resource_manager->MemoryAboveCutoffBytes());
+  EXPECT_EQ(TexturesMemorySize(2),
+            resource_manager->MemoryForSelfManagedTextures());
+  EXPECT_LE(resource_manager->MemoryUseBytes(),
+            resource_manager->MemoryAboveCutoffBytes());
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+  impl_thread_and_main_thread_blocked(&proxy_);
+  resource_manager->ClearAllMemory(resource_provider());
+}
+
+// Verifies that after RequestLate() the backings list is re-sorted so that
+// below-limit backings precede above-limit backings.
+TEST_F(PrioritizedResourceTest, RequestLateBackingsSorting) {
+  const size_t kMaxTextures = 8;
+  scoped_ptr<PrioritizedResourceManager> resource_manager =
+      CreateManager(kMaxTextures);
+  resource_manager->SetMaxMemoryLimitBytes(TexturesMemorySize(kMaxTextures));
+
+  // Create textures to fill our memory limit.
+  scoped_ptr<PrioritizedResource> textures[kMaxTextures];
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+
+  // Set equal priorities, and allocate backings for all textures.
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    textures[i]->set_request_priority(100);
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  for (unsigned i = 0; i < kMaxTextures; ++i)
+    EXPECT_TRUE(ValidateTexture(textures[i].get(), false));
+
+  // Drop the memory limit and prioritize (none will be above the threshold,
+  // but they still have backings because ReduceMemory hasn't been called).
+  resource_manager->SetMaxMemoryLimitBytes(
+      TexturesMemorySize(kMaxTextures / 2));
+  PrioritizeTexturesAndBackings(resource_manager.get());
+
+  // Push half of them (the even-indexed ones) back over the limit.
+  for (size_t i = 0; i < kMaxTextures; i += 2)
+    EXPECT_TRUE(textures[i]->RequestLate());
+
+  // Push the priorities to the backings array and sort the backings array.
+  ResourceManagerUpdateBackingsPriorities(resource_manager.get());
+
+  // Assert that the backings list is sorted with the below-limit backings
+  // before the above-limit backings.
+  ResourceManagerAssertInvariants(resource_manager.get());
+
+  // Make sure that we have backings for all of the textures.
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    EXPECT_TRUE(textures[i]->have_backing_texture());
+
+  // Make sure that only the request_late textures are above the priority
+  // cutoff.
+  for (size_t i = 0; i < kMaxTextures; i += 2)
+    EXPECT_TRUE(TextureBackingIsAbovePriorityCutoff(textures[i].get()));
+  for (size_t i = 1; i < kMaxTextures; i += 2)
+    EXPECT_FALSE(TextureBackingIsAbovePriorityCutoff(textures[i].get()));
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+  impl_thread_and_main_thread_blocked(&proxy_);
+  resource_manager->ClearAllMemory(resource_provider());
+}
+
+// Verifies that ResourceUpdateQueue::ClearUploadsToEvictedResources() drops
+// exactly those queued uploads whose destination backings have been evicted.
+TEST_F(PrioritizedResourceTest, ClearUploadsToEvictedResources) {
+  const size_t kMaxTextures = 4;
+  scoped_ptr<PrioritizedResourceManager> resource_manager =
+      CreateManager(kMaxTextures);
+  resource_manager->SetMaxMemoryLimitBytes(TexturesMemorySize(kMaxTextures));
+
+  // Create textures to fill our memory limit.
+  scoped_ptr<PrioritizedResource> textures[kMaxTextures];
+
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+
+  // Set equal priorities, and allocate backings for all textures.
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    textures[i]->set_request_priority(100);
+  PrioritizeTexturesAndBackings(resource_manager.get());
+  for (unsigned i = 0; i < kMaxTextures; ++i)
+    EXPECT_TRUE(ValidateTexture(textures[i].get(), false));
+
+  // Queue a full upload for every texture.  The scoped impl-thread token
+  // below must stay alive through the ReduceMemoryOnImplThread() calls.
+  ResourceUpdateQueue queue;
+  DebugScopedSetImplThreadAndMainThreadBlocked
+  impl_thread_and_main_thread_blocked(&proxy_);
+  for (size_t i = 0; i < kMaxTextures; ++i) {
+    const ResourceUpdate upload = ResourceUpdate::Create(
+        textures[i].get(), NULL, gfx::Rect(), gfx::Rect(), gfx::Vector2d());
+    queue.AppendFullUpload(upload);
+  }
+
+  // Make sure that we have backings for all of the textures.
+  for (size_t i = 0; i < kMaxTextures; ++i)
+    EXPECT_TRUE(textures[i]->have_backing_texture());
+
+  // Nothing is evicted yet, so no upload is dropped.
+  queue.ClearUploadsToEvictedResources();
+  EXPECT_EQ(4u, queue.FullUploadSize());
+
+  // Evict down to one texture; the three orphaned uploads must be dropped.
+  resource_manager->ReduceMemoryOnImplThread(
+      TexturesMemorySize(1),
+      PriorityCalculator::AllowEverythingCutoff(),
+      resource_provider());
+  queue.ClearUploadsToEvictedResources();
+  EXPECT_EQ(1u, queue.FullUploadSize());
+
+  // Evict everything; the queue ends up empty.
+  resource_manager->ReduceMemoryOnImplThread(
+      0, PriorityCalculator::AllowEverythingCutoff(), resource_provider());
+  queue.ClearUploadsToEvictedResources();
+  EXPECT_EQ(0u, queue.FullUploadSize());
+}
+
+// Verifies the memory statistics (use / visible / visible-and-nearby) and
+// that they only change once priorities are pushed to the backings.
+TEST_F(PrioritizedResourceTest, UsageStatistics) {
+  const size_t kMaxTextures = 5;
+  scoped_ptr<PrioritizedResourceManager> resource_manager =
+      CreateManager(kMaxTextures);
+  scoped_ptr<PrioritizedResource> textures[kMaxTextures];
+
+  for (size_t i = 0; i < kMaxTextures; ++i) {
+    textures[i] =
+        resource_manager->CreateTexture(texture_size_, texture_format_);
+  }
+
+  // One texture on each side of each statistics cutoff: per the expectations
+  // below, [0] counts as visible and [0..2] as visible-and-nearby.
+  textures[0]->
+      set_request_priority(PriorityCalculator::AllowVisibleOnlyCutoff() - 1);
+  textures[1]->
+      set_request_priority(PriorityCalculator::AllowVisibleOnlyCutoff());
+  textures[2]->set_request_priority(
+      PriorityCalculator::AllowVisibleAndNearbyCutoff() - 1);
+  textures[3]->
+      set_request_priority(PriorityCalculator::AllowVisibleAndNearbyCutoff());
+  textures[4]->set_request_priority(
+      PriorityCalculator::AllowVisibleAndNearbyCutoff() + 1);
+
+  // Set max limit to 2 textures.
+  resource_manager->SetMaxMemoryLimitBytes(TexturesMemorySize(2));
+  PrioritizeTexturesAndBackings(resource_manager.get());
+
+  // The first two textures should be available, others should not.
+  for (size_t i = 0; i < 2; ++i)
+    EXPECT_TRUE(ValidateTexture(textures[i].get(), false));
+  for (size_t i = 2; i < kMaxTextures; ++i)
+    EXPECT_FALSE(ValidateTexture(textures[i].get(), false));
+
+  // Validate the statistics.
+  {
+    DebugScopedSetImplThread impl_thread(&proxy_);
+    EXPECT_EQ(TexturesMemorySize(2), resource_manager->MemoryUseBytes());
+    EXPECT_EQ(TexturesMemorySize(1), resource_manager->MemoryVisibleBytes());
+    EXPECT_EQ(TexturesMemorySize(3),
+              resource_manager->MemoryVisibleAndNearbyBytes());
+  }
+
+  // Re-prioritize the textures, but do not push the values to backings.
+  textures[0]->
+      set_request_priority(PriorityCalculator::AllowVisibleOnlyCutoff() - 1);
+  textures[1]->
+      set_request_priority(PriorityCalculator::AllowVisibleOnlyCutoff() - 1);
+  textures[2]->
+      set_request_priority(PriorityCalculator::AllowVisibleOnlyCutoff() - 1);
+  textures[3]->set_request_priority(
+      PriorityCalculator::AllowVisibleAndNearbyCutoff() - 1);
+  textures[4]->
+      set_request_priority(PriorityCalculator::AllowVisibleAndNearbyCutoff());
+  resource_manager->PrioritizeTextures();
+
+  // Verify that we still see the old values: the statistics reflect the
+  // backings, which have not been updated yet.
+  {
+    DebugScopedSetImplThread impl_thread(&proxy_);
+    EXPECT_EQ(TexturesMemorySize(2), resource_manager->MemoryUseBytes());
+    EXPECT_EQ(TexturesMemorySize(1), resource_manager->MemoryVisibleBytes());
+    EXPECT_EQ(TexturesMemorySize(3),
+              resource_manager->MemoryVisibleAndNearbyBytes());
+  }
+
+  // Push priorities to backings, and verify we see the new values:
+  // now [0..2] count as visible and [0..3] as visible-and-nearby.
+  {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+    impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager->PushTexturePrioritiesToBackings();
+    EXPECT_EQ(TexturesMemorySize(2), resource_manager->MemoryUseBytes());
+    EXPECT_EQ(TexturesMemorySize(3), resource_manager->MemoryVisibleBytes());
+    EXPECT_EQ(TexturesMemorySize(4),
+              resource_manager->MemoryVisibleAndNearbyBytes());
+  }
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+  impl_thread_and_main_thread_blocked(&proxy_);
+  resource_manager->ClearAllMemory(resource_provider());
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/prioritized_tile_set.cc b/cc/resources/prioritized_tile_set.cc
new file mode 100644
index 0000000..d0104cb
--- /dev/null
+++ b/cc/resources/prioritized_tile_set.cc
@@ -0,0 +1,155 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/prioritized_tile_set.h"
+
+#include <algorithm>
+
+#include "cc/resources/managed_tile_state.h"
+#include "cc/resources/tile.h"
+
+namespace cc {
+
+// Strict weak ordering used to sort a bin's tiles from most to least
+// important.  Compares, in order: priority bin, required-for-activation,
+// resolution, distance to visible, then tile position as a deterministic
+// final tie breaker.
+class BinComparator {
+ public:
+  bool operator()(const Tile* a,
+                  const Tile* b) const {
+    const ManagedTileState& lhs = a->managed_state();
+    const ManagedTileState& rhs = b->managed_state();
+
+    if (lhs.priority_bin != rhs.priority_bin)
+      return lhs.priority_bin < rhs.priority_bin;
+
+    // Tiles required for activation sort ahead of those that are not.
+    if (lhs.required_for_activation != rhs.required_for_activation)
+      return lhs.required_for_activation;
+
+    if (lhs.resolution != rhs.resolution)
+      return lhs.resolution < rhs.resolution;
+
+    if (lhs.distance_to_visible != rhs.distance_to_visible)
+      return lhs.distance_to_visible < rhs.distance_to_visible;
+
+    // Fall back on screen position: top-to-bottom, then left-to-right.
+    gfx::Rect pos_a = a->content_rect();
+    gfx::Rect pos_b = b->content_rect();
+    if (pos_a.y() != pos_b.y())
+      return pos_a.y() < pos_b.y();
+    return pos_a.x() < pos_b.x();
+  }
+};
+
+namespace {
+
+// Breaks priority ties deterministically using the tile id.
+bool TilePriorityTieBreaker(const Tile* tile_i, const Tile* tile_j) {
+  // When two tiles have the same priority, use the id as a tie breaker.
+  return tile_i->id() < tile_j->id();
+}
+
+typedef std::vector<Tile*> TileVector;
+
+// Sorts the tiles of |bin| into priority order.  NEVER_BIN is left
+// unsorted; NOW_AND_READY_TO_DRAW_BIN only needs a deterministic order, and
+// every other bin gets the full priority sort (BinComparator).
+void SortBinTiles(ManagedTileBin bin, TileVector* tiles) {
+  switch (bin) {
+    case NEVER_BIN:
+      break;
+    case NOW_AND_READY_TO_DRAW_BIN:
+      std::sort(tiles->begin(), tiles->end(), TilePriorityTieBreaker);
+      break;
+    case NOW_BIN:
+    case SOON_BIN:
+    case EVENTUALLY_AND_ACTIVE_BIN:
+    case EVENTUALLY_BIN:
+    case AT_LAST_AND_ACTIVE_BIN:
+    case AT_LAST_BIN:
+      std::sort(tiles->begin(), tiles->end(), BinComparator());
+      break;
+    default:
+      NOTREACHED();
+  }
+}
+
+}  // namespace
+
+PrioritizedTileSet::PrioritizedTileSet() {
+  // An empty bin is trivially sorted.
+  for (int i = 0; i < NUM_BINS; ++i)
+    bin_sorted_[i] = true;
+}
+
+// The set does not own the tiles it holds, so there is nothing to release.
+PrioritizedTileSet::~PrioritizedTileSet() {}
+
+// Appends |tile| to |bin|; the bin is lazily re-sorted on first iteration.
+void PrioritizedTileSet::InsertTile(Tile* tile, ManagedTileBin bin) {
+  bin_sorted_[bin] = false;
+  tiles_[bin].push_back(tile);
+}
+
+// Removes every tile from every bin.
+void PrioritizedTileSet::Clear() {
+  for (int i = 0; i < NUM_BINS; ++i) {
+    tiles_[i].clear();
+    bin_sorted_[i] = true;  // An empty bin is trivially sorted.
+  }
+}
+
+// Returns true when no bin contains any tiles.
+bool PrioritizedTileSet::IsEmpty() {
+  for (int i = 0; i < NUM_BINS; ++i) {
+    if (!tiles_[i].empty())
+      return false;
+  }
+  return true;
+}
+
+// Sorts |bin| into priority order unless it is already sorted.
+void PrioritizedTileSet::SortBinIfNeeded(ManagedTileBin bin) {
+  if (bin_sorted_[bin])
+    return;
+  SortBinTiles(bin, &tiles_[bin]);
+  bin_sorted_[bin] = true;
+}
+
+// Positions the iterator at the first tile of the highest-priority
+// non-empty bin; when |use_priority_ordering| is set, each bin is sorted
+// just before it is visited.
+PrioritizedTileSet::Iterator::Iterator(
+    PrioritizedTileSet* tile_set, bool use_priority_ordering)
+    : tile_set_(tile_set),
+      current_bin_(NOW_AND_READY_TO_DRAW_BIN),
+      use_priority_ordering_(use_priority_ordering) {
+  if (use_priority_ordering_)
+    tile_set_->SortBinIfNeeded(current_bin_);
+  iterator_ = tile_set_->tiles_[current_bin_].begin();
+  if (iterator_ == tile_set_->tiles_[current_bin_].end())
+    AdvanceList();
+}
+
+// Nothing to clean up; the iterator does not own the underlying set.
+PrioritizedTileSet::Iterator::~Iterator() {}
+
+// Stops sorting bins as they are reached.  Bins already visited (and thus
+// already sorted) are unaffected; the remaining bins are walked in whatever
+// order their tiles were inserted.
+void PrioritizedTileSet::Iterator::DisablePriorityOrdering() {
+  use_priority_ordering_ = false;
+}
+
+// Advances to the next tile, moving on to the next non-empty bin when the
+// current bin is exhausted.
+PrioritizedTileSet::Iterator&
+PrioritizedTileSet::Iterator::operator++() {
+  // We can't increment past the end of the tiles.
+  DCHECK(iterator_ != tile_set_->tiles_[current_bin_].end());
+
+  ++iterator_;
+  if (iterator_ == tile_set_->tiles_[current_bin_].end())
+    AdvanceList();
+  return *this;
+}
+
+// Returns the current tile.  The iterator must not be at the end (i.e. it
+// must still convert to true).
+Tile* PrioritizedTileSet::Iterator::operator*() {
+  DCHECK(iterator_ != tile_set_->tiles_[current_bin_].end());
+  return *iterator_;
+}
+
+// Moves to the first tile of the next non-empty bin.  Called only when
+// |iterator_| has reached the end of the current bin.  Bins are visited in
+// increasing ManagedTileBin order, including NEVER_BIN itself; if every
+// remaining bin is empty, the iterator ends at NEVER_BIN's end() and its
+// bool conversion becomes false.
+void PrioritizedTileSet::Iterator::AdvanceList() {
+  DCHECK(iterator_ == tile_set_->tiles_[current_bin_].end());
+
+  while (current_bin_ != NEVER_BIN) {
+    current_bin_ = static_cast<ManagedTileBin>(current_bin_ + 1);
+
+    // Lazily sort each bin the first time priority iteration reaches it.
+    if (use_priority_ordering_)
+      tile_set_->SortBinIfNeeded(current_bin_);
+
+    iterator_ = tile_set_->tiles_[current_bin_].begin();
+    if (iterator_ != tile_set_->tiles_[current_bin_].end())
+      break;
+  }
+}
+
+} // namespace cc
diff --git a/cc/resources/prioritized_tile_set.h b/cc/resources/prioritized_tile_set.h
new file mode 100644
index 0000000..2d4693e
--- /dev/null
+++ b/cc/resources/prioritized_tile_set.h
@@ -0,0 +1,60 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_PRIORITIZED_TILE_SET_H_
+#define CC_RESOURCES_PRIORITIZED_TILE_SET_H_
+
+#include <vector>
+
+#include "cc/base/cc_export.h"
+#include "cc/resources/managed_tile_state.h"
+
+namespace cc {
+class Tile;
+
+// Holds non-owning pointers to tiles, partitioned into ManagedTileBins, and
+// hands them out in bin/priority order via Iterator.  Bins are sorted
+// lazily, only when an iterator first reaches them.
+class CC_EXPORT PrioritizedTileSet {
+ public:
+  PrioritizedTileSet();
+  ~PrioritizedTileSet();
+
+  // Appends |tile| to |bin| and marks that bin as needing a sort.
+  void InsertTile(Tile* tile, ManagedTileBin bin);
+  // Removes all tiles from all bins.
+  void Clear();
+  // Returns true when no bin contains any tiles.
+  bool IsEmpty();
+
+  // Walks the set bin by bin, from NOW_AND_READY_TO_DRAW_BIN through
+  // NEVER_BIN.  When |use_priority_ordering| is true, each bin is sorted
+  // before being visited.  Note the iterator holds a std::vector iterator,
+  // so inserting into the set while iterating invalidates it.
+  class CC_EXPORT Iterator {
+   public:
+    Iterator(PrioritizedTileSet* set, bool use_priority_ordering);
+
+    ~Iterator();
+
+    // Stops sorting bins that have not yet been reached.
+    void DisablePriorityOrdering();
+
+    Iterator& operator++();
+    Tile* operator->() { return *(*this); }
+    Tile* operator*();
+    // True while the iterator still points at a tile.
+    operator bool() const {
+      return iterator_ != tile_set_->tiles_[current_bin_].end();
+    }
+
+   private:
+    // Advances |current_bin_|/|iterator_| to the next non-empty bin.
+    void AdvanceList();
+
+    PrioritizedTileSet* tile_set_;  // Not owned.
+    ManagedTileBin current_bin_;
+    std::vector<Tile*>::iterator iterator_;
+    bool use_priority_ordering_;
+  };
+
+ private:
+  friend class Iterator;
+
+  // Sorts |bin| into priority order unless it is already sorted.
+  void SortBinIfNeeded(ManagedTileBin bin);
+
+  std::vector<Tile*> tiles_[NUM_BINS];  // Tiles are not owned.
+  bool bin_sorted_[NUM_BINS];
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_PRIORITIZED_TILE_SET_H_
diff --git a/cc/resources/prioritized_tile_set_unittest.cc b/cc/resources/prioritized_tile_set_unittest.cc
new file mode 100644
index 0000000..e9ed478
--- /dev/null
+++ b/cc/resources/prioritized_tile_set_unittest.cc
@@ -0,0 +1,774 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include <vector>
+
+#include "cc/resources/managed_tile_state.h"
+#include "cc/resources/prioritized_tile_set.h"
+#include "cc/resources/tile.h"
+#include "cc/test/fake_output_surface.h"
+#include "cc/test/fake_output_surface_client.h"
+#include "cc/test/fake_picture_pile_impl.h"
+#include "cc/test/fake_tile_manager.h"
+#include "cc/test/fake_tile_manager_client.h"
+#include "cc/test/test_shared_bitmap_manager.h"
+#include "cc/test/test_tile_priorities.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cc {
+
+// Strict weak ordering the tests use to predict the order a priority-sorted
+// bin hands tiles back: priority bin first, then required-for-activation
+// tiles, then resolution, then distance to visible, and finally tile
+// position (row-major on the content rect).
+class BinComparator {
+ public:
+  bool operator()(const scoped_refptr<Tile>& a,
+                  const scoped_refptr<Tile>& b) const {
+    const ManagedTileState& a_state = a->managed_state();
+    const ManagedTileState& b_state = b->managed_state();
+
+    if (a_state.priority_bin < b_state.priority_bin)
+      return true;
+    if (b_state.priority_bin < a_state.priority_bin)
+      return false;
+
+    if (a_state.required_for_activation != b_state.required_for_activation)
+      return a_state.required_for_activation;
+
+    if (a_state.resolution < b_state.resolution)
+      return true;
+    if (b_state.resolution < a_state.resolution)
+      return false;
+
+    if (a_state.distance_to_visible != b_state.distance_to_visible)
+      return a_state.distance_to_visible < b_state.distance_to_visible;
+
+    // Tie-break on position so the ordering is deterministic.
+    gfx::Rect a_rect = a->content_rect();
+    gfx::Rect b_rect = b->content_rect();
+    if (a_rect.y() != b_rect.y())
+      return a_rect.y() < b_rect.y();
+    return a_rect.x() < b_rect.x();
+  }
+};
+
+namespace {
+
+// Fixture that builds just enough real machinery (fake output surface,
+// resource provider/pool, fake tile manager, infinite picture pile) to
+// create genuine Tile objects for exercising PrioritizedTileSet.
+class PrioritizedTileSetTest : public testing::Test {
+ public:
+  PrioritizedTileSetTest() {
+    output_surface_ = FakeOutputSurface::Create3d().Pass();
+    CHECK(output_surface_->BindToClient(&output_surface_client_));
+
+    shared_bitmap_manager_.reset(new TestSharedBitmapManager());
+    resource_provider_ = ResourceProvider::Create(output_surface_.get(),
+                                                  shared_bitmap_manager_.get(),
+                                                  NULL,
+                                                  0,
+                                                  false,
+                                                  1,
+                                                  false).Pass();
+    resource_pool_ = ResourcePool::Create(
+        resource_provider_.get(), GL_TEXTURE_2D, RGBA_8888);
+    tile_manager_.reset(
+        new FakeTileManager(&tile_manager_client_, resource_pool_.get()));
+    picture_pile_ = FakePicturePileImpl::CreateInfiniteFilledPile();
+  }
+
+  // Creates a tile of the default tile size at contents scale 1 over an
+  // empty rect; content is irrelevant for these ordering tests.
+  scoped_refptr<Tile> CreateTile() {
+    return tile_manager_->CreateTile(picture_pile_.get(),
+                                     settings_.default_tile_size,
+                                     gfx::Rect(),
+                                     1.0,
+                                     0,
+                                     0,
+                                     0);
+  }
+  // Resets the tiles' priorities on both trees. Tests that assigned
+  // priorities call this before their tiles go out of scope.
+  void ReleaseTiles(std::vector<scoped_refptr<Tile> >* tiles) {
+    for (std::vector<scoped_refptr<Tile> >::iterator it = tiles->begin();
+         it != tiles->end();
+         it++) {
+      Tile* tile = it->get();
+      tile->SetPriority(ACTIVE_TREE, TilePriority());
+      tile->SetPriority(PENDING_TREE, TilePriority());
+    }
+  }
+
+ private:
+  LayerTreeSettings settings_;
+  FakeOutputSurfaceClient output_surface_client_;
+  scoped_ptr<FakeOutputSurface> output_surface_;
+  scoped_ptr<SharedBitmapManager> shared_bitmap_manager_;
+  scoped_ptr<ResourceProvider> resource_provider_;
+  scoped_ptr<ResourcePool> resource_pool_;
+  FakeTileManagerClient tile_manager_client_;
+  scoped_ptr<FakeTileManager> tile_manager_;
+  scoped_refptr<FakePicturePileImpl> picture_pile_;
+};
+
+TEST_F(PrioritizedTileSetTest, EmptyIterator) {
+  // An iterator over an empty set is constructible but immediately invalid.
+  PrioritizedTileSet set;
+
+  PrioritizedTileSet::Iterator iterator(&set, true);
+  EXPECT_FALSE(iterator);
+}
+
+TEST_F(PrioritizedTileSetTest, NonEmptyIterator) {
+  // A set holding a single tile yields exactly that tile, then ends.
+  PrioritizedTileSet set;
+  scoped_refptr<Tile> tile = CreateTile();
+  set.InsertTile(tile.get(), NOW_BIN);
+
+  PrioritizedTileSet::Iterator iterator(&set, true);
+  EXPECT_TRUE(iterator);
+  EXPECT_TRUE(*iterator == tile.get());
+  ++iterator;
+  EXPECT_FALSE(iterator);
+}
+
+TEST_F(PrioritizedTileSetTest, NowAndReadyToDrawBin) {
+  // NOW_AND_READY_TO_DRAW_BIN must keep insertion order regardless of the
+  // tiles' priorities.
+
+  PrioritizedTileSet set;
+  TilePriority priorities[4] = {TilePriorityForEventualBin(),
+                                TilePriorityForNowBin(),
+                                TilePriority(),
+                                TilePriorityForSoonBin()};
+
+  // 20 tiles, 5 per priority, inserted in creation order.
+  std::vector<scoped_refptr<Tile> > tiles;
+  for (int index = 0; index < 20; ++index) {
+    scoped_refptr<Tile> tile = CreateTile();
+    tile->SetPriority(ACTIVE_TREE, priorities[index / 5]);
+    tile->SetPriority(PENDING_TREE, priorities[index / 5]);
+    tiles.push_back(tile);
+    set.InsertTile(tile.get(), NOW_AND_READY_TO_DRAW_BIN);
+  }
+
+  // Expect exactly the insertion order back.
+  int count = 0;
+  PrioritizedTileSet::Iterator it(&set, true);
+  while (it) {
+    EXPECT_TRUE(*it == tiles[count].get());
+    ++count;
+    ++it;
+  }
+  EXPECT_EQ(20, count);
+
+  ReleaseTiles(&tiles);
+}
+
+TEST_F(PrioritizedTileSetTest, NowBin) {
+  // NOW_BIN must come back in BinComparator order under priority ordering.
+
+  PrioritizedTileSet set;
+  TilePriority priorities[4] = {TilePriorityForEventualBin(),
+                                TilePriorityForNowBin(),
+                                TilePriority(),
+                                TilePriorityForSoonBin()};
+
+  // 20 tiles, 5 per priority, inserted in creation order.
+  std::vector<scoped_refptr<Tile> > tiles;
+  for (int index = 0; index < 20; ++index) {
+    scoped_refptr<Tile> tile = CreateTile();
+    tile->SetPriority(ACTIVE_TREE, priorities[index / 5]);
+    tile->SetPriority(PENDING_TREE, priorities[index / 5]);
+    tiles.push_back(tile);
+    set.InsertTile(tile.get(), NOW_BIN);
+  }
+
+  // The set should hand tiles back as if sorted by BinComparator.
+  std::sort(tiles.begin(), tiles.end(), BinComparator());
+
+  int count = 0;
+  PrioritizedTileSet::Iterator it(&set, true);
+  while (it) {
+    EXPECT_TRUE(*it == tiles[count].get());
+    ++count;
+    ++it;
+  }
+  EXPECT_EQ(20, count);
+
+  ReleaseTiles(&tiles);
+}
+
+TEST_F(PrioritizedTileSetTest, SoonBin) {
+  // SOON_BIN must come back in BinComparator order under priority ordering.
+
+  PrioritizedTileSet set;
+  TilePriority priorities[4] = {TilePriorityForEventualBin(),
+                                TilePriorityForNowBin(),
+                                TilePriority(),
+                                TilePriorityForSoonBin()};
+
+  // 20 tiles, 5 per priority, inserted in creation order.
+  std::vector<scoped_refptr<Tile> > tiles;
+  for (int index = 0; index < 20; ++index) {
+    scoped_refptr<Tile> tile = CreateTile();
+    tile->SetPriority(ACTIVE_TREE, priorities[index / 5]);
+    tile->SetPriority(PENDING_TREE, priorities[index / 5]);
+    tiles.push_back(tile);
+    set.InsertTile(tile.get(), SOON_BIN);
+  }
+
+  // The set should hand tiles back as if sorted by BinComparator.
+  std::sort(tiles.begin(), tiles.end(), BinComparator());
+
+  int count = 0;
+  PrioritizedTileSet::Iterator it(&set, true);
+  while (it) {
+    EXPECT_TRUE(*it == tiles[count].get());
+    ++count;
+    ++it;
+  }
+  EXPECT_EQ(20, count);
+
+  ReleaseTiles(&tiles);
+}
+
+TEST_F(PrioritizedTileSetTest, SoonBinNoPriority) {
+  // Without priority ordering, SOON_BIN keeps insertion order.
+
+  PrioritizedTileSet set;
+  TilePriority priorities[4] = {TilePriorityForEventualBin(),
+                                TilePriorityForNowBin(),
+                                TilePriority(),
+                                TilePriorityForSoonBin()};
+
+  // 20 tiles, 5 per priority, inserted in creation order.
+  std::vector<scoped_refptr<Tile> > tiles;
+  for (int index = 0; index < 20; ++index) {
+    scoped_refptr<Tile> tile = CreateTile();
+    tile->SetPriority(ACTIVE_TREE, priorities[index / 5]);
+    tile->SetPriority(PENDING_TREE, priorities[index / 5]);
+    tiles.push_back(tile);
+    set.InsertTile(tile.get(), SOON_BIN);
+  }
+
+  // Note the |use_priority_ordering| argument is false here.
+  int count = 0;
+  PrioritizedTileSet::Iterator it(&set, false);
+  while (it) {
+    EXPECT_TRUE(*it == tiles[count].get());
+    ++count;
+    ++it;
+  }
+  EXPECT_EQ(20, count);
+
+  ReleaseTiles(&tiles);
+}
+
+TEST_F(PrioritizedTileSetTest, EventuallyAndActiveBin) {
+  // EVENTUALLY_AND_ACTIVE_BIN must come back in BinComparator order.
+
+  PrioritizedTileSet set;
+  TilePriority priorities[4] = {TilePriorityForEventualBin(),
+                                TilePriorityForNowBin(),
+                                TilePriority(),
+                                TilePriorityForSoonBin()};
+
+  // 20 tiles, 5 per priority, inserted in creation order.
+  std::vector<scoped_refptr<Tile> > tiles;
+  for (int index = 0; index < 20; ++index) {
+    scoped_refptr<Tile> tile = CreateTile();
+    tile->SetPriority(ACTIVE_TREE, priorities[index / 5]);
+    tile->SetPriority(PENDING_TREE, priorities[index / 5]);
+    tiles.push_back(tile);
+    set.InsertTile(tile.get(), EVENTUALLY_AND_ACTIVE_BIN);
+  }
+
+  // The set should hand tiles back as if sorted by BinComparator.
+  std::sort(tiles.begin(), tiles.end(), BinComparator());
+
+  int count = 0;
+  PrioritizedTileSet::Iterator it(&set, true);
+  while (it) {
+    EXPECT_TRUE(*it == tiles[count].get());
+    ++count;
+    ++it;
+  }
+  EXPECT_EQ(20, count);
+
+  ReleaseTiles(&tiles);
+}
+
+TEST_F(PrioritizedTileSetTest, EventuallyBin) {
+  // EVENTUALLY_BIN must come back in BinComparator order.
+
+  PrioritizedTileSet set;
+  TilePriority priorities[4] = {TilePriorityForEventualBin(),
+                                TilePriorityForNowBin(),
+                                TilePriority(),
+                                TilePriorityForSoonBin()};
+
+  // 20 tiles, 5 per priority, inserted in creation order.
+  std::vector<scoped_refptr<Tile> > tiles;
+  for (int index = 0; index < 20; ++index) {
+    scoped_refptr<Tile> tile = CreateTile();
+    tile->SetPriority(ACTIVE_TREE, priorities[index / 5]);
+    tile->SetPriority(PENDING_TREE, priorities[index / 5]);
+    tiles.push_back(tile);
+    set.InsertTile(tile.get(), EVENTUALLY_BIN);
+  }
+
+  // The set should hand tiles back as if sorted by BinComparator.
+  std::sort(tiles.begin(), tiles.end(), BinComparator());
+
+  int count = 0;
+  PrioritizedTileSet::Iterator it(&set, true);
+  while (it) {
+    EXPECT_TRUE(*it == tiles[count].get());
+    ++count;
+    ++it;
+  }
+  EXPECT_EQ(20, count);
+
+  ReleaseTiles(&tiles);
+}
+
+TEST_F(PrioritizedTileSetTest, AtLastAndActiveBin) {
+  // AT_LAST_AND_ACTIVE_BIN must come back in BinComparator order.
+
+  PrioritizedTileSet set;
+  TilePriority priorities[4] = {TilePriorityForEventualBin(),
+                                TilePriorityForNowBin(),
+                                TilePriority(),
+                                TilePriorityForSoonBin()};
+
+  // 20 tiles, 5 per priority, inserted in creation order.
+  std::vector<scoped_refptr<Tile> > tiles;
+  for (int index = 0; index < 20; ++index) {
+    scoped_refptr<Tile> tile = CreateTile();
+    tile->SetPriority(ACTIVE_TREE, priorities[index / 5]);
+    tile->SetPriority(PENDING_TREE, priorities[index / 5]);
+    tiles.push_back(tile);
+    set.InsertTile(tile.get(), AT_LAST_AND_ACTIVE_BIN);
+  }
+
+  // The set should hand tiles back as if sorted by BinComparator.
+  std::sort(tiles.begin(), tiles.end(), BinComparator());
+
+  int count = 0;
+  PrioritizedTileSet::Iterator it(&set, true);
+  while (it) {
+    EXPECT_TRUE(*it == tiles[count].get());
+    ++count;
+    ++it;
+  }
+  EXPECT_EQ(20, count);
+
+  ReleaseTiles(&tiles);
+}
+
+TEST_F(PrioritizedTileSetTest, AtLastBin) {
+  // AT_LAST_BIN must come back in BinComparator order.
+
+  PrioritizedTileSet set;
+  TilePriority priorities[4] = {TilePriorityForEventualBin(),
+                                TilePriorityForNowBin(),
+                                TilePriority(),
+                                TilePriorityForSoonBin()};
+
+  // 20 tiles, 5 per priority, inserted in creation order.
+  std::vector<scoped_refptr<Tile> > tiles;
+  for (int index = 0; index < 20; ++index) {
+    scoped_refptr<Tile> tile = CreateTile();
+    tile->SetPriority(ACTIVE_TREE, priorities[index / 5]);
+    tile->SetPriority(PENDING_TREE, priorities[index / 5]);
+    tiles.push_back(tile);
+    set.InsertTile(tile.get(), AT_LAST_BIN);
+  }
+
+  // The set should hand tiles back as if sorted by BinComparator.
+  std::sort(tiles.begin(), tiles.end(), BinComparator());
+
+  int count = 0;
+  PrioritizedTileSet::Iterator it(&set, true);
+  while (it) {
+    EXPECT_TRUE(*it == tiles[count].get());
+    ++count;
+    ++it;
+  }
+  EXPECT_EQ(20, count);
+
+  ReleaseTiles(&tiles);
+}
+
+TEST_F(PrioritizedTileSetTest, TilesForEachBin) {
+  // One tile per bin; iteration must yield them in bin order even though
+  // insertion order is scrambled.
+
+  scoped_refptr<Tile> now_and_ready_to_draw_bin = CreateTile();
+  scoped_refptr<Tile> now_bin = CreateTile();
+  scoped_refptr<Tile> soon_bin = CreateTile();
+  scoped_refptr<Tile> eventually_and_active_bin = CreateTile();
+  scoped_refptr<Tile> eventually_bin = CreateTile();
+  scoped_refptr<Tile> at_last_bin = CreateTile();
+  scoped_refptr<Tile> at_last_and_active_bin = CreateTile();
+
+  // Insert deliberately out of bin order.
+  PrioritizedTileSet set;
+  set.InsertTile(soon_bin.get(), SOON_BIN);
+  set.InsertTile(at_last_and_active_bin.get(), AT_LAST_AND_ACTIVE_BIN);
+  set.InsertTile(eventually_bin.get(), EVENTUALLY_BIN);
+  set.InsertTile(now_bin.get(), NOW_BIN);
+  set.InsertTile(eventually_and_active_bin.get(), EVENTUALLY_AND_ACTIVE_BIN);
+  set.InsertTile(at_last_bin.get(), AT_LAST_BIN);
+  set.InsertTile(now_and_ready_to_draw_bin.get(), NOW_AND_READY_TO_DRAW_BIN);
+
+  // The iterator must visit the bins in enum order.
+  Tile* expected_order[] = {now_and_ready_to_draw_bin.get(),
+                            now_bin.get(),
+                            soon_bin.get(),
+                            eventually_and_active_bin.get(),
+                            eventually_bin.get(),
+                            at_last_and_active_bin.get(),
+                            at_last_bin.get()};
+
+  PrioritizedTileSet::Iterator it(&set, true);
+  for (size_t i = 0; i < sizeof(expected_order) / sizeof(expected_order[0]);
+       ++i) {
+    EXPECT_TRUE(*it == expected_order[i]);
+    ++it;
+  }
+  EXPECT_FALSE(it);
+}
+
+TEST_F(PrioritizedTileSetTest, ManyTilesForEachBin) {
+  // Aggregate test with many tiles in each of the bins of various
+  // priorities. Ensure that they are all returned in a sorted order.
+
+  // One bookkeeping vector per bin. Every created tile is inserted into
+  // every bin, so each vector ends up holding the same 20 tiles.
+  std::vector<scoped_refptr<Tile> > now_and_ready_to_draw_bins;
+  std::vector<scoped_refptr<Tile> > now_bins;
+  std::vector<scoped_refptr<Tile> > soon_bins;
+  std::vector<scoped_refptr<Tile> > eventually_and_active_bins;
+  std::vector<scoped_refptr<Tile> > eventually_bins;
+  std::vector<scoped_refptr<Tile> > at_last_bins;
+  std::vector<scoped_refptr<Tile> > at_last_and_active_bins;
+
+  TilePriority priorities[4] = {
+      TilePriorityForEventualBin(),
+      TilePriorityForNowBin(),
+      TilePriority(),
+      TilePriorityForSoonBin()};
+
+  PrioritizedTileSet set;
+  for (int priority = 0; priority < 4; ++priority) {
+    for (int i = 0; i < 5; ++i) {
+      scoped_refptr<Tile> tile = CreateTile();
+      tile->SetPriority(ACTIVE_TREE, priorities[priority]);
+      tile->SetPriority(PENDING_TREE, priorities[priority]);
+
+      now_and_ready_to_draw_bins.push_back(tile);
+      now_bins.push_back(tile);
+      soon_bins.push_back(tile);
+      eventually_and_active_bins.push_back(tile);
+      eventually_bins.push_back(tile);
+      at_last_bins.push_back(tile);
+      at_last_and_active_bins.push_back(tile);
+
+      set.InsertTile(tile.get(), NOW_AND_READY_TO_DRAW_BIN);
+      set.InsertTile(tile.get(), NOW_BIN);
+      set.InsertTile(tile.get(), SOON_BIN);
+      set.InsertTile(tile.get(), EVENTUALLY_AND_ACTIVE_BIN);
+      set.InsertTile(tile.get(), EVENTUALLY_BIN);
+      set.InsertTile(tile.get(), AT_LAST_BIN);
+      set.InsertTile(tile.get(), AT_LAST_AND_ACTIVE_BIN);
+    }
+  }
+
+  // A single iterator walks all seven bins back-to-back; each section
+  // below consumes exactly one bin's worth (20 tiles).
+  PrioritizedTileSet::Iterator it(&set, true);
+  std::vector<scoped_refptr<Tile> >::iterator vector_it;
+
+  // Now and ready are not sorted (insertion order expected).
+  for (vector_it = now_and_ready_to_draw_bins.begin();
+       vector_it != now_and_ready_to_draw_bins.end();
+       ++vector_it) {
+    EXPECT_TRUE(vector_it->get() == *it);
+    ++it;
+  }
+
+  // Now bins are sorted.
+  std::sort(now_bins.begin(), now_bins.end(), BinComparator());
+  for (vector_it = now_bins.begin(); vector_it != now_bins.end(); ++vector_it) {
+    EXPECT_TRUE(vector_it->get() == *it);
+    ++it;
+  }
+
+  // Soon bins are sorted.
+  std::sort(soon_bins.begin(), soon_bins.end(), BinComparator());
+  for (vector_it = soon_bins.begin(); vector_it != soon_bins.end();
+       ++vector_it) {
+    EXPECT_TRUE(vector_it->get() == *it);
+    ++it;
+  }
+
+  // Eventually and active bins are sorted.
+  std::sort(eventually_and_active_bins.begin(),
+            eventually_and_active_bins.end(),
+            BinComparator());
+  for (vector_it = eventually_and_active_bins.begin();
+       vector_it != eventually_and_active_bins.end();
+       ++vector_it) {
+    EXPECT_TRUE(vector_it->get() == *it);
+    ++it;
+  }
+
+  // Eventually bins are sorted.
+  std::sort(eventually_bins.begin(), eventually_bins.end(), BinComparator());
+  for (vector_it = eventually_bins.begin(); vector_it != eventually_bins.end();
+       ++vector_it) {
+    EXPECT_TRUE(vector_it->get() == *it);
+    ++it;
+  }
+
+  // At last and active bins are sorted.
+  std::sort(at_last_and_active_bins.begin(),
+            at_last_and_active_bins.end(),
+            BinComparator());
+  for (vector_it = at_last_and_active_bins.begin();
+       vector_it != at_last_and_active_bins.end();
+       ++vector_it) {
+    EXPECT_TRUE(vector_it->get() == *it);
+    ++it;
+  }
+
+  // At last bins are sorted.
+  std::sort(at_last_bins.begin(), at_last_bins.end(), BinComparator());
+  for (vector_it = at_last_bins.begin(); vector_it != at_last_bins.end();
+       ++vector_it) {
+    EXPECT_TRUE(vector_it->get() == *it);
+    ++it;
+  }
+
+  // All 140 entries consumed; the iterator must be exhausted.
+  EXPECT_FALSE(it);
+
+  ReleaseTiles(&now_and_ready_to_draw_bins);
+  ReleaseTiles(&now_bins);
+  ReleaseTiles(&soon_bins);
+  ReleaseTiles(&eventually_and_active_bins);
+  ReleaseTiles(&eventually_bins);
+  ReleaseTiles(&at_last_bins);
+  ReleaseTiles(&at_last_and_active_bins);
+}
+
+TEST_F(PrioritizedTileSetTest, ManyTilesForEachBinDisablePriority) {
+  // Aggregate test with many tiles for each of the bins. Tiles should
+  // appear in order, until DisablePriorityOrdering is called. After that
+  // tiles should appear in the order they were inserted.
+
+  // One bookkeeping vector per bin. Every created tile is inserted into
+  // every bin, so each vector ends up holding the same 20 tiles.
+  std::vector<scoped_refptr<Tile> > now_and_ready_to_draw_bins;
+  std::vector<scoped_refptr<Tile> > now_bins;
+  std::vector<scoped_refptr<Tile> > soon_bins;
+  std::vector<scoped_refptr<Tile> > eventually_and_active_bins;
+  std::vector<scoped_refptr<Tile> > eventually_bins;
+  std::vector<scoped_refptr<Tile> > at_last_bins;
+  std::vector<scoped_refptr<Tile> > at_last_and_active_bins;
+
+  TilePriority priorities[4] = {
+      TilePriorityForEventualBin(),
+      TilePriorityForNowBin(),
+      TilePriority(),
+      TilePriorityForSoonBin()};
+
+  PrioritizedTileSet set;
+  for (int priority = 0; priority < 4; ++priority) {
+    for (int i = 0; i < 5; ++i) {
+      scoped_refptr<Tile> tile = CreateTile();
+      tile->SetPriority(ACTIVE_TREE, priorities[priority]);
+      tile->SetPriority(PENDING_TREE, priorities[priority]);
+
+      now_and_ready_to_draw_bins.push_back(tile);
+      now_bins.push_back(tile);
+      soon_bins.push_back(tile);
+      eventually_and_active_bins.push_back(tile);
+      eventually_bins.push_back(tile);
+      at_last_bins.push_back(tile);
+      at_last_and_active_bins.push_back(tile);
+
+      set.InsertTile(tile.get(), NOW_AND_READY_TO_DRAW_BIN);
+      set.InsertTile(tile.get(), NOW_BIN);
+      set.InsertTile(tile.get(), SOON_BIN);
+      set.InsertTile(tile.get(), EVENTUALLY_AND_ACTIVE_BIN);
+      set.InsertTile(tile.get(), EVENTUALLY_BIN);
+      set.InsertTile(tile.get(), AT_LAST_BIN);
+      set.InsertTile(tile.get(), AT_LAST_AND_ACTIVE_BIN);
+    }
+  }
+
+  PrioritizedTileSet::Iterator it(&set, true);
+  std::vector<scoped_refptr<Tile> >::iterator vector_it;
+
+  // Now and ready are not sorted (insertion order expected).
+  for (vector_it = now_and_ready_to_draw_bins.begin();
+       vector_it != now_and_ready_to_draw_bins.end();
+       ++vector_it) {
+    EXPECT_TRUE(vector_it->get() == *it);
+    ++it;
+  }
+
+  // Now bins are sorted.
+  std::sort(now_bins.begin(), now_bins.end(), BinComparator());
+  for (vector_it = now_bins.begin(); vector_it != now_bins.end(); ++vector_it) {
+    EXPECT_TRUE(vector_it->get() == *it);
+    ++it;
+  }
+
+  // Soon bins are sorted.
+  std::sort(soon_bins.begin(), soon_bins.end(), BinComparator());
+  for (vector_it = soon_bins.begin(); vector_it != soon_bins.end();
+       ++vector_it) {
+    EXPECT_TRUE(vector_it->get() == *it);
+    ++it;
+  }
+
+  // After we disable priority ordering, we already have sorted the next
+  // vector (the iterator sorts a bin when it advances into it, and it is
+  // already positioned inside EVENTUALLY_AND_ACTIVE_BIN here).
+  it.DisablePriorityOrdering();
+
+  // Eventually and active bins are sorted.
+  std::sort(eventually_and_active_bins.begin(),
+            eventually_and_active_bins.end(),
+            BinComparator());
+  for (vector_it = eventually_and_active_bins.begin();
+       vector_it != eventually_and_active_bins.end();
+       ++vector_it) {
+    EXPECT_TRUE(vector_it->get() == *it);
+    ++it;
+  }
+
+  // Eventually bins are not sorted (insertion order expected from here on).
+  for (vector_it = eventually_bins.begin(); vector_it != eventually_bins.end();
+       ++vector_it) {
+    EXPECT_TRUE(vector_it->get() == *it);
+    ++it;
+  }
+
+  // At last and active bins are not sorted.
+  for (vector_it = at_last_and_active_bins.begin();
+       vector_it != at_last_and_active_bins.end();
+       ++vector_it) {
+    EXPECT_TRUE(vector_it->get() == *it);
+    ++it;
+  }
+
+  // At last bins are not sorted.
+  for (vector_it = at_last_bins.begin(); vector_it != at_last_bins.end();
+       ++vector_it) {
+    EXPECT_TRUE(vector_it->get() == *it);
+    ++it;
+  }
+
+  EXPECT_FALSE(it);
+
+  ReleaseTiles(&now_and_ready_to_draw_bins);
+  ReleaseTiles(&now_bins);
+  ReleaseTiles(&soon_bins);
+  ReleaseTiles(&eventually_and_active_bins);
+  ReleaseTiles(&eventually_bins);
+  ReleaseTiles(&at_last_bins);
+  ReleaseTiles(&at_last_and_active_bins);
+}
+
+TEST_F(PrioritizedTileSetTest, TilesForFirstAndLastBins) {
+  // With every bin between the first and last empty, iteration must skip
+  // straight across and yield exactly two tiles, in bin order.
+
+  scoped_refptr<Tile> now_and_ready_to_draw_bin = CreateTile();
+  scoped_refptr<Tile> at_last_bin = CreateTile();
+
+  PrioritizedTileSet set;
+  set.InsertTile(at_last_bin.get(), AT_LAST_BIN);
+  set.InsertTile(now_and_ready_to_draw_bin.get(), NOW_AND_READY_TO_DRAW_BIN);
+
+  Tile* expected_order[] = {now_and_ready_to_draw_bin.get(),
+                            at_last_bin.get()};
+
+  PrioritizedTileSet::Iterator it(&set, true);
+  for (size_t i = 0; i < 2; ++i) {
+    EXPECT_TRUE(*it == expected_order[i]);
+    ++it;
+  }
+  EXPECT_FALSE(it);
+}
+
+TEST_F(PrioritizedTileSetTest, MultipleIterators) {
+  // Ensure that multiple iterators don't interfere with each other.
+  // The set holds five tiles, which in bin order are:
+  //   now_and_ready_to_draw, now, soon, eventually, at_last.
+
+  scoped_refptr<Tile> now_and_ready_to_draw_bin = CreateTile();
+  scoped_refptr<Tile> now_bin = CreateTile();
+  scoped_refptr<Tile> soon_bin = CreateTile();
+  scoped_refptr<Tile> eventually_bin = CreateTile();
+  scoped_refptr<Tile> at_last_bin = CreateTile();
+
+  PrioritizedTileSet set;
+  set.InsertTile(soon_bin.get(), SOON_BIN);
+  set.InsertTile(eventually_bin.get(), EVENTUALLY_BIN);
+  set.InsertTile(now_bin.get(), NOW_BIN);
+  set.InsertTile(at_last_bin.get(), AT_LAST_BIN);
+  set.InsertTile(now_and_ready_to_draw_bin.get(), NOW_AND_READY_TO_DRAW_BIN);
+
+  // Tiles should appear in order.
+  PrioritizedTileSet::Iterator it(&set, true);
+  EXPECT_TRUE(*it == now_and_ready_to_draw_bin.get());
+  ++it;
+  EXPECT_TRUE(*it == now_bin.get());
+  ++it;
+  EXPECT_TRUE(*it == soon_bin.get());
+  ++it;
+  EXPECT_TRUE(*it == eventually_bin.get());
+  ++it;
+  EXPECT_TRUE(*it == at_last_bin.get());
+  ++it;
+  EXPECT_FALSE(it);  // |it| is exhausted and stays exhausted below.
+
+  // Creating multiple iterators shouldn't affect old iterators.
+  PrioritizedTileSet::Iterator second_it(&set, true);
+  EXPECT_TRUE(second_it);
+  EXPECT_FALSE(it);
+
+  // Advance |second_it| to the third tile (soon_bin).
+  ++second_it;
+  EXPECT_TRUE(second_it);
+  ++second_it;
+  EXPECT_TRUE(second_it);
+  EXPECT_FALSE(it);
+
+  PrioritizedTileSet::Iterator third_it(&set, true);
+  EXPECT_TRUE(third_it);
+  // Advance |second_it| to the fifth tile (at_last_bin); |third_it| stays
+  // at the first tile.
+  ++second_it;
+  ++second_it;
+  EXPECT_TRUE(second_it);
+  EXPECT_TRUE(third_it);
+  EXPECT_FALSE(it);
+
+  // |third_it| now reaches the third tile while |second_it| is unmoved.
+  ++third_it;
+  ++third_it;
+  EXPECT_TRUE(third_it);
+  EXPECT_TRUE(*third_it == soon_bin.get());
+  EXPECT_TRUE(second_it);
+  EXPECT_TRUE(*second_it == at_last_bin.get());
+  EXPECT_FALSE(it);
+
+  // Exhaust |second_it|; |third_it| remains valid.
+  ++second_it;
+  EXPECT_TRUE(third_it);
+  EXPECT_FALSE(second_it);
+  EXPECT_FALSE(it);
+
+  set.Clear();
+
+  // After Clear() a fresh iterator sees an empty set.
+  PrioritizedTileSet::Iterator empty_it(&set, true);
+  EXPECT_FALSE(empty_it);
+}
+
+} // namespace
+} // namespace cc
+
diff --git a/cc/resources/priority_calculator.cc b/cc/resources/priority_calculator.cc
new file mode 100644
index 0000000..2a0cd18
--- /dev/null
+++ b/cc/resources/priority_calculator.cc
@@ -0,0 +1,116 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/priority_calculator.h"
+
+#include <algorithm>
+
+#include "ui/gfx/rect.h"
+
+namespace cc {
+
+// Priority scale: numerically smaller values are HIGHER priority (see
+// PriorityCalculator::priority_is_higher in the header). The constants
+// below partition the integer range into bands: UI/visible/surface
+// priorities near zero, then a not-visible band keyed by distance, then a
+// lingering band, with the Allow*Cutoff values separating what each memory
+// policy admits.
+static const int kNothingPriorityCutoff = -3;
+
+static const int kMostHighPriority = -2;
+
+static const int kUIDrawsToRootSurfacePriority = -1;
+static const int kVisibleDrawsToRootSurfacePriority = 0;
+static const int kRenderSurfacesPriority = 1;
+static const int kUIDoesNotDrawToRootSurfacePriority = 2;
+static const int kVisibleDoesNotDrawToRootSurfacePriority = 3;
+
+static const int kVisibleOnlyPriorityCutoff = 4;
+
+// The lower digits are how far from being visible the texture is,
+// in pixels.
+static const int kNotVisibleBasePriority = 1000000;
+static const int kNotVisibleLimitPriority = 1900000;
+
+// Arbitrarily define "nearby" to be 2000 pixels. A better estimate
+// would be percent-of-viewport or percent-of-screen.
+static const int kVisibleAndNearbyPriorityCutoff =
+    kNotVisibleBasePriority + 2000;
+
+// Small animated layers are treated as though they are 512 pixels
+// from being visible.
+static const int kSmallAnimatedLayerPriority = kNotVisibleBasePriority + 512;
+
+static const int kLingeringBasePriority = 2000000;
+static const int kLingeringLimitPriority = 2900000;
+
+static const int kMostLowPriority = 3000000;
+
+static const int kEverythingPriorityCutoff = 3000001;
+
+// static
+int PriorityCalculator::UIPriority(bool draws_to_root_surface) {
+  // UI textures drawing to the root surface outrank those that don't.
+  if (draws_to_root_surface)
+    return kUIDrawsToRootSurfacePriority;
+  return kUIDoesNotDrawToRootSurfacePriority;
+}
+
+// static
+int PriorityCalculator::VisiblePriority(bool draws_to_root_surface) {
+  // Visible textures drawing to the root surface get the top visible slot.
+  if (draws_to_root_surface)
+    return kVisibleDrawsToRootSurfacePriority;
+  return kVisibleDoesNotDrawToRootSurfacePriority;
+}
+
+// static
+int PriorityCalculator::RenderSurfacePriority() {
+  return kRenderSurfacesPriority;
+}
+
+// static
+int PriorityCalculator::LingeringPriority(int previous_priority) {
+  // Step the priority one unit toward lower priority, clamped to the
+  // lingering band.
+  // TODO(reveman): We should remove this once we have priorities for all
+  // textures (we can't currently calculate distances for off-screen
+  // textures).
+  int priority = previous_priority + 1;
+  if (priority < kLingeringBasePriority)
+    priority = kLingeringBasePriority;
+  if (priority > kLingeringLimitPriority)
+    priority = kLingeringLimitPriority;
+  return priority;
+}
+
+// static
+int PriorityCalculator::PriorityFromDistance(const gfx::Rect& visible_rect,
+                                             const gfx::Rect& texture_rect,
+                                             bool draws_to_root_surface) {
+  // Touching or overlapping rects count as visible; otherwise the priority
+  // degrades with Manhattan distance, capped at the not-visible limit.
+  int distance = visible_rect.ManhattanInternalDistance(texture_rect);
+  if (!distance)
+    return VisiblePriority(draws_to_root_surface);
+  int priority = kNotVisibleBasePriority + distance;
+  return std::min(kNotVisibleLimitPriority, priority);
+}
+
+// static
+int PriorityCalculator::SmallAnimatedLayerMinPriority() {
+  return kSmallAnimatedLayerPriority;
+}
+
+// Trivial accessors for the priority extremes and policy cutoffs defined
+// above. NOTE(review): the cutoffs appear to be compared against texture
+// priorities by the memory policy — confirm against callers.
+
+// static
+int PriorityCalculator::HighestPriority() {
+  return kMostHighPriority;
+}
+
+// static
+int PriorityCalculator::LowestPriority() {
+  return kMostLowPriority;
+}
+
+// static
+int PriorityCalculator::AllowNothingCutoff() {
+  return kNothingPriorityCutoff;
+}
+
+// static
+int PriorityCalculator::AllowVisibleOnlyCutoff() {
+  return kVisibleOnlyPriorityCutoff;
+}
+
+// static
+int PriorityCalculator::AllowVisibleAndNearbyCutoff() {
+  return kVisibleAndNearbyPriorityCutoff;
+}
+
+// static
+int PriorityCalculator::AllowEverythingCutoff() {
+  return kEverythingPriorityCutoff;
+}
+
+} // namespace cc
diff --git a/cc/resources/priority_calculator.h b/cc/resources/priority_calculator.h
new file mode 100644
index 0000000..502bd27
--- /dev/null
+++ b/cc/resources/priority_calculator.h
@@ -0,0 +1,47 @@
+// Copyright 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_PRIORITY_CALCULATOR_H_
+#define CC_RESOURCES_PRIORITY_CALCULATOR_H_
+
+#include "base/basictypes.h"
+#include "cc/base/cc_export.h"
+
+namespace gfx { class Rect; }
+
+namespace cc {
+
+// Computes integer priorities for texture management. Numerically smaller
+// values are HIGHER priority (see priority_is_higher below).
+class CC_EXPORT PriorityCalculator {
+ public:
+  PriorityCalculator() {}
+
+  static int UIPriority(bool draws_to_root_surface);
+  static int VisiblePriority(bool draws_to_root_surface);
+  static int RenderSurfacePriority();
+  // Nudges |previous_priority| one step toward lower priority, clamped
+  // into the lingering band (for textures without computable distances).
+  static int LingeringPriority(int previous_priority);
+  // Priority from the Manhattan distance between |texture_rect| and
+  // |visible_rect|; equals VisiblePriority() when they touch.
+  static int PriorityFromDistance(const gfx::Rect& visible_rect,
+                                  const gfx::Rect& texture_rect,
+                                  bool draws_to_root_surface);
+  static int SmallAnimatedLayerMinPriority();
+
+  static int HighestPriority();
+  static int LowestPriority();
+  // The priority scale is inverted: smaller value == higher priority.
+  static inline bool priority_is_lower(int a, int b) { return a > b; }
+  static inline bool priority_is_higher(int a, int b) { return a < b; }
+  static inline int max_priority(int a, int b) {
+    return priority_is_higher(a, b) ? a : b;
+  }
+
+  // Cutoff values separating what each memory policy admits.
+  static int AllowNothingCutoff();
+  static int AllowVisibleOnlyCutoff();
+  static int AllowVisibleAndNearbyCutoff();
+  static int AllowEverythingCutoff();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PriorityCalculator);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_PRIORITY_CALCULATOR_H_
diff --git a/cc/resources/raster_buffer.cc b/cc/resources/raster_buffer.cc
new file mode 100644
index 0000000..8f56717
--- /dev/null
+++ b/cc/resources/raster_buffer.cc
@@ -0,0 +1,15 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/raster_buffer.h"
+
+namespace cc {
+
+// Out-of-line empty definitions keep the vtable and ctor/dtor emission in
+// this translation unit.
+RasterBuffer::RasterBuffer() {}
+
+RasterBuffer::~RasterBuffer() {}
+
+} // namespace cc
diff --git a/cc/resources/raster_buffer.h b/cc/resources/raster_buffer.h
new file mode 100644
index 0000000..d148b72
--- /dev/null
+++ b/cc/resources/raster_buffer.h
@@ -0,0 +1,26 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RASTER_BUFFER_H_
+#define CC_RESOURCES_RASTER_BUFFER_H_
+
+#include "cc/base/cc_export.h"
+#include "skia/ext/refptr.h"
+
+class SkCanvas;
+
+namespace cc {
+
+// Abstract buffer that rasterization draws into via Skia. Implementations
+// hand out an SkCanvas from AcquireSkCanvas and receive it back through
+// ReleaseSkCanvas when painting is done.
+class CC_EXPORT RasterBuffer {
+ public:
+  RasterBuffer();
+  virtual ~RasterBuffer();
+
+  // Pair every Acquire with exactly one Release of the same canvas.
+  virtual skia::RefPtr<SkCanvas> AcquireSkCanvas() = 0;
+  virtual void ReleaseSkCanvas(const skia::RefPtr<SkCanvas>& canvas) = 0;
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RASTER_BUFFER_H_
diff --git a/cc/resources/raster_tile_priority_queue.cc b/cc/resources/raster_tile_priority_queue.cc
new file mode 100644
index 0000000..64a4a91
--- /dev/null
+++ b/cc/resources/raster_tile_priority_queue.cc
@@ -0,0 +1,229 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/raster_tile_priority_queue.h"
+
+namespace cc {
+
+namespace {
+
+// Heap comparator for PairedPictureLayerQueues. Used with the
+// make_heap/push_heap/pop_heap calls in RasterTilePriorityQueue so that the
+// paired queue whose next tile has the highest priority sits at the front.
+class RasterOrderComparator {
+ public:
+  explicit RasterOrderComparator(TreePriority tree_priority)
+      : tree_priority_(tree_priority) {}
+
+  bool operator()(
+      const RasterTilePriorityQueue::PairedPictureLayerQueue* a,
+      const RasterTilePriorityQueue::PairedPictureLayerQueue* b) const {
+    // Note that in this function, we have to return true if and only if
+    // a is strictly lower priority than b. Note that for the sake of
+    // completeness, empty queue is considered to have lowest priority.
+    if (a->IsEmpty() || b->IsEmpty())
+      return b->IsEmpty() < a->IsEmpty();
+
+    // Compare the tiles each queue would return next.
+    WhichTree a_tree = a->NextTileIteratorTree(tree_priority_);
+    const PictureLayerImpl::LayerRasterTileIterator* a_iterator =
+        a_tree == ACTIVE_TREE ? &a->active_iterator : &a->pending_iterator;
+
+    WhichTree b_tree = b->NextTileIteratorTree(tree_priority_);
+    const PictureLayerImpl::LayerRasterTileIterator* b_iterator =
+        b_tree == ACTIVE_TREE ? &b->active_iterator : &b->pending_iterator;
+
+    const Tile* a_tile = **a_iterator;
+    const Tile* b_tile = **b_iterator;
+
+    const TilePriority& a_priority =
+        a_tile->priority_for_tree_priority(tree_priority_);
+    const TilePriority& b_priority =
+        b_tile->priority_for_tree_priority(tree_priority_);
+    bool prioritize_low_res = tree_priority_ == SMOOTHNESS_TAKES_PRIORITY;
+
+    // If the bin is the same but the resolution is not, then the order will be
+    // determined by whether we prioritize low res or not.
+    // TODO(vmpstr): Remove this when TilePriority is no longer a member of Tile
+    // class but instead produced by the iterators.
+    if (b_priority.priority_bin == a_priority.priority_bin &&
+        b_priority.resolution != a_priority.resolution) {
+      // Non ideal resolution should be sorted lower than other resolutions.
+      if (a_priority.resolution == NON_IDEAL_RESOLUTION)
+        return true;
+
+      if (b_priority.resolution == NON_IDEAL_RESOLUTION)
+        return false;
+
+      // The preferred resolution counts as higher priority, so a is lower
+      // priority exactly when b has the preferred resolution.
+      if (prioritize_low_res)
+        return b_priority.resolution == LOW_RESOLUTION;
+      return b_priority.resolution == HIGH_RESOLUTION;
+    }
+    return b_priority.IsHigherPriorityThan(a_priority);
+  }
+
+ private:
+  TreePriority tree_priority_;
+};
+
+// Decides which tree's tile should be taken next. For the biased tree
+// priorities the answer is fixed (SMOOTHNESS -> active, NEW_CONTENT ->
+// pending); for SAME_PRIORITY_FOR_BOTH_TREES the tiles' per-tree priorities
+// are compared. If |shared_tile| is non-NULL it is used for both sides of the
+// comparison and the iterators may be NULL.
+WhichTree HigherPriorityTree(
+    TreePriority tree_priority,
+    const PictureLayerImpl::LayerRasterTileIterator* active_iterator,
+    const PictureLayerImpl::LayerRasterTileIterator* pending_iterator,
+    const Tile* shared_tile) {
+  switch (tree_priority) {
+    case SMOOTHNESS_TAKES_PRIORITY:
+      return ACTIVE_TREE;
+    case NEW_CONTENT_TAKES_PRIORITY:
+      return PENDING_TREE;
+    case SAME_PRIORITY_FOR_BOTH_TREES: {
+      const Tile* active_tile = shared_tile ? shared_tile : **active_iterator;
+      const Tile* pending_tile = shared_tile ? shared_tile : **pending_iterator;
+
+      const TilePriority& active_priority = active_tile->priority(ACTIVE_TREE);
+      const TilePriority& pending_priority =
+          pending_tile->priority(PENDING_TREE);
+
+      // Ties go to the pending tree.
+      if (active_priority.IsHigherPriorityThan(pending_priority))
+        return ACTIVE_TREE;
+      return PENDING_TREE;
+    }
+    default:
+      NOTREACHED();
+      return ACTIVE_TREE;
+  }
+}
+
+} // namespace
+
+// A default-constructed queue is empty until Build() is called.
+RasterTilePriorityQueue::RasterTilePriorityQueue() {
+}
+
+RasterTilePriorityQueue::~RasterTilePriorityQueue() {
+}
+
+// Creates one PairedPictureLayerQueue per (active, pending) layer pair and
+// heapifies them so the pair whose next tile has the highest priority is at
+// the front. |tree_priority| is remembered for later Top()/Pop() calls.
+void RasterTilePriorityQueue::Build(
+    const std::vector<PictureLayerImpl::Pair>& paired_layers,
+    TreePriority tree_priority) {
+  tree_priority_ = tree_priority;
+  for (std::vector<PictureLayerImpl::Pair>::const_iterator it =
+           paired_layers.begin();
+       it != paired_layers.end();
+       ++it) {
+    paired_queues_.push_back(
+        make_scoped_ptr(new PairedPictureLayerQueue(*it, tree_priority_)));
+  }
+  paired_queues_.make_heap(RasterOrderComparator(tree_priority_));
+}
+
+// Discards all per-layer iteration state; Build() must be called again
+// before the queue can be used.
+void RasterTilePriorityQueue::Reset() {
+  paired_queues_.clear();
+}
+
+// The heap invariant keeps the queue with the highest-priority next tile at
+// the front, so the whole queue is empty iff the front queue is empty.
+bool RasterTilePriorityQueue::IsEmpty() const {
+  return paired_queues_.empty() || paired_queues_.front()->IsEmpty();
+}
+
+// Returns (without consuming) the overall highest-priority tile.
+Tile* RasterTilePriorityQueue::Top() {
+  DCHECK(!IsEmpty());
+  return paired_queues_.front()->Top(tree_priority_);
+}
+
+// Consumes the current top tile: pop the front queue out of the heap,
+// advance it past the tile, then push it back so the heap is reordered by
+// its new next tile.
+void RasterTilePriorityQueue::Pop() {
+  DCHECK(!IsEmpty());
+
+  paired_queues_.pop_heap(RasterOrderComparator(tree_priority_));
+  PairedPictureLayerQueue* paired_queue = paired_queues_.back();
+  paired_queue->Pop(tree_priority_);
+  paired_queues_.push_heap(RasterOrderComparator(tree_priority_));
+}
+
+RasterTilePriorityQueue::PairedPictureLayerQueue::PairedPictureLayerQueue() {
+}
+
+// Builds one raster tile iterator per existing layer in the pair; a missing
+// layer gets a default-constructed (empty) iterator. The bool passed to the
+// iterator is whether smoothness takes priority — presumably this selects
+// low-res-first iteration; confirm against LayerRasterTileIterator.
+RasterTilePriorityQueue::PairedPictureLayerQueue::PairedPictureLayerQueue(
+    const PictureLayerImpl::Pair& layer_pair,
+    TreePriority tree_priority)
+    : active_iterator(layer_pair.active
+                          ? PictureLayerImpl::LayerRasterTileIterator(
+                                layer_pair.active,
+                                tree_priority == SMOOTHNESS_TAKES_PRIORITY)
+                          : PictureLayerImpl::LayerRasterTileIterator()),
+      pending_iterator(layer_pair.pending
+                           ? PictureLayerImpl::LayerRasterTileIterator(
+                                 layer_pair.pending,
+                                 tree_priority == SMOOTHNESS_TAKES_PRIORITY)
+                           : PictureLayerImpl::LayerRasterTileIterator()),
+      has_both_layers(layer_pair.active && layer_pair.pending) {
+}
+
+RasterTilePriorityQueue::PairedPictureLayerQueue::~PairedPictureLayerQueue() {
+}
+
+// Empty when both the active and the pending iterators are exhausted.
+bool RasterTilePriorityQueue::PairedPictureLayerQueue::IsEmpty() const {
+  return !active_iterator && !pending_iterator;
+}
+
+// Returns (without consuming) the next tile from whichever iterator
+// NextTileIteratorTree() selects. DCHECKs that the tile has not already been
+// handed out by this queue.
+Tile* RasterTilePriorityQueue::PairedPictureLayerQueue::Top(
+    TreePriority tree_priority) {
+  DCHECK(!IsEmpty());
+
+  WhichTree next_tree = NextTileIteratorTree(tree_priority);
+  PictureLayerImpl::LayerRasterTileIterator* next_iterator =
+      next_tree == ACTIVE_TREE ? &active_iterator : &pending_iterator;
+  DCHECK(*next_iterator);
+  Tile* tile = **next_iterator;
+  DCHECK(returned_tiles_for_debug.find(tile) == returned_tiles_for_debug.end());
+  return tile;
+}
+
+// Consumes the current top tile and then skips forward over any shared tile
+// that should instead be returned by the other tree's iterator, so that a
+// tile shared between the active and pending layers is only returned once.
+void RasterTilePriorityQueue::PairedPictureLayerQueue::Pop(
+    TreePriority tree_priority) {
+  DCHECK(!IsEmpty());
+
+  WhichTree next_tree = NextTileIteratorTree(tree_priority);
+  PictureLayerImpl::LayerRasterTileIterator* next_iterator =
+      next_tree == ACTIVE_TREE ? &active_iterator : &pending_iterator;
+  DCHECK(*next_iterator);
+  // The insert() is deliberately inside the DCHECK: |returned_tiles_for_debug|
+  // is only maintained (and only consulted) in DCHECK-enabled builds.
+  DCHECK(returned_tiles_for_debug.insert(**next_iterator).second);
+  ++(*next_iterator);
+
+  if (has_both_layers) {
+    // We have both layers (active and pending) thus we can encounter shared
+    // tiles twice (from the active iterator and from the pending iterator).
+    for (; !IsEmpty(); ++(*next_iterator)) {
+      next_tree = NextTileIteratorTree(tree_priority);
+      next_iterator =
+          next_tree == ACTIVE_TREE ? &active_iterator : &pending_iterator;
+
+      // Accept all non-shared tiles.
+      const Tile* tile = **next_iterator;
+      if (!tile->is_shared())
+        break;
+
+      // Accept a shared tile if the next tree is the higher priority one
+      // corresponding to the iterator (active or pending) which usually (but
+      // due to spiral iterators not always) returns the shared tile first.
+      if (next_tree == HigherPriorityTree(tree_priority, NULL, NULL, tile))
+        break;
+    }
+  }
+
+  // If not empty, run Top() so its DCHECKs validate the next tile.
+  DCHECK(IsEmpty() || Top(tree_priority));
+}
+
+// Picks which of the two iterators the next tile should come from: the only
+// non-empty one if one is exhausted, otherwise whichever tree
+// HigherPriorityTree() selects for |tree_priority|.
+WhichTree
+RasterTilePriorityQueue::PairedPictureLayerQueue::NextTileIteratorTree(
+    TreePriority tree_priority) const {
+  DCHECK(!IsEmpty());
+
+  // If we only have one iterator with tiles, return it.
+  if (!active_iterator)
+    return PENDING_TREE;
+  if (!pending_iterator)
+    return ACTIVE_TREE;
+
+  // Now both iterators have tiles, so we have to decide based on tree priority.
+  return HigherPriorityTree(
+      tree_priority, &active_iterator, &pending_iterator, NULL);
+}
+
+} // namespace cc
diff --git a/cc/resources/raster_tile_priority_queue.h b/cc/resources/raster_tile_priority_queue.h
new file mode 100644
index 0000000..a7ec9e0
--- /dev/null
+++ b/cc/resources/raster_tile_priority_queue.h
@@ -0,0 +1,63 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RASTER_TILE_PRIORITY_QUEUE_H_
+#define CC_RESOURCES_RASTER_TILE_PRIORITY_QUEUE_H_
+
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "cc/base/cc_export.h"
+#include "cc/layers/picture_layer_impl.h"
+#include "cc/resources/tile_priority.h"
+
+namespace cc {
+
+// Priority queue that merges the raster tile iterators of a set of paired
+// (active, pending) picture layers and returns tiles in overall raster
+// priority order for a given TreePriority.
+class CC_EXPORT RasterTilePriorityQueue {
+ public:
+  // Iteration state for one (active, pending) layer pair.
+  struct PairedPictureLayerQueue {
+    PairedPictureLayerQueue();
+    PairedPictureLayerQueue(const PictureLayerImpl::Pair& layer_pair,
+                            TreePriority tree_priority);
+    ~PairedPictureLayerQueue();
+
+    bool IsEmpty() const;
+    Tile* Top(TreePriority tree_priority);
+    void Pop(TreePriority tree_priority);
+
+    // Which of the two iterators the next tile should come from.
+    WhichTree NextTileIteratorTree(TreePriority tree_priority) const;
+
+    PictureLayerImpl::LayerRasterTileIterator active_iterator;
+    PictureLayerImpl::LayerRasterTileIterator pending_iterator;
+    // True when the pair has both layers; only then can a shared tile be
+    // encountered by both iterators (see Pop()).
+    bool has_both_layers;
+
+    // Set of returned tiles (excluding the current one) for DCHECKing.
+    std::set<const Tile*> returned_tiles_for_debug;
+  };
+
+  RasterTilePriorityQueue();
+  ~RasterTilePriorityQueue();
+
+  // Build() must be called before Top()/Pop(); Reset() discards all state.
+  void Build(const std::vector<PictureLayerImpl::Pair>& paired_layers,
+             TreePriority tree_priority);
+  void Reset();
+
+  bool IsEmpty() const;
+  Tile* Top();
+  void Pop();
+
+ private:
+  // TODO(vmpstr): This is potentially unnecessary if it becomes the case that
+  // PairedPictureLayerQueue is fast enough to copy. In that case, we can use
+  // objects directly (ie std::vector<PairedPictureLayerQueue>).
+  ScopedPtrVector<PairedPictureLayerQueue> paired_queues_;
+  TreePriority tree_priority_;
+
+  DISALLOW_COPY_AND_ASSIGN(RasterTilePriorityQueue);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RASTER_TILE_PRIORITY_QUEUE_H_
diff --git a/cc/resources/raster_worker_pool.cc b/cc/resources/raster_worker_pool.cc
new file mode 100644
index 0000000..a720543
--- /dev/null
+++ b/cc/resources/raster_worker_pool.cc
@@ -0,0 +1,244 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/raster_worker_pool.h"
+
+#include <algorithm>
+
+#include "base/debug/trace_event.h"
+#include "base/lazy_instance.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/simple_thread.h"
+#include "cc/base/scoped_ptr_deque.h"
+
+namespace cc {
+namespace {
+
+// Shared TaskGraphRunner that executes raster work on a pool of dedicated
+// worker threads. The thread count is fixed at construction via
+// RasterWorkerPool::GetNumRasterThreads().
+class RasterTaskGraphRunner : public TaskGraphRunner,
+                              public base::DelegateSimpleThread::Delegate {
+ public:
+  RasterTaskGraphRunner() {
+    size_t num_threads = RasterWorkerPool::GetNumRasterThreads();
+    while (workers_.size() < num_threads) {
+      scoped_ptr<base::DelegateSimpleThread> worker =
+          make_scoped_ptr(new base::DelegateSimpleThread(
+              this,
+              base::StringPrintf("CompositorRasterWorker%u",
+                                 static_cast<unsigned>(workers_.size() + 1))
+                  .c_str()));
+      worker->Start();
+#if defined(OS_ANDROID) || defined(OS_LINUX)
+      // Run raster work at background priority where supported.
+      worker->SetThreadPriority(base::kThreadPriority_Background);
+#endif
+      workers_.push_back(worker.Pass());
+    }
+  }
+
+  // The instance lives in a LazyInstance<...>::Leaky and is intentionally
+  // never destroyed, so reaching this destructor is a bug.
+  virtual ~RasterTaskGraphRunner() { NOTREACHED(); }
+
+ private:
+  // Overridden from base::DelegateSimpleThread::Delegate:
+  virtual void Run() OVERRIDE {
+    TaskGraphRunner::Run();
+  }
+
+  ScopedPtrDeque<base::DelegateSimpleThread> workers_;
+};
+
+// Lazily-created, never-destroyed shared task graph runner (see
+// RasterTaskGraphRunner above).
+base::LazyInstance<RasterTaskGraphRunner>::Leaky g_task_graph_runner =
+    LAZY_INSTANCE_INITIALIZER;
+
+// Thread count used when SetNumRasterThreads() was never called.
+const int kDefaultNumRasterThreads = 1;
+
+// Zero until SetNumRasterThreads() or the first GetNumRasterThreads() call.
+int g_num_raster_threads = 0;
+
+// Task that, when run on a worker thread, posts |on_raster_finished_callback|
+// back to |task_runner|. Used to signal that a set of raster tasks has
+// finished (see CreateRasterFinishedTask below).
+class RasterFinishedTaskImpl : public RasterizerTask {
+ public:
+  explicit RasterFinishedTaskImpl(
+      base::SequencedTaskRunner* task_runner,
+      const base::Closure& on_raster_finished_callback)
+      : task_runner_(task_runner),
+        on_raster_finished_callback_(on_raster_finished_callback) {}
+
+  // Overridden from Task:
+  virtual void RunOnWorkerThread() OVERRIDE {
+    TRACE_EVENT0("cc", "RasterFinishedTaskImpl::RunOnWorkerThread");
+    RasterFinished();
+  }
+
+  // Overridden from RasterizerTask: nothing to do on the origin thread.
+  virtual void ScheduleOnOriginThread(RasterizerTaskClient* client) OVERRIDE {}
+  virtual void CompleteOnOriginThread(RasterizerTaskClient* client) OVERRIDE {}
+  virtual void RunReplyOnOriginThread() OVERRIDE {}
+
+ protected:
+  virtual ~RasterFinishedTaskImpl() {}
+
+  void RasterFinished() {
+    task_runner_->PostTask(FROM_HERE, on_raster_finished_callback_);
+  }
+
+ private:
+  scoped_refptr<base::SequencedTaskRunner> task_runner_;
+  const base::Closure on_raster_finished_callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(RasterFinishedTaskImpl);
+};
+
+} // namespace
+
+// This allows a micro benchmark system to run tasks with highest priority,
+// since it should finish as quickly as possible.
+unsigned RasterWorkerPool::kBenchmarkRasterTaskPriority = 0u;
+// Task priorities that make sure raster finished tasks run before any
+// remaining raster tasks.
+unsigned RasterWorkerPool::kRasterFinishedTaskPriority = 1u;
+unsigned RasterWorkerPool::kRasterTaskPriorityBase = 2u;
+
+// RasterWorkerPool is an abstract base; nothing to initialize here.
+RasterWorkerPool::RasterWorkerPool() {}
+
+RasterWorkerPool::~RasterWorkerPool() {}
+
+// static
+void RasterWorkerPool::SetNumRasterThreads(int num_threads) {
+  DCHECK_LT(0, num_threads);
+  // May only be set once, and before the first GetNumRasterThreads() call.
+  DCHECK_EQ(0, g_num_raster_threads);
+
+  g_num_raster_threads = num_threads;
+}
+
+// static
+int RasterWorkerPool::GetNumRasterThreads() {
+  // Fall back to the default on first use. This also locks the value in:
+  // any later SetNumRasterThreads() call will fail its DCHECK.
+  if (!g_num_raster_threads)
+    g_num_raster_threads = kDefaultNumRasterThreads;
+
+  return g_num_raster_threads;
+}
+
+// static
+// Returns the lazily-created, process-wide task graph runner.
+TaskGraphRunner* RasterWorkerPool::GetTaskGraphRunner() {
+  return g_task_graph_runner.Pointer();
+}
+
+// static
+// Creates a task that posts |on_raster_finished_callback| to |task_runner|
+// when it runs on a worker thread.
+scoped_refptr<RasterizerTask> RasterWorkerPool::CreateRasterFinishedTask(
+    base::SequencedTaskRunner* task_runner,
+    const base::Closure& on_raster_finished_callback) {
+  return make_scoped_refptr(
+      new RasterFinishedTaskImpl(task_runner, on_raster_finished_callback));
+}
+
+// static
+// Calls ScheduleOnOriginThread() (bracketed by WillSchedule()/DidSchedule())
+// on every task in |graph| that has not been scheduled yet; already-scheduled
+// tasks are left untouched.
+void RasterWorkerPool::ScheduleTasksOnOriginThread(RasterizerTaskClient* client,
+                                                   TaskGraph* graph) {
+  TRACE_EVENT0("cc", "Rasterizer::ScheduleTasksOnOriginThread");
+
+  for (TaskGraph::Node::Vector::iterator it = graph->nodes.begin();
+       it != graph->nodes.end();
+       ++it) {
+    TaskGraph::Node& node = *it;
+    RasterizerTask* task = static_cast<RasterizerTask*>(node.task);
+
+    if (!task->HasBeenScheduled()) {
+      task->WillSchedule();
+      task->ScheduleOnOriginThread(client);
+      task->DidSchedule();
+    }
+  }
+}
+
+// static
+// Appends a node for |task| to |graph|. DCHECKs that |task| does not already
+// have a node (each task may appear in the graph at most once).
+void RasterWorkerPool::InsertNodeForTask(TaskGraph* graph,
+                                         RasterizerTask* task,
+                                         unsigned priority,
+                                         size_t dependencies) {
+  DCHECK(std::find_if(graph->nodes.begin(),
+                      graph->nodes.end(),
+                      TaskGraph::Node::TaskComparator(task)) ==
+         graph->nodes.end());
+  graph->nodes.push_back(TaskGraph::Node(task, priority, dependencies));
+}
+
+// static
+// Inserts |raster_task| and its not-yet-completed image decode dependencies
+// into |graph|, with an edge from each pending decode to the raster task.
+// Completed decodes are skipped entirely and do not count as dependencies.
+void RasterWorkerPool::InsertNodesForRasterTask(
+    TaskGraph* graph,
+    RasterTask* raster_task,
+    const ImageDecodeTask::Vector& decode_tasks,
+    unsigned priority) {
+  size_t dependencies = 0u;
+
+  // Insert image decode tasks.
+  for (ImageDecodeTask::Vector::const_iterator it = decode_tasks.begin();
+       it != decode_tasks.end();
+       ++it) {
+    ImageDecodeTask* decode_task = it->get();
+
+    // Skip if already decoded.
+    if (decode_task->HasCompleted())
+      continue;
+
+    dependencies++;
+
+    // Add decode task if it doesn't already exist in graph. A decode task may
+    // be shared by several raster tasks, so it gets only one node.
+    TaskGraph::Node::Vector::iterator decode_it =
+        std::find_if(graph->nodes.begin(),
+                     graph->nodes.end(),
+                     TaskGraph::Node::TaskComparator(decode_task));
+    if (decode_it == graph->nodes.end())
+      InsertNodeForTask(graph, decode_task, priority, 0u);
+
+    graph->edges.push_back(TaskGraph::Edge(decode_task, raster_task));
+  }
+
+  InsertNodeForTask(graph, raster_task, priority, dependencies);
+}
+
+// static
+// Points |bitmap| at |buffer| when the buffer format is a Skia-native 32-bit
+// format. For RGBA_4444, |buffer| cannot be drawn into directly, so a
+// temporary N32 bitmap is allocated instead; its pixels are converted into
+// |buffer| later by ReleaseBitmapForBuffer(). Other formats are unsupported.
+void RasterWorkerPool::AcquireBitmapForBuffer(SkBitmap* bitmap,
+                                              uint8_t* buffer,
+                                              ResourceFormat buffer_format,
+                                              const gfx::Size& size,
+                                              int stride) {
+  switch (buffer_format) {
+    case RGBA_4444:
+      // Note: |buffer| and |stride| are deliberately ignored here; the
+      // conversion to 4444 happens in ReleaseBitmapForBuffer().
+      bitmap->allocN32Pixels(size.width(), size.height());
+      break;
+    case RGBA_8888:
+    case BGRA_8888: {
+      SkImageInfo info =
+          SkImageInfo::MakeN32Premul(size.width(), size.height());
+      // A stride of 0 means tightly packed rows.
+      if (!stride)
+        stride = info.minRowBytes();
+      bitmap->installPixels(info, buffer, stride);
+      break;
+    }
+    case ALPHA_8:
+    case LUMINANCE_8:
+    case RGB_565:
+    case ETC1:
+      NOTREACHED();
+      break;
+  }
+}
+
+// static
+// Counterpart to AcquireBitmapForBuffer(): if |bitmap| did not wrap |buffer|
+// directly (its color type differs from |buffer_format|, e.g. the RGBA_4444
+// case), converts and copies the pixels into |buffer|, then drops the
+// bitmap's pixels. Note the readPixels() result is only checked via DCHECK.
+void RasterWorkerPool::ReleaseBitmapForBuffer(SkBitmap* bitmap,
+                                              uint8_t* buffer,
+                                              ResourceFormat buffer_format) {
+  SkColorType buffer_color_type = ResourceFormatToSkColorType(buffer_format);
+  if (buffer_color_type != bitmap->colorType()) {
+    SkImageInfo dst_info = bitmap->info();
+    dst_info.fColorType = buffer_color_type;
+    // TODO(kaanb): The GL pipeline assumes a 4-byte alignment for the
+    // bitmap data. There will be no need to call SkAlign4 once crbug.com/293728
+    // is fixed.
+    const size_t dst_row_bytes = SkAlign4(dst_info.minRowBytes());
+    DCHECK_EQ(0u, dst_row_bytes % 4);
+    bool success = bitmap->readPixels(dst_info, buffer, dst_row_bytes, 0, 0);
+    DCHECK_EQ(true, success);
+  }
+  bitmap->reset();
+}
+
+} // namespace cc
diff --git a/cc/resources/raster_worker_pool.h b/cc/resources/raster_worker_pool.h
new file mode 100644
index 0000000..e57fe06
--- /dev/null
+++ b/cc/resources/raster_worker_pool.h
@@ -0,0 +1,81 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RASTER_WORKER_POOL_H_
+#define CC_RESOURCES_RASTER_WORKER_POOL_H_
+
+#include "cc/resources/rasterizer.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace base {
+class SequencedTaskRunner;
+}
+
+namespace cc {
+
+// Base class plus shared static utilities for the raster worker pool
+// implementations (pixel-buffer, zero-copy, one-copy, GPU, bitmap).
+class CC_EXPORT RasterWorkerPool {
+ public:
+  // Task graph priorities; lower value runs first. Raster tasks use
+  // kRasterTaskPriorityBase and up.
+  static unsigned kBenchmarkRasterTaskPriority;
+  static unsigned kRasterFinishedTaskPriority;
+  static unsigned kRasterTaskPriorityBase;
+
+  RasterWorkerPool();
+  virtual ~RasterWorkerPool();
+
+  // Set the number of threads to use for the global TaskGraphRunner instance.
+  // This can only be called once and must be called prior to
+  // GetNumRasterThreads().
+  static void SetNumRasterThreads(int num_threads);
+
+  // Returns the number of threads used for the global TaskGraphRunner instance.
+  static int GetNumRasterThreads();
+
+  // Returns a pointer to the global TaskGraphRunner instance.
+  static TaskGraphRunner* GetTaskGraphRunner();
+
+  // Utility function that can be used to create a "raster finished" task that
+  // posts |callback| to |task_runner| when run.
+  static scoped_refptr<RasterizerTask> CreateRasterFinishedTask(
+      base::SequencedTaskRunner* task_runner,
+      const base::Closure& callback);
+
+  // Utility function that can be used to call ::ScheduleOnOriginThread() for
+  // each task in |graph|.
+  static void ScheduleTasksOnOriginThread(RasterizerTaskClient* client,
+                                          TaskGraph* graph);
+
+  // Utility function that can be used to build a task graph. Inserts a node
+  // that represents |task| in |graph|. See TaskGraph definition for valid
+  // |priority| values.
+  static void InsertNodeForTask(TaskGraph* graph,
+                                RasterizerTask* task,
+                                unsigned priority,
+                                size_t dependencies);
+
+  // Utility function that can be used to build a task graph. Inserts nodes that
+  // represent |task| and all its image decode dependencies in |graph|.
+  static void InsertNodesForRasterTask(
+      TaskGraph* graph,
+      RasterTask* task,
+      const ImageDecodeTask::Vector& decode_tasks,
+      unsigned priority);
+
+  // Utility functions that transparently create a temporary bitmap and copy
+  // pixels to buffer when necessary.
+  static void AcquireBitmapForBuffer(SkBitmap* bitmap,
+                                     uint8_t* buffer,
+                                     ResourceFormat format,
+                                     const gfx::Size& size,
+                                     int stride);
+  static void ReleaseBitmapForBuffer(SkBitmap* bitmap,
+                                     uint8_t* buffer,
+                                     ResourceFormat format);
+
+  // Type-checking downcast routine.
+  virtual Rasterizer* AsRasterizer() = 0;
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RASTER_WORKER_POOL_H_
diff --git a/cc/resources/raster_worker_pool_perftest.cc b/cc/resources/raster_worker_pool_perftest.cc
new file mode 100644
index 0000000..3ae1c20
--- /dev/null
+++ b/cc/resources/raster_worker_pool_perftest.cc
@@ -0,0 +1,526 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/raster_worker_pool.h"
+
+#include "base/test/test_simple_task_runner.h"
+#include "base/time/time.h"
+#include "cc/debug/lap_timer.h"
+#include "cc/output/context_provider.h"
+#include "cc/resources/bitmap_raster_worker_pool.h"
+#include "cc/resources/gpu_raster_worker_pool.h"
+#include "cc/resources/one_copy_raster_worker_pool.h"
+#include "cc/resources/pixel_buffer_raster_worker_pool.h"
+#include "cc/resources/raster_buffer.h"
+#include "cc/resources/rasterizer.h"
+#include "cc/resources/resource_pool.h"
+#include "cc/resources/resource_provider.h"
+#include "cc/resources/scoped_resource.h"
+#include "cc/resources/zero_copy_raster_worker_pool.h"
+#include "cc/test/fake_output_surface.h"
+#include "cc/test/fake_output_surface_client.h"
+#include "cc/test/test_context_support.h"
+#include "cc/test/test_shared_bitmap_manager.h"
+#include "cc/test/test_web_graphics_context_3d.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+#include "third_party/khronos/GLES2/gl2.h"
+
+namespace cc {
+namespace {
+
+// GL stub for perf tests: hands out dummy non-zero ids and reports queries as
+// immediately available, so the raster pools under test never wait on a GPU.
+class PerfGLES2Interface : public gpu::gles2::GLES2InterfaceStub {
+  // Overridden from gpu::gles2::GLES2Interface:
+  virtual GLuint CreateImageCHROMIUM(GLsizei width,
+                                     GLsizei height,
+                                     GLenum internalformat,
+                                     GLenum usage) OVERRIDE {
+    return 1u;
+  }
+  virtual void GenBuffers(GLsizei n, GLuint* buffers) OVERRIDE {
+    for (GLsizei i = 0; i < n; ++i)
+      buffers[i] = 1u;
+  }
+  virtual void GenTextures(GLsizei n, GLuint* textures) OVERRIDE {
+    for (GLsizei i = 0; i < n; ++i)
+      textures[i] = 1u;
+  }
+  virtual void GetIntegerv(GLenum pname, GLint* params) OVERRIDE {
+    if (pname == GL_MAX_TEXTURE_SIZE)
+      *params = INT_MAX;
+  }
+  virtual void GenQueriesEXT(GLsizei n, GLuint* queries) OVERRIDE {
+    for (GLsizei i = 0; i < n; ++i)
+      queries[i] = 1u;
+  }
+  virtual void GetQueryObjectuivEXT(GLuint query,
+                                    GLenum pname,
+                                    GLuint* params) OVERRIDE {
+    // Pretend every query result is already available.
+    if (pname == GL_QUERY_RESULT_AVAILABLE_EXT)
+      *params = 1;
+  }
+};
+
+// ContextProvider for perf tests, backed by PerfGLES2Interface. Advertises
+// the image and sync-query capabilities so all pool configurations can run.
+class PerfContextProvider : public ContextProvider {
+ public:
+  PerfContextProvider() : context_gl_(new PerfGLES2Interface) {}
+
+  virtual bool BindToCurrentThread() OVERRIDE { return true; }
+  virtual Capabilities ContextCapabilities() OVERRIDE {
+    Capabilities capabilities;
+    capabilities.gpu.image = true;
+    capabilities.gpu.sync_query = true;
+    return capabilities;
+  }
+  virtual gpu::gles2::GLES2Interface* ContextGL() OVERRIDE {
+    return context_gl_.get();
+  }
+  virtual gpu::ContextSupport* ContextSupport() OVERRIDE { return &support_; }
+  virtual class GrContext* GrContext() OVERRIDE { return NULL; }
+  virtual bool IsContextLost() OVERRIDE { return false; }
+  virtual void VerifyContexts() OVERRIDE {}
+  virtual void DeleteCachedResources() OVERRIDE {}
+  virtual bool DestroyedOnMainThread() OVERRIDE { return false; }
+  virtual void SetLostContextCallback(const LostContextCallback& cb) OVERRIDE {}
+  virtual void SetMemoryPolicyChangedCallback(
+      const MemoryPolicyChangedCallback& cb) OVERRIDE {}
+
+ private:
+  virtual ~PerfContextProvider() {}
+
+  scoped_ptr<PerfGLES2Interface> context_gl_;
+  TestContextSupport support_;
+};
+
+// The raster worker pool implementations exercised by the parameterized
+// perf tests below.
+enum RasterWorkerPoolType {
+  RASTER_WORKER_POOL_TYPE_PIXEL_BUFFER,
+  RASTER_WORKER_POOL_TYPE_ZERO_COPY,
+  RASTER_WORKER_POOL_TYPE_ONE_COPY,
+  RASTER_WORKER_POOL_TYPE_GPU,
+  RASTER_WORKER_POOL_TYPE_BITMAP
+};
+
+// LapTimer configuration: run for up to 2s after 5 warmup laps, checking the
+// time limit every 10 laps.
+static const int kTimeLimitMillis = 2000;
+static const int kWarmupRuns = 5;
+static const int kTimeCheckInterval = 10;
+
+// No-op image decode task for perf tests: does no real work, and Reset()
+// clears the run/complete flags (inherited state — presumably from the Task
+// base; confirm) so the same task object can be scheduled again on each lap.
+class PerfImageDecodeTaskImpl : public ImageDecodeTask {
+ public:
+  PerfImageDecodeTaskImpl() {}
+
+  // Overridden from Task:
+  virtual void RunOnWorkerThread() OVERRIDE {}
+
+  // Overridden from RasterizerTask:
+  virtual void ScheduleOnOriginThread(RasterizerTaskClient* client) OVERRIDE {}
+  virtual void CompleteOnOriginThread(RasterizerTaskClient* client) OVERRIDE {}
+  virtual void RunReplyOnOriginThread() OVERRIDE { Reset(); }
+
+  void Reset() {
+    did_run_ = false;
+    did_complete_ = false;
+  }
+
+ protected:
+  virtual ~PerfImageDecodeTaskImpl() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PerfImageDecodeTaskImpl);
+};
+
+// No-op raster task for perf tests. It still acquires/releases a raster
+// buffer in the schedule/complete hooks so that path is measured, and owns
+// the resource it targets. Reset() re-arms the task for the next lap.
+class PerfRasterTaskImpl : public RasterTask {
+ public:
+  PerfRasterTaskImpl(scoped_ptr<ScopedResource> resource,
+                     ImageDecodeTask::Vector* dependencies)
+      : RasterTask(resource.get(), dependencies), resource_(resource.Pass()) {}
+
+  // Overridden from Task:
+  virtual void RunOnWorkerThread() OVERRIDE {}
+
+  // Overridden from RasterizerTask:
+  virtual void ScheduleOnOriginThread(RasterizerTaskClient* client) OVERRIDE {
+    raster_buffer_ = client->AcquireBufferForRaster(resource());
+  }
+  virtual void CompleteOnOriginThread(RasterizerTaskClient* client) OVERRIDE {
+    client->ReleaseBufferForRaster(raster_buffer_.Pass());
+  }
+  virtual void RunReplyOnOriginThread() OVERRIDE { Reset(); }
+
+  void Reset() {
+    did_run_ = false;
+    did_complete_ = false;
+  }
+
+ protected:
+  virtual ~PerfRasterTaskImpl() {}
+
+ private:
+  scoped_ptr<ScopedResource> resource_;
+  scoped_ptr<RasterBuffer> raster_buffer_;
+
+  DISALLOW_COPY_AND_ASSIGN(PerfRasterTaskImpl);
+};
+
+// Shared fixture state and task-building helpers for the raster worker pool
+// perf tests.
+class RasterWorkerPoolPerfTestBase {
+ public:
+  typedef std::vector<scoped_refptr<RasterTask> > RasterTaskVector;
+
+  enum NamedTaskSet { REQUIRED_FOR_ACTIVATION = 0, ALL = 1 };
+
+  RasterWorkerPoolPerfTestBase()
+      : context_provider_(make_scoped_refptr(new PerfContextProvider)),
+        task_runner_(new base::TestSimpleTaskRunner),
+        task_graph_runner_(new TaskGraphRunner),
+        timer_(kWarmupRuns,
+               base::TimeDelta::FromMilliseconds(kTimeLimitMillis),
+               kTimeCheckInterval) {}
+
+  // Appends |num_image_decode_tasks| no-op decode tasks.
+  void CreateImageDecodeTasks(unsigned num_image_decode_tasks,
+                              ImageDecodeTask::Vector* image_decode_tasks) {
+    for (unsigned i = 0; i < num_image_decode_tasks; ++i)
+      image_decode_tasks->push_back(new PerfImageDecodeTaskImpl);
+  }
+
+  // Appends |num_raster_tasks| no-op raster tasks, each targeting a freshly
+  // allocated 1x1 RGBA_8888 resource and depending on a copy of
+  // |image_decode_tasks|.
+  void CreateRasterTasks(unsigned num_raster_tasks,
+                         const ImageDecodeTask::Vector& image_decode_tasks,
+                         RasterTaskVector* raster_tasks) {
+    const gfx::Size size(1, 1);
+
+    for (unsigned i = 0; i < num_raster_tasks; ++i) {
+      scoped_ptr<ScopedResource> resource(
+          ScopedResource::Create(resource_provider_.get()));
+      resource->Allocate(
+          size, ResourceProvider::TextureHintImmutable, RGBA_8888);
+
+      ImageDecodeTask::Vector dependencies = image_decode_tasks;
+      raster_tasks->push_back(
+          new PerfRasterTaskImpl(resource.Pass(), &dependencies));
+    }
+  }
+
+  // Fills |queue| from |raster_tasks|, marking every other task as
+  // required-for-activation (all tasks are in the ALL set).
+  void BuildRasterTaskQueue(RasterTaskQueue* queue,
+                            const RasterTaskVector& raster_tasks) {
+    for (size_t i = 0u; i < raster_tasks.size(); ++i) {
+      bool required_for_activation = (i % 2) == 0;
+      TaskSetCollection task_set_collection;
+      task_set_collection[ALL] = true;
+      task_set_collection[REQUIRED_FOR_ACTIVATION] = required_for_activation;
+      queue->items.push_back(
+          RasterTaskQueue::Item(raster_tasks[i].get(), task_set_collection));
+    }
+  }
+
+ protected:
+  scoped_refptr<ContextProvider> context_provider_;
+  FakeOutputSurfaceClient output_surface_client_;
+  scoped_ptr<FakeOutputSurface> output_surface_;
+  scoped_ptr<ResourceProvider> resource_provider_;
+  scoped_refptr<base::TestSimpleTaskRunner> task_runner_;
+  scoped_ptr<TaskGraphRunner> task_graph_runner_;
+  LapTimer timer_;
+};
+
+class RasterWorkerPoolPerfTest
+ : public RasterWorkerPoolPerfTestBase,
+ public testing::TestWithParam<RasterWorkerPoolType>,
+ public RasterizerClient {
+ public:
+ // Overridden from testing::Test:
+ virtual void SetUp() OVERRIDE {
+ switch (GetParam()) {
+ case RASTER_WORKER_POOL_TYPE_PIXEL_BUFFER:
+ Create3dOutputSurfaceAndResourceProvider();
+ raster_worker_pool_ = PixelBufferRasterWorkerPool::Create(
+ task_runner_.get(),
+ task_graph_runner_.get(),
+ context_provider_.get(),
+ resource_provider_.get(),
+ std::numeric_limits<size_t>::max());
+ break;
+ case RASTER_WORKER_POOL_TYPE_ZERO_COPY:
+ Create3dOutputSurfaceAndResourceProvider();
+ raster_worker_pool_ =
+ ZeroCopyRasterWorkerPool::Create(task_runner_.get(),
+ task_graph_runner_.get(),
+ resource_provider_.get());
+ break;
+ case RASTER_WORKER_POOL_TYPE_ONE_COPY:
+ Create3dOutputSurfaceAndResourceProvider();
+ staging_resource_pool_ = ResourcePool::Create(
+ resource_provider_.get(), GL_TEXTURE_2D, RGBA_8888);
+ raster_worker_pool_ =
+ OneCopyRasterWorkerPool::Create(task_runner_.get(),
+ task_graph_runner_.get(),
+ context_provider_.get(),
+ resource_provider_.get(),
+ staging_resource_pool_.get());
+ break;
+ case RASTER_WORKER_POOL_TYPE_GPU:
+ Create3dOutputSurfaceAndResourceProvider();
+ raster_worker_pool_ =
+ GpuRasterWorkerPool::Create(task_runner_.get(),
+ context_provider_.get(),
+ resource_provider_.get());
+ break;
+ case RASTER_WORKER_POOL_TYPE_BITMAP:
+ CreateSoftwareOutputSurfaceAndResourceProvider();
+ raster_worker_pool_ =
+ BitmapRasterWorkerPool::Create(task_runner_.get(),
+ task_graph_runner_.get(),
+ resource_provider_.get());
+ break;
+ }
+
+ DCHECK(raster_worker_pool_);
+ raster_worker_pool_->AsRasterizer()->SetClient(this);
+ }
+ virtual void TearDown() OVERRIDE {
+ raster_worker_pool_->AsRasterizer()->Shutdown();
+ raster_worker_pool_->AsRasterizer()->CheckForCompletedTasks();
+ }
+
+ // Overriden from RasterizerClient:
+ virtual void DidFinishRunningTasks(TaskSet task_set) OVERRIDE {
+ raster_worker_pool_->AsRasterizer()->CheckForCompletedTasks();
+ }
+ virtual TaskSetCollection TasksThatShouldBeForcedToComplete() const OVERRIDE {
+ return TaskSetCollection();
+ }
+
+ void RunMessageLoopUntilAllTasksHaveCompleted() {
+ task_graph_runner_->RunUntilIdle();
+ task_runner_->RunUntilIdle();
+ }
+
+ void RunScheduleTasksTest(const std::string& test_name,
+ unsigned num_raster_tasks,
+ unsigned num_image_decode_tasks) {
+ ImageDecodeTask::Vector image_decode_tasks;
+ RasterTaskVector raster_tasks;
+ CreateImageDecodeTasks(num_image_decode_tasks, &image_decode_tasks);
+ CreateRasterTasks(num_raster_tasks, image_decode_tasks, &raster_tasks);
+
+ // Avoid unnecessary heap allocations by reusing the same queue.
+ RasterTaskQueue queue;
+
+ timer_.Reset();
+ do {
+ queue.Reset();
+ BuildRasterTaskQueue(&queue, raster_tasks);
+ raster_worker_pool_->AsRasterizer()->ScheduleTasks(&queue);
+ raster_worker_pool_->AsRasterizer()->CheckForCompletedTasks();
+ timer_.NextLap();
+ } while (!timer_.HasTimeLimitExpired());
+
+ RasterTaskQueue empty;
+ raster_worker_pool_->AsRasterizer()->ScheduleTasks(&empty);
+ RunMessageLoopUntilAllTasksHaveCompleted();
+
+ perf_test::PrintResult("schedule_tasks",
+ TestModifierString(),
+ test_name,
+ timer_.LapsPerSecond(),
+ "runs/s",
+ true);
+ }
+
+ void RunScheduleAlternateTasksTest(const std::string& test_name,
+ unsigned num_raster_tasks,
+ unsigned num_image_decode_tasks) {
+ const size_t kNumVersions = 2;
+ ImageDecodeTask::Vector image_decode_tasks[kNumVersions];
+ RasterTaskVector raster_tasks[kNumVersions];
+ for (size_t i = 0; i < kNumVersions; ++i) {
+ CreateImageDecodeTasks(num_image_decode_tasks, &image_decode_tasks[i]);
+ CreateRasterTasks(
+ num_raster_tasks, image_decode_tasks[i], &raster_tasks[i]);
+ }
+
+ // Avoid unnecessary heap allocations by reusing the same queue.
+ RasterTaskQueue queue;
+
+ size_t count = 0;
+ timer_.Reset();
+ do {
+ queue.Reset();
+ BuildRasterTaskQueue(&queue, raster_tasks[count % kNumVersions]);
+ raster_worker_pool_->AsRasterizer()->ScheduleTasks(&queue);
+ raster_worker_pool_->AsRasterizer()->CheckForCompletedTasks();
+ ++count;
+ timer_.NextLap();
+ } while (!timer_.HasTimeLimitExpired());
+
+ RasterTaskQueue empty;
+ raster_worker_pool_->AsRasterizer()->ScheduleTasks(&empty);
+ RunMessageLoopUntilAllTasksHaveCompleted();
+
+ perf_test::PrintResult("schedule_alternate_tasks",
+ TestModifierString(),
+ test_name,
+ timer_.LapsPerSecond(),
+ "runs/s",
+ true);
+ }
+
+  // Measures end-to-end throughput of scheduling tasks AND waiting for them
+  // to finish running on each lap (unlike RunScheduleTasksTest, which only
+  // polls for completed tasks without blocking).
+  void RunScheduleAndExecuteTasksTest(const std::string& test_name,
+                                      unsigned num_raster_tasks,
+                                      unsigned num_image_decode_tasks) {
+    ImageDecodeTask::Vector image_decode_tasks;
+    RasterTaskVector raster_tasks;
+    CreateImageDecodeTasks(num_image_decode_tasks, &image_decode_tasks);
+    CreateRasterTasks(num_raster_tasks, image_decode_tasks, &raster_tasks);
+
+    // Avoid unnecessary heap allocations by reusing the same queue.
+    RasterTaskQueue queue;
+
+    timer_.Reset();
+    do {
+      queue.Reset();
+      BuildRasterTaskQueue(&queue, raster_tasks);
+      raster_worker_pool_->AsRasterizer()->ScheduleTasks(&queue);
+      // Block until every scheduled task has completed before timing the lap.
+      RunMessageLoopUntilAllTasksHaveCompleted();
+      timer_.NextLap();
+    } while (!timer_.HasTimeLimitExpired());
+
+    // Cancel remaining work and drain reply callbacks before reporting.
+    RasterTaskQueue empty;
+    raster_worker_pool_->AsRasterizer()->ScheduleTasks(&empty);
+    RunMessageLoopUntilAllTasksHaveCompleted();
+
+    perf_test::PrintResult("schedule_and_execute_tasks",
+                           TestModifierString(),
+                           test_name,
+                           timer_.LapsPerSecond(),
+                           "runs/s",
+                           true);
+  }
+
+ private:
+  // Builds a GL-backed FakeOutputSurface and a ResourceProvider on top of it.
+  // Used by every worker-pool type except the software/bitmap one.
+  void Create3dOutputSurfaceAndResourceProvider() {
+    output_surface_ = FakeOutputSurface::Create3d(context_provider_).Pass();
+    CHECK(output_surface_->BindToClient(&output_surface_client_));
+    resource_provider_ =
+        ResourceProvider::Create(
+            output_surface_.get(), NULL, NULL, 0, false, 1, false).Pass();
+  }
+
+  // Builds a software FakeOutputSurface plus a ResourceProvider that shares
+  // bitmaps through |shared_bitmap_manager_|. Used by the bitmap pool type.
+  void CreateSoftwareOutputSurfaceAndResourceProvider() {
+    output_surface_ = FakeOutputSurface::CreateSoftware(
+        make_scoped_ptr(new SoftwareOutputDevice));
+    CHECK(output_surface_->BindToClient(&output_surface_client_));
+    resource_provider_ = ResourceProvider::Create(output_surface_.get(),
+                                                  &shared_bitmap_manager_,
+                                                  NULL,
+                                                  0,
+                                                  false,
+                                                  1,
+                                                  false).Pass();
+  }
+
+  // Maps the current test parameter to a suffix used to label perf results,
+  // so numbers for different worker-pool implementations stay distinct.
+  std::string TestModifierString() const {
+    switch (GetParam()) {
+      case RASTER_WORKER_POOL_TYPE_PIXEL_BUFFER:
+        return std::string("_pixel_raster_worker_pool");
+      case RASTER_WORKER_POOL_TYPE_ZERO_COPY:
+        return std::string("_zero_copy_raster_worker_pool");
+      case RASTER_WORKER_POOL_TYPE_ONE_COPY:
+        return std::string("_one_copy_raster_worker_pool");
+      case RASTER_WORKER_POOL_TYPE_GPU:
+        return std::string("_gpu_raster_worker_pool");
+      case RASTER_WORKER_POOL_TYPE_BITMAP:
+        return std::string("_bitmap_raster_worker_pool");
+    }
+    NOTREACHED();
+    return std::string();
+  }
+
+ scoped_ptr<ResourcePool> staging_resource_pool_;
+ scoped_ptr<RasterWorkerPool> raster_worker_pool_;
+ TestSharedBitmapManager shared_bitmap_manager_;
+};
+
+// Benchmarks scheduling for various raster/decode task counts ("R_D" labels:
+// R raster tasks, D image-decode dependencies per raster task).
+TEST_P(RasterWorkerPoolPerfTest, ScheduleTasks) {
+  RunScheduleTasksTest("1_0", 1, 0);
+  RunScheduleTasksTest("32_0", 32, 0);
+  RunScheduleTasksTest("1_1", 1, 1);
+  RunScheduleTasksTest("32_1", 32, 1);
+  RunScheduleTasksTest("1_4", 1, 4);
+  RunScheduleTasksTest("32_4", 32, 4);
+}
+
+// Benchmarks re-scheduling with alternating task sets for the same
+// raster/decode count matrix as ScheduleTasks.
+TEST_P(RasterWorkerPoolPerfTest, ScheduleAlternateTasks) {
+  RunScheduleAlternateTasksTest("1_0", 1, 0);
+  RunScheduleAlternateTasksTest("32_0", 32, 0);
+  RunScheduleAlternateTasksTest("1_1", 1, 1);
+  RunScheduleAlternateTasksTest("32_1", 32, 1);
+  RunScheduleAlternateTasksTest("1_4", 1, 4);
+  RunScheduleAlternateTasksTest("32_4", 32, 4);
+}
+
+// Benchmarks schedule-plus-execute round trips for the same task-count
+// matrix as ScheduleTasks.
+TEST_P(RasterWorkerPoolPerfTest, ScheduleAndExecuteTasks) {
+  RunScheduleAndExecuteTasksTest("1_0", 1, 0);
+  RunScheduleAndExecuteTasksTest("32_0", 32, 0);
+  RunScheduleAndExecuteTasksTest("1_1", 1, 1);
+  RunScheduleAndExecuteTasksTest("32_1", 32, 1);
+  RunScheduleAndExecuteTasksTest("1_4", 1, 4);
+  RunScheduleAndExecuteTasksTest("32_4", 32, 4);
+}
+
+INSTANTIATE_TEST_CASE_P(RasterWorkerPoolPerfTests,
+ RasterWorkerPoolPerfTest,
+ ::testing::Values(RASTER_WORKER_POOL_TYPE_PIXEL_BUFFER,
+ RASTER_WORKER_POOL_TYPE_ZERO_COPY,
+ RASTER_WORKER_POOL_TYPE_ONE_COPY,
+ RASTER_WORKER_POOL_TYPE_GPU,
+ RASTER_WORKER_POOL_TYPE_BITMAP));
+
+// Non-parameterized perf fixture for code paths shared by all worker-pool
+// implementations (currently only RasterTaskQueue construction); uses a
+// single GL-backed output surface / resource provider.
+class RasterWorkerPoolCommonPerfTest : public RasterWorkerPoolPerfTestBase,
+                                       public testing::Test {
+ public:
+  // Overridden from testing::Test:
+  virtual void SetUp() OVERRIDE {
+    output_surface_ = FakeOutputSurface::Create3d(context_provider_).Pass();
+    CHECK(output_surface_->BindToClient(&output_surface_client_));
+    resource_provider_ =
+        ResourceProvider::Create(
+            output_surface_.get(), NULL, NULL, 0, false, 1, false).Pass();
+  }
+
+  // Measures how fast a RasterTaskQueue can be populated from an existing
+  // task vector, independent of any scheduling or rasterization work.
+  void RunBuildRasterTaskQueueTest(const std::string& test_name,
+                                   unsigned num_raster_tasks,
+                                   unsigned num_image_decode_tasks) {
+    ImageDecodeTask::Vector image_decode_tasks;
+    RasterTaskVector raster_tasks;
+    CreateImageDecodeTasks(num_image_decode_tasks, &image_decode_tasks);
+    CreateRasterTasks(num_raster_tasks, image_decode_tasks, &raster_tasks);
+
+    // Avoid unnecessary heap allocations by reusing the same queue.
+    RasterTaskQueue queue;
+
+    timer_.Reset();
+    do {
+      queue.Reset();
+      BuildRasterTaskQueue(&queue, raster_tasks);
+      timer_.NextLap();
+    } while (!timer_.HasTimeLimitExpired());
+
+    perf_test::PrintResult("build_raster_task_queue",
+                           "",
+                           test_name,
+                           timer_.LapsPerSecond(),
+                           "runs/s",
+                           true);
+  }
+};
+
+// Benchmarks queue construction for the same raster/decode count matrix as
+// the parameterized tests above.
+TEST_F(RasterWorkerPoolCommonPerfTest, BuildRasterTaskQueue) {
+  RunBuildRasterTaskQueueTest("1_0", 1, 0);
+  RunBuildRasterTaskQueueTest("32_0", 32, 0);
+  RunBuildRasterTaskQueueTest("1_1", 1, 1);
+  RunBuildRasterTaskQueueTest("32_1", 32, 1);
+  RunBuildRasterTaskQueueTest("1_4", 1, 4);
+  RunBuildRasterTaskQueueTest("32_4", 32, 4);
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/raster_worker_pool_unittest.cc b/cc/resources/raster_worker_pool_unittest.cc
new file mode 100644
index 0000000..86755f8
--- /dev/null
+++ b/cc/resources/raster_worker_pool_unittest.cc
@@ -0,0 +1,402 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/raster_worker_pool.h"
+
+#include <limits>
+#include <vector>
+
+#include "base/cancelable_callback.h"
+#include "cc/resources/bitmap_raster_worker_pool.h"
+#include "cc/resources/gpu_raster_worker_pool.h"
+#include "cc/resources/one_copy_raster_worker_pool.h"
+#include "cc/resources/picture_pile.h"
+#include "cc/resources/picture_pile_impl.h"
+#include "cc/resources/pixel_buffer_raster_worker_pool.h"
+#include "cc/resources/raster_buffer.h"
+#include "cc/resources/rasterizer.h"
+#include "cc/resources/resource_pool.h"
+#include "cc/resources/resource_provider.h"
+#include "cc/resources/scoped_resource.h"
+#include "cc/resources/zero_copy_raster_worker_pool.h"
+#include "cc/test/fake_output_surface.h"
+#include "cc/test/fake_output_surface_client.h"
+#include "cc/test/test_shared_bitmap_manager.h"
+#include "cc/test/test_web_graphics_context_3d.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cc {
+namespace {
+
+// Transfer-buffer budget given to the pixel-buffer worker pool; chosen small
+// so the LargeResources test can exceed it with a single resource.
+const size_t kMaxTransferBufferUsageBytes = 10000U;
+// A resource of this dimension^2 * 4 must be greater than the above transfer
+// buffer constant.
+const size_t kLargeResourceDimension = 1000U;
+
+// The worker-pool implementations exercised by the parameterized tests below.
+enum RasterWorkerPoolType {
+  RASTER_WORKER_POOL_TYPE_PIXEL_BUFFER,
+  RASTER_WORKER_POOL_TYPE_ZERO_COPY,
+  RASTER_WORKER_POOL_TYPE_ONE_COPY,
+  RASTER_WORKER_POOL_TYPE_GPU,
+  RASTER_WORKER_POOL_TYPE_BITMAP
+};
+
+// Minimal RasterTask that fills its resource with solid white and reports
+// completion (and whether it was canceled) through a caller-supplied Reply.
+class TestRasterTaskImpl : public RasterTask {
+ public:
+  typedef base::Callback<
+      void(const PicturePileImpl::Analysis& analysis, bool was_canceled)> Reply;
+
+  TestRasterTaskImpl(const Resource* resource,
+                     const Reply& reply,
+                     ImageDecodeTask::Vector* dependencies)
+      : RasterTask(resource, dependencies), reply_(reply) {}
+
+  // Overridden from Task:
+  virtual void RunOnWorkerThread() OVERRIDE {
+    // Uses the buffer acquired in ScheduleOnOriginThread().
+    skia::RefPtr<SkCanvas> canvas = raster_buffer_->AcquireSkCanvas();
+    DCHECK(canvas);
+    canvas->drawColor(SK_ColorWHITE);
+    raster_buffer_->ReleaseSkCanvas(canvas);
+  }
+
+  // Overridden from RasterizerTask:
+  virtual void ScheduleOnOriginThread(RasterizerTaskClient* client) OVERRIDE {
+    raster_buffer_ = client->AcquireBufferForRaster(resource());
+  }
+  virtual void CompleteOnOriginThread(RasterizerTaskClient* client) OVERRIDE {
+    client->ReleaseBufferForRaster(raster_buffer_.Pass());
+  }
+  virtual void RunReplyOnOriginThread() OVERRIDE {
+    // Reports an empty analysis; cancellation is derived from run state.
+    reply_.Run(PicturePileImpl::Analysis(), !HasFinishedRunning());
+  }
+
+ protected:
+  virtual ~TestRasterTaskImpl() {}
+
+ private:
+  const Reply reply_;
+  scoped_ptr<RasterBuffer> raster_buffer_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestRasterTaskImpl);
+};
+
+// TestRasterTaskImpl variant that blocks on an externally held lock before
+// rasterizing, letting tests keep a task "running" for a controlled period.
+// Its reply is intentionally a no-op so it never records a result.
+class BlockingTestRasterTaskImpl : public TestRasterTaskImpl {
+ public:
+  BlockingTestRasterTaskImpl(const Resource* resource,
+                             const Reply& reply,
+                             base::Lock* lock,
+                             ImageDecodeTask::Vector* dependencies)
+      : TestRasterTaskImpl(resource, reply, dependencies), lock_(lock) {}
+
+  // Overridden from Task:
+  virtual void RunOnWorkerThread() OVERRIDE {
+    // Blocks until the test releases |lock_|, then rasterizes normally.
+    base::AutoLock lock(*lock_);
+    TestRasterTaskImpl::RunOnWorkerThread();
+  }
+
+  // Overridden from RasterizerTask:
+  virtual void RunReplyOnOriginThread() OVERRIDE {}
+
+ protected:
+  virtual ~BlockingTestRasterTaskImpl() {}
+
+ private:
+  base::Lock* lock_;  // Not owned; must outlive the task.
+
+  DISALLOW_COPY_AND_ASSIGN(BlockingTestRasterTaskImpl);
+};
+
+// Parameterized fixture that constructs one of the five worker-pool
+// implementations, schedules test raster tasks against it, and records
+// per-task completion results. Acts as its own RasterizerClient so it can
+// quit the message loop once the ALL task set finishes.
+class RasterWorkerPoolTest
+    : public testing::TestWithParam<RasterWorkerPoolType>,
+      public RasterizerClient {
+ public:
+  // Completion record appended by OnTaskCompleted() for each finished task.
+  struct RasterTaskResult {
+    unsigned id;
+    bool canceled;
+  };
+
+  typedef std::vector<scoped_refptr<RasterTask> > RasterTaskVector;
+
+  enum NamedTaskSet { REQUIRED_FOR_ACTIVATION = 0, ALL = 1 };
+
+  RasterWorkerPoolTest()
+      : context_provider_(TestContextProvider::Create()),
+        timeout_seconds_(5),
+        timed_out_(false) {}
+
+  // Overridden from testing::Test:
+  virtual void SetUp() OVERRIDE {
+    // Builds the output surface / resource provider pair appropriate for the
+    // pool type under test, then the pool itself.
+    switch (GetParam()) {
+      case RASTER_WORKER_POOL_TYPE_PIXEL_BUFFER:
+        Create3dOutputSurfaceAndResourceProvider();
+        raster_worker_pool_ = PixelBufferRasterWorkerPool::Create(
+            base::MessageLoopProxy::current().get(),
+            RasterWorkerPool::GetTaskGraphRunner(),
+            context_provider_.get(),
+            resource_provider_.get(),
+            kMaxTransferBufferUsageBytes);
+        break;
+      case RASTER_WORKER_POOL_TYPE_ZERO_COPY:
+        Create3dOutputSurfaceAndResourceProvider();
+        raster_worker_pool_ = ZeroCopyRasterWorkerPool::Create(
+            base::MessageLoopProxy::current().get(),
+            RasterWorkerPool::GetTaskGraphRunner(),
+            resource_provider_.get());
+        break;
+      case RASTER_WORKER_POOL_TYPE_ONE_COPY:
+        Create3dOutputSurfaceAndResourceProvider();
+        // One-copy rasterization stages into a separate resource pool first.
+        staging_resource_pool_ = ResourcePool::Create(
+            resource_provider_.get(), GL_TEXTURE_2D, RGBA_8888);
+        raster_worker_pool_ = OneCopyRasterWorkerPool::Create(
+            base::MessageLoopProxy::current().get(),
+            RasterWorkerPool::GetTaskGraphRunner(),
+            context_provider_.get(),
+            resource_provider_.get(),
+            staging_resource_pool_.get());
+        break;
+      case RASTER_WORKER_POOL_TYPE_GPU:
+        Create3dOutputSurfaceAndResourceProvider();
+        raster_worker_pool_ =
+            GpuRasterWorkerPool::Create(base::MessageLoopProxy::current().get(),
+                                        context_provider_.get(),
+                                        resource_provider_.get());
+        break;
+      case RASTER_WORKER_POOL_TYPE_BITMAP:
+        CreateSoftwareOutputSurfaceAndResourceProvider();
+        raster_worker_pool_ = BitmapRasterWorkerPool::Create(
+            base::MessageLoopProxy::current().get(),
+            RasterWorkerPool::GetTaskGraphRunner(),
+            resource_provider_.get());
+        break;
+    }
+
+    DCHECK(raster_worker_pool_);
+    raster_worker_pool_->AsRasterizer()->SetClient(this);
+  }
+
+  virtual void TearDown() OVERRIDE {
+    // Cancel outstanding work, then flush reply callbacks so tasks destruct
+    // in a completed state.
+    raster_worker_pool_->AsRasterizer()->Shutdown();
+    raster_worker_pool_->AsRasterizer()->CheckForCompletedTasks();
+  }
+
+  // Overridden from RasterizerClient:
+  virtual void DidFinishRunningTasks(TaskSet task_set) OVERRIDE {
+    // Only the ALL set ends the test's message-loop wait.
+    if (task_set == ALL) {
+      raster_worker_pool_->AsRasterizer()->CheckForCompletedTasks();
+      base::MessageLoop::current()->Quit();
+    }
+  }
+  virtual TaskSetCollection TasksThatShouldBeForcedToComplete() const OVERRIDE {
+    return TaskSetCollection();
+  }
+
+  // Spins the message loop until DidFinishRunningTasks(ALL) quits it, or
+  // fails the test if |timeout_seconds_| elapses first.
+  void RunMessageLoopUntilAllTasksHaveCompleted() {
+    if (timeout_seconds_) {
+      timeout_.Reset(
+          base::Bind(&RasterWorkerPoolTest::OnTimeout, base::Unretained(this)));
+      base::MessageLoopProxy::current()->PostDelayedTask(
+          FROM_HERE,
+          timeout_.callback(),
+          base::TimeDelta::FromSeconds(timeout_seconds_));
+    }
+
+    base::MessageLoop::current()->Run();
+
+    timeout_.Cancel();
+
+    ASSERT_FALSE(timed_out_) << "Test timed out";
+  }
+
+  // Schedules every task in |tasks_|, all tagged as part of the ALL set.
+  void ScheduleTasks() {
+    RasterTaskQueue queue;
+
+    for (RasterTaskVector::const_iterator it = tasks_.begin();
+         it != tasks_.end();
+         ++it) {
+      TaskSetCollection task_sets;
+      task_sets[ALL] = true;
+      queue.items.push_back(RasterTaskQueue::Item(it->get(), task_sets));
+    }
+
+    raster_worker_pool_->AsRasterizer()->ScheduleTasks(&queue);
+  }
+
+  // Appends a TestRasterTaskImpl targeting a freshly allocated resource of
+  // |size|; the resource's ownership passes to the completion callback.
+  void AppendTask(unsigned id, const gfx::Size& size) {
+    scoped_ptr<ScopedResource> resource(
+        ScopedResource::Create(resource_provider_.get()));
+    resource->Allocate(size, ResourceProvider::TextureHintImmutable, RGBA_8888);
+    const Resource* const_resource = resource.get();
+
+    ImageDecodeTask::Vector empty;
+    tasks_.push_back(new TestRasterTaskImpl(
+        const_resource,
+        base::Bind(&RasterWorkerPoolTest::OnTaskCompleted,
+                   base::Unretained(this),
+                   base::Passed(&resource),
+                   id),
+        &empty));
+  }
+
+  void AppendTask(unsigned id) { AppendTask(id, gfx::Size(1, 1)); }
+
+  // Like AppendTask() but the task blocks on |lock| until the test releases
+  // it; used to keep a task pending across ScheduleTasks() calls.
+  void AppendBlockingTask(unsigned id, base::Lock* lock) {
+    const gfx::Size size(1, 1);
+
+    scoped_ptr<ScopedResource> resource(
+        ScopedResource::Create(resource_provider_.get()));
+    resource->Allocate(size, ResourceProvider::TextureHintImmutable, RGBA_8888);
+    const Resource* const_resource = resource.get();
+
+    ImageDecodeTask::Vector empty;
+    tasks_.push_back(new BlockingTestRasterTaskImpl(
+        const_resource,
+        base::Bind(&RasterWorkerPoolTest::OnTaskCompleted,
+                   base::Unretained(this),
+                   base::Passed(&resource),
+                   id),
+        lock,
+        &empty));
+  }
+
+  const std::vector<RasterTaskResult>& completed_tasks() const {
+    return completed_tasks_;
+  }
+
+ private:
+  void Create3dOutputSurfaceAndResourceProvider() {
+    output_surface_ = FakeOutputSurface::Create3d(context_provider_).Pass();
+    CHECK(output_surface_->BindToClient(&output_surface_client_));
+    // Sync queries are required by the one-copy path.
+    TestWebGraphicsContext3D* context3d = context_provider_->TestContext3d();
+    context3d->set_support_sync_query(true);
+    resource_provider_ =
+        ResourceProvider::Create(
+            output_surface_.get(), NULL, NULL, 0, false, 1, false).Pass();
+  }
+
+  void CreateSoftwareOutputSurfaceAndResourceProvider() {
+    output_surface_ = FakeOutputSurface::CreateSoftware(
+        make_scoped_ptr(new SoftwareOutputDevice));
+    CHECK(output_surface_->BindToClient(&output_surface_client_));
+    resource_provider_ = ResourceProvider::Create(output_surface_.get(),
+                                                  &shared_bitmap_manager_,
+                                                  NULL,
+                                                  0,
+                                                  false,
+                                                  1,
+                                                  false).Pass();
+  }
+
+  // Reply callback for test tasks; |resource| is destroyed on return.
+  void OnTaskCompleted(scoped_ptr<ScopedResource> resource,
+                       unsigned id,
+                       const PicturePileImpl::Analysis& analysis,
+                       bool was_canceled) {
+    RasterTaskResult result;
+    result.id = id;
+    result.canceled = was_canceled;
+    completed_tasks_.push_back(result);
+  }
+
+  void OnTimeout() {
+    timed_out_ = true;
+    base::MessageLoop::current()->Quit();
+  }
+
+ protected:
+  scoped_refptr<TestContextProvider> context_provider_;
+  FakeOutputSurfaceClient output_surface_client_;
+  scoped_ptr<FakeOutputSurface> output_surface_;
+  scoped_ptr<ResourceProvider> resource_provider_;
+  scoped_ptr<ResourcePool> staging_resource_pool_;
+  scoped_ptr<RasterWorkerPool> raster_worker_pool_;
+  TestSharedBitmapManager shared_bitmap_manager_;
+  base::CancelableClosure timeout_;
+  int timeout_seconds_;
+  bool timed_out_;
+  RasterTaskVector tasks_;
+  std::vector<RasterTaskResult> completed_tasks_;
+};
+
+// Two trivial tasks scheduled together should both complete uncanceled.
+TEST_P(RasterWorkerPoolTest, Basic) {
+  AppendTask(0u);
+  AppendTask(1u);
+  ScheduleTasks();
+
+  RunMessageLoopUntilAllTasksHaveCompleted();
+
+  ASSERT_EQ(2u, completed_tasks().size());
+  EXPECT_FALSE(completed_tasks()[0].canceled);
+  EXPECT_FALSE(completed_tasks()[1].canceled);
+}
+
+// Forces every map-image/map-buffer GL call to fail and verifies the task
+// still completes (not canceled). Skipped for the bitmap pool, which does
+// not use a GL context.
+TEST_P(RasterWorkerPoolTest, FailedMapResource) {
+  if (GetParam() == RASTER_WORKER_POOL_TYPE_BITMAP)
+    return;
+
+  TestWebGraphicsContext3D* context3d = context_provider_->TestContext3d();
+  context3d->set_times_map_image_chromium_succeeds(0);
+  context3d->set_times_map_buffer_chromium_succeeds(0);
+  AppendTask(0u);
+  ScheduleTasks();
+
+  RunMessageLoopUntilAllTasksHaveCompleted();
+
+  ASSERT_EQ(1u, completed_tasks().size());
+  EXPECT_FALSE(completed_tasks()[0].canceled);
+}
+
+// This test checks that replacing a pending raster task with another does
+// not prevent the DidFinishRunningTasks notification from being sent.
+TEST_P(RasterWorkerPoolTest, FalseThrottling) {
+  base::Lock lock;
+
+  // Schedule a task that is prevented from completing with a lock.
+  lock.Acquire();
+  AppendBlockingTask(0u, &lock);
+  ScheduleTasks();
+
+  // Schedule another task to replace the still-pending task. Because the old
+  // task is not a throttled task in the new task set, it should not prevent
+  // DidFinishRunningTasks from getting signaled.
+  RasterTaskVector tasks;
+  tasks.swap(tasks_);
+  AppendTask(1u);
+  ScheduleTasks();
+
+  // Unblock the first task to allow the second task to complete.
+  lock.Release();
+
+  // This would hang (and hit the fixture timeout) if the notification were
+  // suppressed by the replaced-but-still-running task.
+  RunMessageLoopUntilAllTasksHaveCompleted();
+}
+
+// Schedules tasks whose resources individually exceed the transfer-buffer
+// throttle limit; all must still get scheduled and complete.
+TEST_P(RasterWorkerPoolTest, LargeResources) {
+  gfx::Size size(kLargeResourceDimension, kLargeResourceDimension);
+
+  {
+    // Verify a resource of this size is larger than the transfer buffer.
+    scoped_ptr<ScopedResource> resource(
+        ScopedResource::Create(resource_provider_.get()));
+    resource->Allocate(size, ResourceProvider::TextureHintImmutable, RGBA_8888);
+    EXPECT_GE(resource->bytes(), kMaxTransferBufferUsageBytes);
+  }
+
+  AppendTask(0u, size);
+  AppendTask(1u, size);
+  AppendTask(2u, size);
+  ScheduleTasks();
+
+  // This will time out if a resource that is larger than the throttle limit
+  // never gets scheduled.
+  RunMessageLoopUntilAllTasksHaveCompleted();
+}
+
+INSTANTIATE_TEST_CASE_P(RasterWorkerPoolTests,
+ RasterWorkerPoolTest,
+ ::testing::Values(RASTER_WORKER_POOL_TYPE_PIXEL_BUFFER,
+ RASTER_WORKER_POOL_TYPE_ZERO_COPY,
+ RASTER_WORKER_POOL_TYPE_ONE_COPY,
+ RASTER_WORKER_POOL_TYPE_GPU,
+ RASTER_WORKER_POOL_TYPE_BITMAP));
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/rasterizer.cc b/cc/resources/rasterizer.cc
new file mode 100644
index 0000000..4ce8987
--- /dev/null
+++ b/cc/resources/rasterizer.cc
@@ -0,0 +1,80 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/rasterizer.h"
+
+#include <algorithm>
+
+namespace cc {
+
+RasterizerTask::RasterizerTask() : did_schedule_(false), did_complete_(false) {}
+
+RasterizerTask::~RasterizerTask() {
+  // Debugging CHECKs to help track down a use-after-free.
+  CHECK(!did_schedule_);
+  CHECK(!did_run_ || did_complete_);
+}
+
+// Default downcasts: base tasks are neither decode nor raster tasks.
+ImageDecodeTask* RasterizerTask::AsImageDecodeTask() { return NULL; }
+
+RasterTask* RasterizerTask::AsRasterTask() { return NULL; }
+
+void RasterizerTask::WillSchedule() { DCHECK(!did_schedule_); }
+
+// Marks the task scheduled; completion state resets so the task can go
+// through another schedule/complete cycle.
+void RasterizerTask::DidSchedule() {
+  did_schedule_ = true;
+  did_complete_ = false;
+}
+
+bool RasterizerTask::HasBeenScheduled() const { return did_schedule_; }
+
+void RasterizerTask::WillComplete() { DCHECK(!did_complete_); }
+
+// Marks the task completed; a task must be scheduled before it can complete.
+void RasterizerTask::DidComplete() {
+  DCHECK(did_schedule_);
+  DCHECK(!did_complete_);
+  did_schedule_ = false;
+  did_complete_ = true;
+}
+
+bool RasterizerTask::HasCompleted() const { return did_complete_; }
+
+ImageDecodeTask::ImageDecodeTask() {}
+
+ImageDecodeTask::~ImageDecodeTask() {}
+
+// Identifies this task as an image-decode task for type-checked downcasts.
+ImageDecodeTask* ImageDecodeTask::AsImageDecodeTask() { return this; }
+
+// Takes over the caller's dependency vector (swap avoids a copy); |resource|
+// is not owned and must outlive the task.
+RasterTask::RasterTask(const Resource* resource,
+                       ImageDecodeTask::Vector* dependencies)
+    : resource_(resource) {
+  dependencies_.swap(*dependencies);
+}
+
+RasterTask::~RasterTask() {}
+
+// Identifies this task as a raster task for type-checked downcasts.
+RasterTask* RasterTask::AsRasterTask() { return this; }
+
+// An item must belong to at least one task set, otherwise it could never be
+// reported finished.
+RasterTaskQueue::Item::Item(RasterTask* task,
+                            const TaskSetCollection& task_sets)
+    : task(task), task_sets(task_sets) {
+  DCHECK(task_sets.any());
+}
+
+RasterTaskQueue::Item::~Item() {}
+
+RasterTaskQueue::RasterTaskQueue() {
+}
+
+RasterTaskQueue::~RasterTaskQueue() {}
+
+// O(1) exchange of queue contents with |other|.
+void RasterTaskQueue::Swap(RasterTaskQueue* other) {
+  items.swap(other->items);
+}
+
+// Clears the items; the vector's capacity is retained for reuse.
+void RasterTaskQueue::Reset() {
+  items.clear();
+}
+
+} // namespace cc
diff --git a/cc/resources/rasterizer.h b/cc/resources/rasterizer.h
new file mode 100644
index 0000000..72b50f2
--- /dev/null
+++ b/cc/resources/rasterizer.h
@@ -0,0 +1,164 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RASTERIZER_H_
+#define CC_RESOURCES_RASTERIZER_H_
+
+#include <bitset>
+#include <vector>
+
+#include "base/callback.h"
+#include "cc/resources/resource_format.h"
+#include "cc/resources/task_graph_runner.h"
+
+namespace cc {
+class ImageDecodeTask;
+class RasterTask;
+class Resource;
+class RasterBuffer;
+
+// Interface through which tasks acquire and return the buffer they raster
+// into; implemented by the rasterizer that schedules the tasks.
+class CC_EXPORT RasterizerTaskClient {
+ public:
+  virtual scoped_ptr<RasterBuffer> AcquireBufferForRaster(
+      const Resource* resource) = 0;
+  virtual void ReleaseBufferForRaster(scoped_ptr<RasterBuffer> buffer) = 0;
+
+ protected:
+  virtual ~RasterizerTaskClient() {}
+};
+
+// Base class for tasks run by a Rasterizer. Schedule/complete hooks run on
+// the origin thread around the worker-thread Run; the schedule/complete
+// state flags back the lifecycle CHECKs in the destructor.
+class CC_EXPORT RasterizerTask : public Task {
+ public:
+  typedef std::vector<scoped_refptr<RasterizerTask> > Vector;
+
+  virtual void ScheduleOnOriginThread(RasterizerTaskClient* client) = 0;
+  virtual void CompleteOnOriginThread(RasterizerTaskClient* client) = 0;
+  virtual void RunReplyOnOriginThread() = 0;
+
+  // Type-checking downcast routines.
+  virtual ImageDecodeTask* AsImageDecodeTask();
+  virtual RasterTask* AsRasterTask();
+
+  void WillSchedule();
+  void DidSchedule();
+  bool HasBeenScheduled() const;
+
+  void WillComplete();
+  void DidComplete();
+  bool HasCompleted() const;
+
+ protected:
+  RasterizerTask();
+  virtual ~RasterizerTask();
+
+  bool did_schedule_;
+  bool did_complete_;
+};
+
+// A rasterizer task that decodes an image; raster tasks may list these as
+// dependencies.
+class CC_EXPORT ImageDecodeTask : public RasterizerTask {
+ public:
+  typedef std::vector<scoped_refptr<ImageDecodeTask> > Vector;
+
+  // Overridden from RasterizerTask:
+  virtual ImageDecodeTask* AsImageDecodeTask() OVERRIDE;
+
+ protected:
+  ImageDecodeTask();
+  virtual ~ImageDecodeTask();
+};
+
+// A rasterizer task that rasters into a target resource after its image
+// decode dependencies have run. The resource pointer is not owned.
+class CC_EXPORT RasterTask : public RasterizerTask {
+ public:
+  typedef std::vector<scoped_refptr<RasterTask> > Vector;
+
+  // Overridden from RasterizerTask:
+  virtual RasterTask* AsRasterTask() OVERRIDE;
+
+  const Resource* resource() const { return resource_; }
+  const ImageDecodeTask::Vector& dependencies() const { return dependencies_; }
+
+ protected:
+  RasterTask(const Resource* resource, ImageDecodeTask::Vector* dependencies);
+  virtual ~RasterTask();
+
+ private:
+  const Resource* resource_;
+  ImageDecodeTask::Vector dependencies_;
+};
+
+static const size_t kNumberOfTaskSets = 2;
+typedef size_t TaskSet;
+typedef std::bitset<kNumberOfTaskSets> TaskSetCollection;
+
+// Notification interface implemented by whoever schedules tasks on a
+// Rasterizer; called once per finished task set.
+class CC_EXPORT RasterizerClient {
+ public:
+  virtual void DidFinishRunningTasks(TaskSet task_set) = 0;
+  virtual TaskSetCollection TasksThatShouldBeForcedToComplete() const = 0;
+
+ protected:
+  virtual ~RasterizerClient() {}
+};
+
+// Ordered list of raster tasks to hand to Rasterizer::ScheduleTasks(), each
+// tagged with the task sets it belongs to. Plain struct: callers build the
+// |items| vector directly.
+struct CC_EXPORT RasterTaskQueue {
+  struct CC_EXPORT Item {
+    // Predicate for find_if-style searches by task pointer.
+    class TaskComparator {
+     public:
+      explicit TaskComparator(const RasterTask* task) : task_(task) {}
+
+      bool operator()(const Item& item) const { return item.task == task_; }
+
+     private:
+      const RasterTask* task_;
+    };
+
+    typedef std::vector<Item> Vector;
+
+    Item(RasterTask* task, const TaskSetCollection& task_sets);
+    ~Item();
+
+    RasterTask* task;  // Not owned.
+    TaskSetCollection task_sets;
+  };
+
+  RasterTaskQueue();
+  ~RasterTaskQueue();
+
+  void Swap(RasterTaskQueue* other);
+  void Reset();
+
+  Item::Vector items;
+};
+
+// This interface can be used to schedule and run raster tasks. The client will
+// be notified asynchronously when the set of tasks marked as "required for
+// activation" have finished running and when all scheduled tasks have finished
+// running. The client can call CheckForCompletedTasks() at any time to dispatch
+// pending completion callbacks for all tasks that have finished running.
+class CC_EXPORT Rasterizer {
+ public:
+  // Set the client instance to be notified when finished running tasks.
+  virtual void SetClient(RasterizerClient* client) = 0;
+
+  // Tells the worker pool to shutdown after canceling all previously scheduled
+  // tasks. Reply callbacks are still guaranteed to run when
+  // CheckForCompletedTasks() is called.
+  virtual void Shutdown() = 0;
+
+  // Schedule running of raster tasks in |queue| and all dependencies.
+  // Previously scheduled tasks that are not in |queue| will be canceled unless
+  // already running. Once scheduled, reply callbacks are guaranteed to run for
+  // all tasks even if they later get canceled by another call to
+  // ScheduleTasks().
+  virtual void ScheduleTasks(RasterTaskQueue* queue) = 0;
+
+  // Check for completed tasks and dispatch reply callbacks.
+  virtual void CheckForCompletedTasks() = 0;
+
+ protected:
+  // Destruction through this interface is not supported; lifetime is managed
+  // by the owning implementation.
+  virtual ~Rasterizer() {}
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RASTERIZER_H_
diff --git a/cc/resources/release_callback.h b/cc/resources/release_callback.h
new file mode 100644
index 0000000..b471381
--- /dev/null
+++ b/cc/resources/release_callback.h
@@ -0,0 +1,16 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RELEASE_CALLBACK_H_
+#define CC_RESOURCES_RELEASE_CALLBACK_H_
+
+#include "base/callback.h"
+
+namespace cc {
+
+typedef base::Callback<void(uint32 sync_point, bool is_lost)> ReleaseCallback;
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RELEASE_CALLBACK_H_
diff --git a/cc/resources/release_callback_impl.h b/cc/resources/release_callback_impl.h
new file mode 100644
index 0000000..89f22b6
--- /dev/null
+++ b/cc/resources/release_callback_impl.h
@@ -0,0 +1,20 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RELEASE_CALLBACK_IMPL_H_
+#define CC_RESOURCES_RELEASE_CALLBACK_IMPL_H_
+
+#include "base/callback.h"
+
+namespace cc {
+class BlockingTaskRunner;
+
+typedef base::Callback<void(uint32 sync_point,
+ bool is_lost,
+ BlockingTaskRunner* main_thread_task_runner)>
+ ReleaseCallbackImpl;
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RELEASE_CALLBACK_IMPL_H_
diff --git a/cc/resources/resource.cc b/cc/resources/resource.cc
new file mode 100644
index 0000000..9bbcd4f
--- /dev/null
+++ b/cc/resources/resource.cc
@@ -0,0 +1,17 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/resource.h"
+
+namespace cc {
+
+// Returns the resource's memory footprint in bytes, or 0 for an empty size
+// (avoids evaluating MemorySizeBytes on a degenerate dimension).
+size_t Resource::bytes() const {
+  if (size_.IsEmpty())
+    return 0;
+
+  return MemorySizeBytes(size_, format_);
+}
+
+
+} // namespace cc
diff --git a/cc/resources/resource.h b/cc/resources/resource.h
new file mode 100644
index 0000000..24cb88a
--- /dev/null
+++ b/cc/resources/resource.h
@@ -0,0 +1,51 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RESOURCE_H_
+#define CC_RESOURCES_RESOURCE_H_
+
+#include "cc/base/cc_export.h"
+#include "cc/resources/resource_provider.h"
+#include "third_party/khronos/GLES2/gl2.h"
+#include "ui/gfx/size.h"
+
+namespace cc {
+
+// Lightweight value describing a provider resource: its id, pixel dimensions
+// and format. Subclasses (e.g. scoped resources) may update these via the
+// protected setters.
+class CC_EXPORT Resource {
+ public:
+  // NOTE(review): the default constructor leaves |size_| default-constructed
+  // and |format_| uninitialized; callers appear to set dimensions before use
+  // via set_dimensions() — confirm before relying on bytes() of a
+  // default-constructed Resource.
+  Resource() : id_(0) {}
+  Resource(unsigned id, const gfx::Size& size, ResourceFormat format)
+      : id_(id),
+        size_(size),
+        format_(format) {}
+
+  ResourceProvider::ResourceId id() const { return id_; }
+  gfx::Size size() const { return size_; }
+  ResourceFormat format() const { return format_; }
+  size_t bytes() const;
+
+  // Byte size of a |size| x |format| allocation; DCHECKs that the bit count
+  // is a whole number of bytes.
+  inline static size_t MemorySizeBytes(const gfx::Size& size,
+                                       ResourceFormat format) {
+    DCHECK_EQ(0u, (BitsPerPixel(format) * size.width() * size.height()) % 8);
+    return (BitsPerPixel(format) * size.width() * size.height()) / 8;
+  }
+
+ protected:
+  void set_id(ResourceProvider::ResourceId id) { id_ = id; }
+  void set_dimensions(const gfx::Size& size, ResourceFormat format) {
+    size_ = size;
+    format_ = format;
+  }
+
+ private:
+  ResourceProvider::ResourceId id_;
+  gfx::Size size_;
+  ResourceFormat format_;
+
+  DISALLOW_COPY_AND_ASSIGN(Resource);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RESOURCE_H_
diff --git a/cc/resources/resource_format.cc b/cc/resources/resource_format.cc
new file mode 100644
index 0000000..6cd0a93
--- /dev/null
+++ b/cc/resources/resource_format.cc
@@ -0,0 +1,27 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/resource_format.h"
+
+namespace cc {
+
+// Maps a cc::ResourceFormat to the Skia color type used to raster into it.
+// Only the formats Skia can draw to are supported; the rest are NOTREACHED.
+SkColorType ResourceFormatToSkColorType(ResourceFormat format) {
+  switch (format) {
+    case RGBA_4444:
+      return kARGB_4444_SkColorType;
+    case RGBA_8888:
+    case BGRA_8888:
+      // kN32 is the platform-native 32-bit order, covering both cases.
+      return kN32_SkColorType;
+    case ETC1:
+    case ALPHA_8:
+    case LUMINANCE_8:
+    case RGB_565:
+      NOTREACHED();
+      break;
+  }
+  NOTREACHED();
+  return kN32_SkColorType;
+}
+
+} // namespace cc
diff --git a/cc/resources/resource_format.h b/cc/resources/resource_format.h
new file mode 100644
index 0000000..b51ac7c
--- /dev/null
+++ b/cc/resources/resource_format.h
@@ -0,0 +1,29 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RESOURCE_FORMAT_H_
+#define CC_RESOURCES_RESOURCE_FORMAT_H_
+
+#include "base/logging.h"
+#include "third_party/skia/include/core/SkBitmap.h"
+
+namespace cc {
+
+// Keep in sync with arrays below.
+enum ResourceFormat {
+ RGBA_8888,
+ RGBA_4444,
+ BGRA_8888,
+ ALPHA_8,
+ LUMINANCE_8,
+ RGB_565,
+ ETC1,
+ RESOURCE_FORMAT_MAX = ETC1,
+};
+
+SkColorType ResourceFormatToSkColorType(ResourceFormat format);
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RESOURCE_FORMAT_H_
diff --git a/cc/resources/resource_pool.cc b/cc/resources/resource_pool.cc
new file mode 100644
index 0000000..31b9b27
--- /dev/null
+++ b/cc/resources/resource_pool.cc
@@ -0,0 +1,128 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/resource_pool.h"
+
+#include "cc/resources/resource_provider.h"
+#include "cc/resources/scoped_resource.h"
+
+namespace cc {
+
// Pools recycled resources that share one GL |target| and pixel |format|.
// All usage limits start at zero, so ReduceResourceUsage() frees every
// unused resource until SetResourceUsageLimits() raises the limits.
ResourcePool::ResourcePool(ResourceProvider* resource_provider,
                           GLenum target,
                           ResourceFormat format)
    : resource_provider_(resource_provider),
      target_(target),
      format_(format),
      max_memory_usage_bytes_(0),
      max_unused_memory_usage_bytes_(0),
      max_resource_count_(0),
      memory_usage_bytes_(0),
      unused_memory_usage_bytes_(0),
      resource_count_(0) {}
+
ResourcePool::~ResourcePool() {
  // Reclaim every busy resource unconditionally: nothing can still be
  // using them once the pool is being destroyed.
  while (!busy_resources_.empty()) {
    DidFinishUsingResource(busy_resources_.front());
    busy_resources_.pop_front();
  }

  // Zero limits force ReduceResourceUsage() (called by
  // SetResourceUsageLimits) to delete every unused resource.
  SetResourceUsageLimits(0, 0, 0);
  // All accounting must be back to zero once the pool is drained.
  DCHECK_EQ(0u, unused_resources_.size());
  DCHECK_EQ(0u, memory_usage_bytes_);
  DCHECK_EQ(0u, unused_memory_usage_bytes_);
  DCHECK_EQ(0u, resource_count_);
}
+
+scoped_ptr<ScopedResource> ResourcePool::AcquireResource(
+ const gfx::Size& size) {
+ for (ResourceList::iterator it = unused_resources_.begin();
+ it != unused_resources_.end();
+ ++it) {
+ ScopedResource* resource = *it;
+ DCHECK(resource_provider_->CanLockForWrite(resource->id()));
+
+ if (resource->size() != size)
+ continue;
+
+ unused_resources_.erase(it);
+ unused_memory_usage_bytes_ -= resource->bytes();
+ return make_scoped_ptr(resource);
+ }
+
+ scoped_ptr<ScopedResource> resource =
+ ScopedResource::Create(resource_provider_);
+ resource->AllocateManaged(size, target_, format_);
+
+ memory_usage_bytes_ += resource->bytes();
+ ++resource_count_;
+ return resource.Pass();
+}
+
// Returns a resource to the pool. It is held as "busy" until
// CheckBusyResources() observes that it can be locked for write again.
void ResourcePool::ReleaseResource(scoped_ptr<ScopedResource> resource) {
  busy_resources_.push_back(resource.release());
}

// Updates the eviction limits and immediately enforces them against the
// unused resource list.
void ResourcePool::SetResourceUsageLimits(size_t max_memory_usage_bytes,
                                          size_t max_unused_memory_usage_bytes,
                                          size_t max_resource_count) {
  max_memory_usage_bytes_ = max_memory_usage_bytes;
  max_unused_memory_usage_bytes_ = max_unused_memory_usage_bytes;
  max_resource_count_ = max_resource_count;

  ReduceResourceUsage();
}
+
+void ResourcePool::ReduceResourceUsage() {
+ while (!unused_resources_.empty()) {
+ if (!ResourceUsageTooHigh())
+ break;
+
+ // LRU eviction pattern. Most recently used might be blocked by
+ // a read lock fence but it's still better to evict the least
+ // recently used as it prevents a resource that is hard to reuse
+ // because of unique size from being kept around. Resources that
+ // can't be locked for write might also not be truly free-able.
+ // We can free the resource here but it doesn't mean that the
+ // memory is necessarily returned to the OS.
+ ScopedResource* resource = unused_resources_.front();
+ unused_resources_.pop_front();
+ memory_usage_bytes_ -= resource->bytes();
+ unused_memory_usage_bytes_ -= resource->bytes();
+ --resource_count_;
+ delete resource;
+ }
+}
+
+bool ResourcePool::ResourceUsageTooHigh() {
+ if (resource_count_ > max_resource_count_)
+ return true;
+ if (memory_usage_bytes_ > max_memory_usage_bytes_)
+ return true;
+ if (unused_memory_usage_bytes_ > max_unused_memory_usage_bytes_)
+ return true;
+ return false;
+}
+
+void ResourcePool::CheckBusyResources() {
+ ResourceList::iterator it = busy_resources_.begin();
+
+ while (it != busy_resources_.end()) {
+ ScopedResource* resource = *it;
+
+ if (resource_provider_->CanLockForWrite(resource->id())) {
+ DidFinishUsingResource(resource);
+ it = busy_resources_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
// Moves an idle resource onto the unused list and credits its bytes to
// the unused total. Total usage is unchanged: the pool still owns it.
void ResourcePool::DidFinishUsingResource(ScopedResource* resource) {
  unused_memory_usage_bytes_ += resource->bytes();
  unused_resources_.push_back(resource);
}
+
+} // namespace cc
diff --git a/cc/resources/resource_pool.h b/cc/resources/resource_pool.h
new file mode 100644
index 0000000..5f481e9
--- /dev/null
+++ b/cc/resources/resource_pool.h
@@ -0,0 +1,79 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RESOURCE_POOL_H_
+#define CC_RESOURCES_RESOURCE_POOL_H_
+
+#include <list>
+
+#include "base/memory/scoped_ptr.h"
+#include "cc/base/cc_export.h"
+#include "cc/output/renderer.h"
+#include "cc/resources/resource.h"
+#include "cc/resources/resource_format.h"
+
+namespace cc {
+class ScopedResource;
+
// A pool of ScopedResources that share one GL target and pixel format.
// Resources handed out by AcquireResource() are returned via
// ReleaseResource(); a returned resource stays "busy" until
// CheckBusyResources() sees it can be write-locked again, after which it
// becomes available for reuse.
class CC_EXPORT ResourcePool {
 public:
  static scoped_ptr<ResourcePool> Create(ResourceProvider* resource_provider,
                                         GLenum target,
                                         ResourceFormat format) {
    return make_scoped_ptr(new ResourcePool(resource_provider, target, format));
  }

  virtual ~ResourcePool();

  // Returns a recycled resource of exactly |size|, or allocates a new one.
  scoped_ptr<ScopedResource> AcquireResource(const gfx::Size& size);
  // Gives a resource back to the pool; it is tracked as busy until usable.
  void ReleaseResource(scoped_ptr<ScopedResource>);

  // Sets eviction limits and immediately enforces them.
  void SetResourceUsageLimits(size_t max_memory_usage_bytes,
                              size_t max_unused_memory_usage_bytes,
                              size_t max_resource_count);

  // Evicts unused resources (LRU first) until limits are satisfied.
  void ReduceResourceUsage();
  // Moves busy resources that became write-lockable back to the free list.
  void CheckBusyResources();

  size_t total_memory_usage_bytes() const { return memory_usage_bytes_; }
  size_t acquired_memory_usage_bytes() const {
    return memory_usage_bytes_ - unused_memory_usage_bytes_;
  }
  size_t total_resource_count() const { return resource_count_; }
  size_t acquired_resource_count() const {
    return resource_count_ - unused_resources_.size();
  }

  ResourceFormat resource_format() const { return format_; }

 protected:
  ResourcePool(ResourceProvider* resource_provider,
               GLenum target,
               ResourceFormat format);

  // True when any limit (count, total bytes, unused bytes) is exceeded.
  bool ResourceUsageTooHigh();

 private:
  void DidFinishUsingResource(ScopedResource* resource);

  ResourceProvider* resource_provider_;
  const GLenum target_;
  const ResourceFormat format_;
  size_t max_memory_usage_bytes_;
  size_t max_unused_memory_usage_bytes_;
  size_t max_resource_count_;
  // Totals below cover both acquired and unused resources.
  size_t memory_usage_bytes_;
  size_t unused_memory_usage_bytes_;
  size_t resource_count_;

  typedef std::list<ScopedResource*> ResourceList;
  ResourceList unused_resources_;
  ResourceList busy_resources_;

  DISALLOW_COPY_AND_ASSIGN(ResourcePool);
};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RESOURCE_POOL_H_
diff --git a/cc/resources/resource_provider.cc b/cc/resources/resource_provider.cc
new file mode 100644
index 0000000..0e7c938
--- /dev/null
+++ b/cc/resources/resource_provider.cc
@@ -0,0 +1,2082 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/resource_provider.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "base/containers/hash_tables.h"
+#include "base/debug/trace_event.h"
+#include "base/stl_util.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "cc/base/util.h"
+#include "cc/output/gl_renderer.h" // For the GLC() macro.
+#include "cc/resources/platform_color.h"
+#include "cc/resources/returned_resource.h"
+#include "cc/resources/shared_bitmap_manager.h"
+#include "cc/resources/texture_uploader.h"
+#include "cc/resources/transferable_resource.h"
+#include "gpu/GLES2/gl2extchromium.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "third_party/khronos/GLES2/gl2.h"
+#include "third_party/khronos/GLES2/gl2ext.h"
+#include "third_party/skia/include/core/SkSurface.h"
+#include "third_party/skia/include/gpu/GrContext.h"
+#include "ui/gfx/frame_time.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/vector2d.h"
+
+using gpu::gles2::GLES2Interface;
+
+namespace cc {
+
// Allocates GL object ids in chunks, amortizing the per-call cost of the
// Gen*/Delete* entry points. Subclasses implement NextId() to refill
// |ids_| from the appropriate GL call once the chunk is exhausted.
class IdAllocator {
 public:
  virtual ~IdAllocator() {}

  virtual GLuint NextId() = 0;

 protected:
  IdAllocator(GLES2Interface* gl, size_t id_allocation_chunk_size)
      : gl_(gl),
        id_allocation_chunk_size_(id_allocation_chunk_size),
        ids_(new GLuint[id_allocation_chunk_size]),
        // Start "exhausted" so the first NextId() fills the chunk.
        next_id_index_(id_allocation_chunk_size) {
    DCHECK(id_allocation_chunk_size_);
  }

  GLES2Interface* gl_;
  const size_t id_allocation_chunk_size_;
  scoped_ptr<GLuint[]> ids_;
  size_t next_id_index_;
};
+
+namespace {
+
+// Measured in seconds.
+const double kSoftwareUploadTickRate = 0.000250;
+const double kTextureUploadTickRate = 0.004;
+
+GLenum TextureToStorageFormat(ResourceFormat format) {
+ GLenum storage_format = GL_RGBA8_OES;
+ switch (format) {
+ case RGBA_8888:
+ break;
+ case BGRA_8888:
+ storage_format = GL_BGRA8_EXT;
+ break;
+ case RGBA_4444:
+ case ALPHA_8:
+ case LUMINANCE_8:
+ case RGB_565:
+ case ETC1:
+ NOTREACHED();
+ break;
+ }
+
+ return storage_format;
+}
+
+bool IsFormatSupportedForStorage(ResourceFormat format, bool use_bgra) {
+ switch (format) {
+ case RGBA_8888:
+ return true;
+ case BGRA_8888:
+ return use_bgra;
+ case RGBA_4444:
+ case ALPHA_8:
+ case LUMINANCE_8:
+ case RGB_565:
+ case ETC1:
+ return false;
+ }
+ return false;
+}
+
+GrPixelConfig ToGrPixelConfig(ResourceFormat format) {
+ switch (format) {
+ case RGBA_8888:
+ return kRGBA_8888_GrPixelConfig;
+ case BGRA_8888:
+ return kBGRA_8888_GrPixelConfig;
+ case RGBA_4444:
+ return kRGBA_4444_GrPixelConfig;
+ default:
+ break;
+ }
+ DCHECK(false) << "Unsupported resource format.";
+ return kSkia8888_GrPixelConfig;
+}
+
// RAII helper that makes |unit| the active texture unit for its scope and
// restores GL_TEXTURE0 on destruction. GL_TEXTURE0 must be active on
// entry (DCHECKed), so scopes with different units must not nest.
class ScopedSetActiveTexture {
 public:
  ScopedSetActiveTexture(GLES2Interface* gl, GLenum unit)
      : gl_(gl), unit_(unit) {
    DCHECK_EQ(GL_TEXTURE0, ResourceProvider::GetActiveTextureUnit(gl_));

    // Skip the GL call entirely when the unit is already active.
    if (unit_ != GL_TEXTURE0)
      GLC(gl_, gl_->ActiveTexture(unit_));
  }

  ~ScopedSetActiveTexture() {
    // Active unit being GL_TEXTURE0 is effectively the ground state.
    if (unit_ != GL_TEXTURE0)
      GLC(gl_, gl_->ActiveTexture(GL_TEXTURE0));
  }

 private:
  GLES2Interface* gl_;
  GLenum unit_;
};
+
// IdAllocator backed by glGenTextures/glDeleteTextures. Unused ids from
// the current chunk are returned to GL on destruction.
class TextureIdAllocator : public IdAllocator {
 public:
  TextureIdAllocator(GLES2Interface* gl,
                     size_t texture_id_allocation_chunk_size)
      : IdAllocator(gl, texture_id_allocation_chunk_size) {}
  virtual ~TextureIdAllocator() {
    // Free only the ids that were generated but never handed out.
    gl_->DeleteTextures(id_allocation_chunk_size_ - next_id_index_,
                        ids_.get() + next_id_index_);
  }

  // Overridden from IdAllocator:
  virtual GLuint NextId() OVERRIDE {
    if (next_id_index_ == id_allocation_chunk_size_) {
      gl_->GenTextures(id_allocation_chunk_size_, ids_.get());
      next_id_index_ = 0;
    }

    return ids_[next_id_index_++];
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(TextureIdAllocator);
};
+
// IdAllocator backed by glGenBuffers/glDeleteBuffers. Unused ids from the
// current chunk are returned to GL on destruction.
class BufferIdAllocator : public IdAllocator {
 public:
  BufferIdAllocator(GLES2Interface* gl, size_t buffer_id_allocation_chunk_size)
      : IdAllocator(gl, buffer_id_allocation_chunk_size) {}
  virtual ~BufferIdAllocator() {
    // Free only the ids that were generated but never handed out.
    gl_->DeleteBuffers(id_allocation_chunk_size_ - next_id_index_,
                       ids_.get() + next_id_index_);
  }

  // Overridden from IdAllocator:
  virtual GLuint NextId() OVERRIDE {
    if (next_id_index_ == id_allocation_chunk_size_) {
      gl_->GenBuffers(id_allocation_chunk_size_, ids_.get());
      next_id_index_ = 0;
    }

    return ids_[next_id_index_++];
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(BufferIdAllocator);
};
+
// Generic fence implementation for query objects. Fence has passed when query
// result is available.
class QueryFence : public ResourceProvider::Fence {
 public:
  QueryFence(gpu::gles2::GLES2Interface* gl, unsigned query_id)
      : gl_(gl), query_id_(query_id) {}

  // Overridden from ResourceProvider::Fence:
  // NOTE(review): Set() is intentionally a no-op here — presumably the
  // caller arms the fence by issuing the query itself; confirm at call
  // sites.
  virtual void Set() OVERRIDE {}
  virtual bool HasPassed() OVERRIDE {
    // Non-blocking: only asks whether the query result is ready.
    unsigned available = 1;
    gl_->GetQueryObjectuivEXT(
        query_id_, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
    return !!available;
  }

 private:
  virtual ~QueryFence() {}

  gpu::gles2::GLES2Interface* gl_;
  unsigned query_id_;

  DISALLOW_COPY_AND_ASSIGN(QueryFence);
};
+
+} // namespace
+
// Default-constructed placeholder: type InvalidType, no backing storage.
ResourceProvider::Resource::Resource()
    : child_id(0),
      gl_id(0),
      gl_pixel_buffer_id(0),
      gl_upload_query_id(0),
      gl_read_lock_query_id(0),
      pixels(NULL),
      lock_for_read_count(0),
      imported_count(0),
      exported_count(0),
      dirty_image(false),
      locked_for_write(false),
      lost(false),
      marked_for_deletion(false),
      pending_set_pixels(false),
      set_pixels_completion_forced(false),
      allocated(false),
      read_lock_fences_enabled(false),
      has_shared_bitmap_id(false),
      allow_overlay(false),
      read_lock_fence(NULL),
      size(),
      origin(Internal),
      target(0),
      original_filter(0),
      filter(0),
      image_id(0),
      bound_image_id(0),
      texture_pool(0),
      wrap_mode(0),
      hint(TextureHintImmutable),
      type(InvalidType),
      format(RGBA_8888),
      shared_bitmap(NULL) {
}

ResourceProvider::Resource::~Resource() {}

// GL-texture-backed resource. A zero |texture_id| means the actual GL
// texture is created lazily. Internal resources must name a texture pool
// (DCHECKed below); external/delegated ones must not.
ResourceProvider::Resource::Resource(GLuint texture_id,
                                     const gfx::Size& size,
                                     Origin origin,
                                     GLenum target,
                                     GLenum filter,
                                     GLenum texture_pool,
                                     GLint wrap_mode,
                                     TextureHint hint,
                                     ResourceFormat format)
    : child_id(0),
      gl_id(texture_id),
      gl_pixel_buffer_id(0),
      gl_upload_query_id(0),
      gl_read_lock_query_id(0),
      pixels(NULL),
      lock_for_read_count(0),
      imported_count(0),
      exported_count(0),
      dirty_image(false),
      locked_for_write(false),
      lost(false),
      marked_for_deletion(false),
      pending_set_pixels(false),
      set_pixels_completion_forced(false),
      allocated(false),
      read_lock_fences_enabled(false),
      has_shared_bitmap_id(false),
      allow_overlay(false),
      read_lock_fence(NULL),
      size(size),
      origin(origin),
      target(target),
      original_filter(filter),
      filter(filter),
      image_id(0),
      bound_image_id(0),
      texture_pool(texture_pool),
      wrap_mode(wrap_mode),
      hint(hint),
      type(GLTexture),
      format(format),
      shared_bitmap(NULL) {
  DCHECK(wrap_mode == GL_CLAMP_TO_EDGE || wrap_mode == GL_REPEAT);
  DCHECK_EQ(origin == Internal, !!texture_pool);
}

// Software (bitmap) resource backed by raw |pixels|, optionally owned via
// a SharedBitmap. Software resources are always RGBA_8888. Delegated
// resources may have no pixels yet (DCHECKed below).
ResourceProvider::Resource::Resource(uint8_t* pixels,
                                     SharedBitmap* bitmap,
                                     const gfx::Size& size,
                                     Origin origin,
                                     GLenum filter,
                                     GLint wrap_mode)
    : child_id(0),
      gl_id(0),
      gl_pixel_buffer_id(0),
      gl_upload_query_id(0),
      gl_read_lock_query_id(0),
      pixels(pixels),
      lock_for_read_count(0),
      imported_count(0),
      exported_count(0),
      dirty_image(false),
      locked_for_write(false),
      lost(false),
      marked_for_deletion(false),
      pending_set_pixels(false),
      set_pixels_completion_forced(false),
      allocated(false),
      read_lock_fences_enabled(false),
      has_shared_bitmap_id(!!bitmap),
      allow_overlay(false),
      read_lock_fence(NULL),
      size(size),
      origin(origin),
      target(0),
      original_filter(filter),
      filter(filter),
      image_id(0),
      bound_image_id(0),
      texture_pool(0),
      wrap_mode(wrap_mode),
      hint(TextureHintImmutable),
      type(Bitmap),
      format(RGBA_8888),
      shared_bitmap(bitmap) {
  DCHECK(wrap_mode == GL_CLAMP_TO_EDGE || wrap_mode == GL_REPEAT);
  DCHECK(origin == Delegated || pixels);
  if (bitmap)
    shared_bitmap_id = bitmap->id();
}

// Software resource known only by SharedBitmapId; the pixel memory is
// looked up later (see LockForRead) via the SharedBitmapManager.
ResourceProvider::Resource::Resource(const SharedBitmapId& bitmap_id,
                                     const gfx::Size& size,
                                     Origin origin,
                                     GLenum filter,
                                     GLint wrap_mode)
    : child_id(0),
      gl_id(0),
      gl_pixel_buffer_id(0),
      gl_upload_query_id(0),
      gl_read_lock_query_id(0),
      pixels(NULL),
      lock_for_read_count(0),
      imported_count(0),
      exported_count(0),
      dirty_image(false),
      locked_for_write(false),
      lost(false),
      marked_for_deletion(false),
      pending_set_pixels(false),
      set_pixels_completion_forced(false),
      allocated(false),
      read_lock_fences_enabled(false),
      has_shared_bitmap_id(true),
      allow_overlay(false),
      read_lock_fence(NULL),
      size(size),
      origin(origin),
      target(0),
      original_filter(filter),
      filter(filter),
      image_id(0),
      bound_image_id(0),
      texture_pool(0),
      wrap_mode(wrap_mode),
      hint(TextureHintImmutable),
      type(Bitmap),
      format(RGBA_8888),
      shared_bitmap_id(bitmap_id),
      shared_bitmap(NULL) {
  DCHECK(wrap_mode == GL_CLAMP_TO_EDGE || wrap_mode == GL_REPEAT);
}
+
// Per-child bookkeeping; children start alive (not marked for deletion).
ResourceProvider::Child::Child() : marked_for_deletion(false) {}

ResourceProvider::Child::~Child() {}
+
// Factory. Picks GL-texture or software-bitmap mode based on whether the
// output surface provides a GL context, then initializes accordingly.
scoped_ptr<ResourceProvider> ResourceProvider::Create(
    OutputSurface* output_surface,
    SharedBitmapManager* shared_bitmap_manager,
    BlockingTaskRunner* blocking_main_thread_task_runner,
    int highp_threshold_min,
    bool use_rgba_4444_texture_format,
    size_t id_allocation_chunk_size,
    bool use_distance_field_text) {
  scoped_ptr<ResourceProvider> resource_provider(
      new ResourceProvider(output_surface,
                           shared_bitmap_manager,
                           blocking_main_thread_task_runner,
                           highp_threshold_min,
                           use_rgba_4444_texture_format,
                           id_allocation_chunk_size,
                           use_distance_field_text));

  if (resource_provider->ContextGL())
    resource_provider->InitializeGL();
  else
    resource_provider->InitializeSoftware();

  // Initialization must have settled on a concrete resource type.
  DCHECK_NE(InvalidType, resource_provider->default_resource_type());
  return resource_provider.Pass();
}
+
ResourceProvider::~ResourceProvider() {
  // Tear down children first (returning/abandoning their resources), then
  // any remaining resources, both with shutdown semantics.
  while (!children_.empty())
    DestroyChildInternal(children_.begin(), ForShutdown);
  while (!resources_.empty())
    DeleteResourceInternal(resources_.begin(), ForShutdown);

  CleanUpGLIfNeeded();
}
+
// True while a consumer still holds the resource: read-locked, exported
// to a parent compositor, or lost (can never be reclaimed).
bool ResourceProvider::InUseByConsumer(ResourceId id) {
  Resource* resource = GetResource(id);
  return resource->lock_for_read_count > 0 || resource->exported_count > 0 ||
         resource->lost;
}

bool ResourceProvider::IsLost(ResourceId id) {
  Resource* resource = GetResource(id);
  return resource->lost;
}

bool ResourceProvider::AllowOverlay(ResourceId id) {
  Resource* resource = GetResource(id);
  return resource->allow_overlay;
}
+
// Creates a resource with the provider's default backing: a GL texture in
// the unmanaged Chromium texture pool, or a software bitmap when no GL
// context exists (bitmaps only support RGBA_8888).
ResourceProvider::ResourceId ResourceProvider::CreateResource(
    const gfx::Size& size,
    GLint wrap_mode,
    TextureHint hint,
    ResourceFormat format) {
  DCHECK(!size.IsEmpty());
  switch (default_resource_type_) {
    case GLTexture:
      return CreateGLTexture(size,
                             GL_TEXTURE_2D,
                             GL_TEXTURE_POOL_UNMANAGED_CHROMIUM,
                             wrap_mode,
                             hint,
                             format);
    case Bitmap:
      DCHECK_EQ(RGBA_8888, format);
      return CreateBitmap(size, wrap_mode);
    case InvalidType:
      break;
  }

  LOG(FATAL) << "Invalid default resource type.";
  return 0;
}

// Like CreateResource() but uses the managed Chromium texture pool and a
// caller-supplied texture |target|.
ResourceProvider::ResourceId ResourceProvider::CreateManagedResource(
    const gfx::Size& size,
    GLenum target,
    GLint wrap_mode,
    TextureHint hint,
    ResourceFormat format) {
  DCHECK(!size.IsEmpty());
  switch (default_resource_type_) {
    case GLTexture:
      return CreateGLTexture(size,
                             target,
                             GL_TEXTURE_POOL_MANAGED_CHROMIUM,
                             wrap_mode,
                             hint,
                             format);
    case Bitmap:
      DCHECK_EQ(RGBA_8888, format);
      return CreateBitmap(size, wrap_mode);
    case InvalidType:
      break;
  }

  LOG(FATAL) << "Invalid default resource type.";
  return 0;
}
+
// Registers a GL-texture resource and returns its id. No GL texture is
// created here (gl_id 0, allocated false); storage is created lazily on
// first use.
ResourceProvider::ResourceId ResourceProvider::CreateGLTexture(
    const gfx::Size& size,
    GLenum target,
    GLenum texture_pool,
    GLint wrap_mode,
    TextureHint hint,
    ResourceFormat format) {
  DCHECK_LE(size.width(), max_texture_size_);
  DCHECK_LE(size.height(), max_texture_size_);
  DCHECK(thread_checker_.CalledOnValidThread());

  ResourceId id = next_id_++;
  Resource resource(0,
                    size,
                    Resource::Internal,
                    target,
                    GL_LINEAR,
                    texture_pool,
                    wrap_mode,
                    hint,
                    format);
  resource.allocated = false;
  resources_[id] = resource;
  return id;
}
+
// Creates a software resource: pixels come from a SharedBitmap when a
// manager is available (so they can be shared cross-process), otherwise
// from a plain heap allocation.
ResourceProvider::ResourceId ResourceProvider::CreateBitmap(
    const gfx::Size& size, GLint wrap_mode) {
  DCHECK(thread_checker_.CalledOnValidThread());

  scoped_ptr<SharedBitmap> bitmap;
  if (shared_bitmap_manager_)
    bitmap = shared_bitmap_manager_->AllocateSharedBitmap(size);

  uint8_t* pixels;
  if (bitmap) {
    pixels = bitmap->pixels();
  } else {
    // CheckedSizeInBytes guards against size overflow.
    size_t bytes = SharedBitmap::CheckedSizeInBytes(size);
    pixels = new uint8_t[bytes];
  }
  DCHECK(pixels);

  ResourceId id = next_id_++;
  // The Resource takes ownership of the SharedBitmap (released here) and,
  // via that, of the pixel memory.
  Resource resource(
      pixels, bitmap.release(), size, Resource::Internal, GL_LINEAR, wrap_mode);
  resource.allocated = true;
  resources_[id] = resource;
  return id;
}
+
// Wraps a Mac IOSurface in a rectangle texture. GL-only.
// NOTE(review): |size| is used for the TexImageIOSurface2DCHROMIUM call
// but the Resource itself is registered with an empty gfx::Size() —
// confirm callers don't rely on resource->size for IOSurface resources.
ResourceProvider::ResourceId ResourceProvider::CreateResourceFromIOSurface(
    const gfx::Size& size,
    unsigned io_surface_id) {
  DCHECK(thread_checker_.CalledOnValidThread());

  ResourceId id = next_id_++;
  Resource resource(0,
                    gfx::Size(),
                    Resource::Internal,
                    GL_TEXTURE_RECTANGLE_ARB,
                    GL_LINEAR,
                    GL_TEXTURE_POOL_UNMANAGED_CHROMIUM,
                    GL_CLAMP_TO_EDGE,
                    TextureHintImmutable,
                    RGBA_8888);
  // Create the GL texture now and bind the IOSurface to it.
  LazyCreate(&resource);
  GLES2Interface* gl = ContextGL();
  DCHECK(gl);
  gl->BindTexture(GL_TEXTURE_RECTANGLE_ARB, resource.gl_id);
  gl->TexImageIOSurface2DCHROMIUM(
      GL_TEXTURE_RECTANGLE_ARB, size.width(), size.height(), io_surface_id, 0);
  resource.allocated = true;
  resources_[id] = resource;
  return id;
}
+
// Imports an externally owned resource described by |mailbox| — either a
// GL texture mailbox or a shared-memory bitmap. |release_callback_impl|
// is invoked when this provider is done with the resource.
ResourceProvider::ResourceId ResourceProvider::CreateResourceFromTextureMailbox(
    const TextureMailbox& mailbox,
    scoped_ptr<SingleReleaseCallbackImpl> release_callback_impl) {
  DCHECK(thread_checker_.CalledOnValidThread());
  // Just store the information. Mailbox will be consumed in LockForRead().
  ResourceId id = next_id_++;
  DCHECK(mailbox.IsValid());
  Resource& resource = resources_[id];
  if (mailbox.IsTexture()) {
    // Size is unknown for texture mailboxes; gl_id stays 0 until the
    // mailbox is consumed.
    resource = Resource(0,
                        gfx::Size(),
                        Resource::External,
                        mailbox.target(),
                        GL_LINEAR,
                        0,
                        GL_CLAMP_TO_EDGE,
                        TextureHintImmutable,
                        RGBA_8888);
  } else {
    DCHECK(mailbox.IsSharedMemory());
    base::SharedMemory* shared_memory = mailbox.shared_memory();
    DCHECK(shared_memory->memory());
    uint8_t* pixels = reinterpret_cast<uint8_t*>(shared_memory->memory());
    DCHECK(pixels);
    scoped_ptr<SharedBitmap> shared_bitmap;
    if (shared_bitmap_manager_) {
      shared_bitmap =
          shared_bitmap_manager_->GetBitmapForSharedMemory(shared_memory);
    }
    resource = Resource(pixels,
                        shared_bitmap.release(),
                        mailbox.shared_memory_size(),
                        Resource::External,
                        GL_LINEAR,
                        GL_CLAMP_TO_EDGE);
  }
  resource.allocated = true;
  resource.mailbox = mailbox;
  // base::Owned ties the callback object's lifetime to the bound closure.
  resource.release_callback_impl =
      base::Bind(&SingleReleaseCallbackImpl::Run,
                 base::Owned(release_callback_impl.release()));
  resource.allow_overlay = mailbox.allow_overlay();
  return id;
}
+
+void ResourceProvider::DeleteResource(ResourceId id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ ResourceMap::iterator it = resources_.find(id);
+ CHECK(it != resources_.end());
+ Resource* resource = &it->second;
+ DCHECK(!resource->marked_for_deletion);
+ DCHECK_EQ(resource->imported_count, 0);
+ DCHECK(resource->pending_set_pixels || !resource->locked_for_write);
+
+ if (resource->exported_count > 0 || resource->lock_for_read_count > 0) {
+ resource->marked_for_deletion = true;
+ return;
+ } else {
+ DeleteResourceInternal(it, Normal);
+ }
+}
+
// Frees every kind of backing a resource can hold (image, queries, pixel
// buffer, GL texture, shared bitmap, heap pixels) and, for external
// resources, runs the release callback. |style| == ForShutdown allows
// deleting resources that are still exported (they become lost).
void ResourceProvider::DeleteResourceInternal(ResourceMap::iterator it,
                                              DeleteStyle style) {
  TRACE_EVENT0("cc", "ResourceProvider::DeleteResourceInternal");
  Resource* resource = &it->second;
  bool lost_resource = resource->lost;

  DCHECK(resource->exported_count == 0 || style != Normal);
  // An exported resource deleted at shutdown can never be returned to us,
  // so report it to its owner as lost.
  if (style == ForShutdown && resource->exported_count > 0)
    lost_resource = true;

  if (resource->image_id) {
    DCHECK(resource->origin == Resource::Internal);
    GLES2Interface* gl = ContextGL();
    DCHECK(gl);
    GLC(gl, gl->DestroyImageCHROMIUM(resource->image_id));
  }
  if (resource->gl_upload_query_id) {
    DCHECK(resource->origin == Resource::Internal);
    GLES2Interface* gl = ContextGL();
    DCHECK(gl);
    GLC(gl, gl->DeleteQueriesEXT(1, &resource->gl_upload_query_id));
  }
  if (resource->gl_read_lock_query_id) {
    DCHECK(resource->origin == Resource::Internal);
    GLES2Interface* gl = ContextGL();
    DCHECK(gl);
    GLC(gl, gl->DeleteQueriesEXT(1, &resource->gl_read_lock_query_id));
  }
  if (resource->gl_pixel_buffer_id) {
    DCHECK(resource->origin == Resource::Internal);
    GLES2Interface* gl = ContextGL();
    DCHECK(gl);
    GLC(gl, gl->DeleteBuffers(1, &resource->gl_pixel_buffer_id));
  }
  if (resource->origin == Resource::External) {
    DCHECK(resource->mailbox.IsValid());
    GLuint sync_point = resource->mailbox.sync_point();
    if (resource->type == GLTexture) {
      DCHECK(resource->mailbox.IsTexture());
      lost_resource |= lost_output_surface_;
      GLES2Interface* gl = ContextGL();
      DCHECK(gl);
      if (resource->gl_id) {
        GLC(gl, gl->DeleteTextures(1, &resource->gl_id));
        resource->gl_id = 0;
        // Give the owner a sync point ordering our deletion, unless the
        // resource is lost anyway.
        if (!lost_resource)
          sync_point = gl->InsertSyncPointCHROMIUM();
      }
    } else {
      DCHECK(resource->mailbox.IsSharedMemory());
      base::SharedMemory* shared_memory = resource->mailbox.shared_memory();
      if (resource->pixels && shared_memory) {
        // The shared memory is owned by the mailbox; just drop our view.
        DCHECK(shared_memory->memory() == resource->pixels);
        resource->pixels = NULL;
        delete resource->shared_bitmap;
        resource->shared_bitmap = NULL;
      }
    }
    // Hand the resource back to its external owner.
    resource->release_callback_impl.Run(
        sync_point, lost_resource, blocking_main_thread_task_runner_);
  }
  if (resource->gl_id) {
    GLES2Interface* gl = ContextGL();
    DCHECK(gl);
    GLC(gl, gl->DeleteTextures(1, &resource->gl_id));
    resource->gl_id = 0;
  }
  if (resource->shared_bitmap) {
    DCHECK(resource->origin != Resource::External);
    DCHECK_EQ(Bitmap, resource->type);
    // The SharedBitmap owns the pixel memory; clear |pixels| so the
    // delete[] branch below is skipped.
    delete resource->shared_bitmap;
    resource->pixels = NULL;
  }
  if (resource->pixels) {
    // Heap-allocated pixels (no SharedBitmap) are owned directly.
    DCHECK(resource->origin == Resource::Internal);
    delete[] resource->pixels;
  }
  resources_.erase(it);
}
+
// Returns the backing type (GLTexture/Bitmap) of |id|; CHECKs existence.
ResourceProvider::ResourceType ResourceProvider::GetResourceType(
    ResourceId id) {
  return GetResource(id)->type;
}
+
// Copies |source_rect| of |image| (whose full bounds are |image_rect|)
// into the resource at |dest_offset|. Uses the texture uploader for GL
// resources and a Skia writePixels for software resources. The resource
// must be internal, unlocked, and past any read-lock fence.
void ResourceProvider::SetPixels(ResourceId id,
                                 const uint8_t* image,
                                 const gfx::Rect& image_rect,
                                 const gfx::Rect& source_rect,
                                 const gfx::Vector2d& dest_offset) {
  Resource* resource = GetResource(id);
  DCHECK(!resource->locked_for_write);
  DCHECK(!resource->lock_for_read_count);
  DCHECK(resource->origin == Resource::Internal);
  DCHECK_EQ(resource->exported_count, 0);
  DCHECK(ReadLockFenceHasPassed(resource));
  LazyAllocate(resource);

  if (resource->type == GLTexture) {
    DCHECK(resource->gl_id);
    DCHECK(!resource->pending_set_pixels);
    DCHECK_EQ(resource->target, static_cast<GLenum>(GL_TEXTURE_2D));
    GLES2Interface* gl = ContextGL();
    DCHECK(gl);
    DCHECK(texture_uploader_.get());
    gl->BindTexture(GL_TEXTURE_2D, resource->gl_id);
    texture_uploader_->Upload(image,
                              image_rect,
                              source_rect,
                              dest_offset,
                              resource->format,
                              resource->size);
  } else {
    DCHECK_EQ(Bitmap, resource->type);
    DCHECK(resource->allocated);
    DCHECK_EQ(RGBA_8888, resource->format);
    // |source_rect| must lie entirely within |image_rect|.
    DCHECK(source_rect.x() >= image_rect.x());
    DCHECK(source_rect.y() >= image_rect.y());
    DCHECK(source_rect.right() <= image_rect.right());
    DCHECK(source_rect.bottom() <= image_rect.bottom());
    SkImageInfo source_info =
        SkImageInfo::MakeN32Premul(source_rect.width(), source_rect.height());
    // 4 bytes per pixel (RGBA_8888, asserted above).
    size_t image_row_bytes = image_rect.width() * 4;
    // Advance |image| to the first pixel of |source_rect|.
    gfx::Vector2d source_offset = source_rect.origin() - image_rect.origin();
    image += source_offset.y() * image_row_bytes + source_offset.x() * 4;

    ScopedWriteLockSoftware lock(this, id);
    SkCanvas* dest = lock.sk_canvas();
    dest->writePixels(
        source_info, image, image_row_bytes, dest_offset.x(), dest_offset.y());
  }
}
+
// Upload bookkeeping. All of these are no-ops (or trivial defaults) in
// software mode, where |texture_uploader_| is null.

size_t ResourceProvider::NumBlockingUploads() {
  if (!texture_uploader_)
    return 0;

  return texture_uploader_->NumBlockingUploads();
}

void ResourceProvider::MarkPendingUploadsAsNonBlocking() {
  if (!texture_uploader_)
    return;

  texture_uploader_->MarkPendingUploadsAsNonBlocking();
}

// Estimates how many texture uploads fit in one tick, based on measured
// upload throughput; never returns 0.
size_t ResourceProvider::EstimatedUploadsPerTick() {
  if (!texture_uploader_)
    return 1u;

  double textures_per_second = texture_uploader_->EstimatedTexturesPerSecond();
  size_t textures_per_tick = floor(
      kTextureUploadTickRate * textures_per_second);
  return textures_per_tick ? textures_per_tick : 1u;
}

void ResourceProvider::FlushUploads() {
  if (!texture_uploader_)
    return;

  texture_uploader_->Flush();
}

void ResourceProvider::ReleaseCachedData() {
  if (!texture_uploader_)
    return;

  texture_uploader_->ReleaseCachedQueries();
}
+
// Predicts when the currently pending uploads plus |uploads_per_tick| new
// ones will have completed. Returns the null TimeTicks when the output
// surface is lost (uploads will never complete).
base::TimeTicks ResourceProvider::EstimatedUploadCompletionTime(
    size_t uploads_per_tick) {
  if (lost_output_surface_)
    return base::TimeTicks();

  // Software resource uploads happen on impl thread, so don't bother batching
  // them up and trying to wait for them to complete.
  if (!texture_uploader_) {
    return gfx::FrameTime::Now() + base::TimeDelta::FromMicroseconds(
        base::Time::kMicrosecondsPerSecond * kSoftwareUploadTickRate);
  }

  base::TimeDelta upload_one_texture_time =
      base::TimeDelta::FromMicroseconds(
          base::Time::kMicrosecondsPerSecond * kTextureUploadTickRate) /
      uploads_per_tick;

  size_t total_uploads = NumBlockingUploads() + uploads_per_tick;
  return gfx::FrameTime::Now() + upload_one_texture_time * total_uploads;
}
+
// Looks up |id|; CHECK-fails (release-mode crash) on an unknown id rather
// than returning null.
ResourceProvider::Resource* ResourceProvider::GetResource(ResourceId id) {
  DCHECK(thread_checker_.CalledOnValidThread());
  ResourceMap::iterator it = resources_.find(id);
  CHECK(it != resources_.end());
  return &it->second;
}
+
// Takes a read lock on |id|, lazily materializing external backing on
// first use: consumes a texture mailbox into a GL texture, or resolves a
// SharedBitmapId into pixel memory. Read locks nest (a counter).
const ResourceProvider::Resource* ResourceProvider::LockForRead(ResourceId id) {
  Resource* resource = GetResource(id);
  DCHECK(!resource->locked_for_write ||
         resource->set_pixels_completion_forced) <<
      "locked for write: " << resource->locked_for_write <<
      " pixels completion forced: " << resource->set_pixels_completion_forced;
  DCHECK_EQ(resource->exported_count, 0);
  // Uninitialized! Call SetPixels or LockForWrite first.
  DCHECK(resource->allocated);

  LazyCreate(resource);

  if (resource->type == GLTexture && !resource->gl_id) {
    // External texture mailbox not yet consumed into a local texture id.
    DCHECK(resource->origin != Resource::Internal);
    DCHECK(resource->mailbox.IsTexture());

    // Mailbox sync_points must be processed by a call to
    // WaitSyncPointIfNeeded() prior to calling LockForRead().
    DCHECK(!resource->mailbox.sync_point());

    GLES2Interface* gl = ContextGL();
    DCHECK(gl);
    resource->gl_id = texture_id_allocator_->NextId();
    GLC(gl, gl->BindTexture(resource->target, resource->gl_id));
    GLC(gl,
        gl->ConsumeTextureCHROMIUM(resource->mailbox.target(),
                                   resource->mailbox.name()));
  }

  if (!resource->pixels && resource->has_shared_bitmap_id &&
      shared_bitmap_manager_) {
    // Software resource known only by id: resolve to actual pixel memory.
    scoped_ptr<SharedBitmap> bitmap =
        shared_bitmap_manager_->GetSharedBitmapFromId(
            resource->size, resource->shared_bitmap_id);
    if (bitmap) {
      resource->shared_bitmap = bitmap.release();
      resource->pixels = resource->shared_bitmap->pixels();
    }
  }

  resource->lock_for_read_count++;
  if (resource->read_lock_fences_enabled) {
    // Record the current fence so writers must wait for this read.
    if (current_read_lock_fence_.get())
      current_read_lock_fence_->Set();
    resource->read_lock_fence = current_read_lock_fence_;
  }

  return resource;
}
+
// Drops one read lock. When the last lock is released on a resource
// already marked for deletion, the resource is destroyed (own resources)
// or returned to the child that supplied it.
void ResourceProvider::UnlockForRead(ResourceId id) {
  DCHECK(thread_checker_.CalledOnValidThread());
  ResourceMap::iterator it = resources_.find(id);
  CHECK(it != resources_.end());

  Resource* resource = &it->second;
  DCHECK_GT(resource->lock_for_read_count, 0);
  DCHECK_EQ(resource->exported_count, 0);
  resource->lock_for_read_count--;
  if (resource->marked_for_deletion && !resource->lock_for_read_count) {
    if (!resource->child_id) {
      // The resource belongs to this ResourceProvider, so it can be destroyed.
      DeleteResourceInternal(it, Normal);
    } else {
      ChildMap::iterator child_it = children_.find(resource->child_id);
      ResourceIdArray unused;
      unused.push_back(id);
      DeleteAndReturnUnusedResourcesToChild(child_it, Normal, unused);
    }
  }
}
+
// Takes the (exclusive) write lock on |id|. Only internal, non-lost
// resources with no readers, no export, and a passed read-lock fence may
// be written. Allocates backing storage lazily.
const ResourceProvider::Resource* ResourceProvider::LockForWrite(
    ResourceId id) {
  Resource* resource = GetResource(id);
  DCHECK(!resource->locked_for_write);
  DCHECK(!resource->lock_for_read_count);
  DCHECK_EQ(resource->exported_count, 0);
  DCHECK(resource->origin == Resource::Internal);
  DCHECK(!resource->lost);
  DCHECK(ReadLockFenceHasPassed(resource));
  LazyAllocate(resource);

  resource->locked_for_write = true;
  return resource;
}
+
+bool ResourceProvider::CanLockForWrite(ResourceId id) {
+ Resource* resource = GetResource(id);
+ return !resource->locked_for_write && !resource->lock_for_read_count &&
+ !resource->exported_count && resource->origin == Resource::Internal &&
+ !resource->lost && ReadLockFenceHasPassed(resource);
+}
+
// Releases the exclusive write lock taken by LockForWrite().
void ResourceProvider::UnlockForWrite(ResourceId id) {
  Resource* resource = GetResource(id);
  DCHECK(resource->locked_for_write);
  DCHECK_EQ(resource->exported_count, 0);
  DCHECK(resource->origin == Resource::Internal);
  resource->locked_for_write = false;
}
+
+// RAII read lock over a GL-backed resource: acquires the read lock and
+// caches the texture id for the duration of the scope.
+ResourceProvider::ScopedReadLockGL::ScopedReadLockGL(
+    ResourceProvider* resource_provider,
+    ResourceProvider::ResourceId resource_id)
+    : resource_provider_(resource_provider),
+      resource_id_(resource_id),
+      texture_id_(resource_provider->LockForRead(resource_id)->gl_id) {
+  DCHECK(texture_id_);
+}
+
+// Releases the read lock acquired in the constructor.
+ResourceProvider::ScopedReadLockGL::~ScopedReadLockGL() {
+  resource_provider_->UnlockForRead(resource_id_);
+}
+
+// Read-locks the resource and binds it for sampling on the default texture
+// unit (GL_TEXTURE0) with the given filter.
+ResourceProvider::ScopedSamplerGL::ScopedSamplerGL(
+    ResourceProvider* resource_provider,
+    ResourceProvider::ResourceId resource_id,
+    GLenum filter)
+    : ScopedReadLockGL(resource_provider, resource_id),
+      unit_(GL_TEXTURE0),
+      target_(resource_provider->BindForSampling(resource_id, unit_, filter)) {
+}
+
+// Same as above, but binds on an explicit texture |unit|.
+ResourceProvider::ScopedSamplerGL::ScopedSamplerGL(
+    ResourceProvider* resource_provider,
+    ResourceProvider::ResourceId resource_id,
+    GLenum unit,
+    GLenum filter)
+    : ScopedReadLockGL(resource_provider, resource_id),
+      unit_(unit),
+      target_(resource_provider->BindForSampling(resource_id, unit_, filter)) {
+}
+
+// The base class destructor releases the read lock; nothing extra to undo.
+ResourceProvider::ScopedSamplerGL::~ScopedSamplerGL() {
+}
+
+// RAII write lock over a GL-backed resource: acquires the write lock and
+// caches the texture id for the duration of the scope.
+ResourceProvider::ScopedWriteLockGL::ScopedWriteLockGL(
+    ResourceProvider* resource_provider,
+    ResourceProvider::ResourceId resource_id)
+    : resource_provider_(resource_provider),
+      resource_id_(resource_id),
+      texture_id_(resource_provider->LockForWrite(resource_id)->gl_id) {
+  DCHECK(texture_id_);
+}
+
+// Releases the write lock acquired in the constructor.
+ResourceProvider::ScopedWriteLockGL::~ScopedWriteLockGL() {
+  resource_provider_->UnlockForRead(resource_id_);
+}
+
+// Wraps |resource|'s pixel memory in |sk_bitmap| without copying. Only valid
+// for RGBA_8888 resources, which match Skia's native N32 layout here.
+void ResourceProvider::PopulateSkBitmapWithResource(
+    SkBitmap* sk_bitmap, const Resource* resource) {
+  DCHECK_EQ(RGBA_8888, resource->format);
+  const int width = resource->size.width();
+  const int height = resource->size.height();
+  SkImageInfo info = SkImageInfo::MakeN32Premul(width, height);
+  sk_bitmap->installPixels(info, resource->pixels, info.minRowBytes());
+}
+
+// RAII read lock over a software resource: acquires the read lock and wraps
+// the resource's pixels in an SkBitmap (no copy) for the scope's duration.
+ResourceProvider::ScopedReadLockSoftware::ScopedReadLockSoftware(
+    ResourceProvider* resource_provider,
+    ResourceProvider::ResourceId resource_id)
+    : resource_provider_(resource_provider),
+      resource_id_(resource_id) {
+  const Resource* resource = resource_provider->LockForRead(resource_id);
+  wrap_mode_ = resource->wrap_mode;
+  ResourceProvider::PopulateSkBitmapWithResource(&sk_bitmap_, resource);
+}
+
+// Releases the read lock acquired in the constructor.
+ResourceProvider::ScopedReadLockSoftware::~ScopedReadLockSoftware() {
+  resource_provider_->UnlockForRead(resource_id_);
+}
+
+// RAII write lock over a software resource: acquires the write lock, wraps
+// the pixels in an SkBitmap, and provides an SkCanvas for drawing into them.
+ResourceProvider::ScopedWriteLockSoftware::ScopedWriteLockSoftware(
+    ResourceProvider* resource_provider,
+    ResourceProvider::ResourceId resource_id)
+    : resource_provider_(resource_provider),
+      resource_id_(resource_id) {
+  ResourceProvider::PopulateSkBitmapWithResource(
+      &sk_bitmap_, resource_provider->LockForWrite(resource_id));
+  DCHECK(valid());
+  sk_canvas_.reset(new SkCanvas(sk_bitmap_));
+}
+
+// Releases the write lock acquired in the constructor.
+ResourceProvider::ScopedWriteLockSoftware::~ScopedWriteLockSoftware() {
+  resource_provider_->UnlockForWrite(resource_id_);
+}
+
+// Constructs a provider in an uninitialized state (InvalidType); callers must
+// follow up with InitializeGL() or InitializeSoftware() before creating
+// resources. Ids for resources and children start at 1 so 0 means "none".
+ResourceProvider::ResourceProvider(
+    OutputSurface* output_surface,
+    SharedBitmapManager* shared_bitmap_manager,
+    BlockingTaskRunner* blocking_main_thread_task_runner,
+    int highp_threshold_min,
+    bool use_rgba_4444_texture_format,
+    size_t id_allocation_chunk_size,
+    bool use_distance_field_text)
+    : output_surface_(output_surface),
+      shared_bitmap_manager_(shared_bitmap_manager),
+      blocking_main_thread_task_runner_(blocking_main_thread_task_runner),
+      lost_output_surface_(false),
+      highp_threshold_min_(highp_threshold_min),
+      next_id_(1),
+      next_child_(1),
+      default_resource_type_(InvalidType),
+      use_texture_storage_ext_(false),
+      use_texture_format_bgra_(false),
+      use_texture_usage_hint_(false),
+      use_compressed_texture_etc1_(false),
+      max_texture_size_(0),
+      best_texture_format_(RGBA_8888),
+      use_rgba_4444_texture_format_(use_rgba_4444_texture_format),
+      id_allocation_chunk_size_(id_allocation_chunk_size),
+      use_sync_query_(false),
+      use_distance_field_text_(use_distance_field_text) {
+  DCHECK(output_surface_->HasClient());
+  DCHECK(id_allocation_chunk_size_);
+}
+
+// Switches the provider to software (shared-bitmap) mode, tearing down any
+// GL state first. Must not already be in Bitmap mode.
+void ResourceProvider::InitializeSoftware() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK_NE(Bitmap, default_resource_type_);
+
+  CleanUpGLIfNeeded();
+
+  default_resource_type_ = Bitmap;
+  // Pick an arbitrary limit here similar to what hardware might.
+  max_texture_size_ = 16 * 1024;
+  best_texture_format_ = RGBA_8888;
+}
+
+// Switches the provider to GL mode: caches the context's capabilities,
+// queries the real max texture size, and sets up the texture uploader and
+// id allocators. Must not already be in GLTexture mode.
+void ResourceProvider::InitializeGL() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(!texture_uploader_);
+  DCHECK_NE(GLTexture, default_resource_type_);
+  DCHECK(!texture_id_allocator_);
+  DCHECK(!buffer_id_allocator_);
+
+  default_resource_type_ = GLTexture;
+
+  const ContextProvider::Capabilities& caps =
+      output_surface_->context_provider()->ContextCapabilities();
+
+  bool use_bgra = caps.gpu.texture_format_bgra8888;
+  use_texture_storage_ext_ = caps.gpu.texture_storage;
+  use_texture_format_bgra_ = caps.gpu.texture_format_bgra8888;
+  use_texture_usage_hint_ = caps.gpu.texture_usage;
+  use_compressed_texture_etc1_ = caps.gpu.texture_format_etc1;
+  use_sync_query_ = caps.gpu.sync_query;
+
+  GLES2Interface* gl = ContextGL();
+  DCHECK(gl);
+
+  texture_uploader_ = TextureUploader::Create(gl);
+  max_texture_size_ = 0;  // Context expects cleared value.
+  GLC(gl, gl->GetIntegerv(GL_MAX_TEXTURE_SIZE, &max_texture_size_));
+  best_texture_format_ = PlatformColor::BestTextureFormat(use_bgra);
+
+  texture_id_allocator_.reset(
+      new TextureIdAllocator(gl, id_allocation_chunk_size_));
+  buffer_id_allocator_.reset(
+      new BufferIdAllocator(gl, id_allocation_chunk_size_));
+}
+
+// Tears down GL-mode state (uploader and id allocators) if the provider is
+// currently in GL mode; no-op otherwise. All GL resources must have been
+// deleted by the caller before this runs.
+void ResourceProvider::CleanUpGLIfNeeded() {
+  GLES2Interface* gl = ContextGL();
+  if (default_resource_type_ != GLTexture) {
+    // We are not in GL mode, but double check before returning.
+    DCHECK(!gl);
+    DCHECK(!texture_uploader_);
+    return;
+  }
+
+  DCHECK(gl);
+#if DCHECK_IS_ON
+  // Check that all GL resources have been deleted.
+  for (ResourceMap::const_iterator itr = resources_.begin();
+       itr != resources_.end();
+       ++itr) {
+    DCHECK_NE(GLTexture, itr->second.type);
+  }
+#endif  // DCHECK_IS_ON
+
+  texture_uploader_ = nullptr;
+  texture_id_allocator_ = nullptr;
+  buffer_id_allocator_ = nullptr;
+  // Block until the GL work issued during teardown has completed.
+  gl->Finish();
+}
+
+// Registers a new child and returns its id. Returned resources for this
+// child will be delivered through |return_callback|.
+int ResourceProvider::CreateChild(const ReturnCallback& return_callback) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  int child_id = next_child_++;
+  Child& child_info = children_[child_id];
+  child_info.return_callback = return_callback;
+  return child_id;
+}
+
+// Destroys the child registered under |child_id|, returning its resources.
+void ResourceProvider::DestroyChild(int child_id) {
+  ChildMap::iterator child_it = children_.find(child_id);
+  DCHECK(child_it != children_.end());
+  DestroyChildInternal(child_it, Normal);
+}
+
+// Marks the child for deletion and tries to return every resource it owns.
+// Resources still exported or read-locked are deferred (unless |style| is
+// ForShutdown); the Child entry itself is erased once its maps drain.
+void ResourceProvider::DestroyChildInternal(ChildMap::iterator it,
+                                            DeleteStyle style) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  Child& child = it->second;
+  // A child may only be destroyed once, except during provider shutdown.
+  DCHECK(style == ForShutdown || !child.marked_for_deletion);
+
+  ResourceIdArray resources_for_child;
+
+  for (ResourceIdMap::iterator child_it = child.child_to_parent_map.begin();
+       child_it != child.child_to_parent_map.end();
+       ++child_it) {
+    ResourceId id = child_it->second;
+    resources_for_child.push_back(id);
+  }
+
+  // If the child is going away, don't consider any resources in use.
+  child.in_use_resources.clear();
+  child.marked_for_deletion = true;
+
+  DeleteAndReturnUnusedResourcesToChild(it, style, resources_for_child);
+}
+
+// Returns the child-id -> parent-id resource map for a live child.
+const ResourceProvider::ResourceIdMap& ResourceProvider::GetChildToParentMap(
+    int child) const {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  ChildMap::const_iterator child_it = children_.find(child);
+  DCHECK(child_it != children_.end());
+  const Child& child_info = child_it->second;
+  DCHECK(!child_info.marked_for_deletion);
+  return child_info.child_to_parent_map;
+}
+
+// Fills |list| with a TransferableResource for each id in |resources| and
+// bumps each resource's exported count. GL resources that lack a sync point
+// all share a single sync point inserted at the end, so at most one
+// InsertSyncPointCHROMIUM call is made per batch.
+void ResourceProvider::PrepareSendToParent(const ResourceIdArray& resources,
+                                           TransferableResourceArray* list) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  GLES2Interface* gl = ContextGL();
+  bool need_sync_point = false;
+  for (ResourceIdArray::const_iterator it = resources.begin();
+       it != resources.end();
+       ++it) {
+    TransferableResource resource;
+    TransferResource(gl, *it, &resource);
+    if (!resource.mailbox_holder.sync_point && !resource.is_software)
+      need_sync_point = true;
+    ++resources_.find(*it)->second.exported_count;
+    list->push_back(resource);
+  }
+  if (need_sync_point) {
+    GLuint sync_point = gl->InsertSyncPointCHROMIUM();
+    // Backfill the shared sync point into every entry that needed one.
+    for (TransferableResourceArray::iterator it = list->begin();
+         it != list->end();
+         ++it) {
+      if (!it->mailbox_holder.sync_point)
+        it->mailbox_holder.sync_point = sync_point;
+    }
+  }
+}
+
+// Imports resources transferred from |child|. A resource already known to
+// this child just has its imported count bumped; otherwise a new local
+// Resource is created (software shared-bitmap or GL mailbox variant) and the
+// two-way id maps are updated. Resources we cannot host (no GL context for
+// textures, or no bitmap manager for software) are returned immediately.
+void ResourceProvider::ReceiveFromChild(
+    int child, const TransferableResourceArray& resources) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  GLES2Interface* gl = ContextGL();
+  Child& child_info = children_.find(child)->second;
+  DCHECK(!child_info.marked_for_deletion);
+  for (TransferableResourceArray::const_iterator it = resources.begin();
+       it != resources.end();
+       ++it) {
+    ResourceIdMap::iterator resource_in_map_it =
+        child_info.child_to_parent_map.find(it->id);
+    if (resource_in_map_it != child_info.child_to_parent_map.end()) {
+      // Re-import of a resource we already track: revive it if it was
+      // pending deletion and count the extra reference.
+      Resource& resource = resources_[resource_in_map_it->second];
+      resource.marked_for_deletion = false;
+      resource.imported_count++;
+      continue;
+    }
+
+    if ((!it->is_software && !gl) ||
+        (it->is_software && !shared_bitmap_manager_)) {
+      TRACE_EVENT0("cc", "ResourceProvider::ReceiveFromChild dropping invalid");
+      ReturnedResourceArray to_return;
+      to_return.push_back(it->ToReturnedResource());
+      child_info.return_callback.Run(to_return,
+                                     blocking_main_thread_task_runner_);
+      continue;
+    }
+
+    ResourceId local_id = next_id_++;
+    Resource& resource = resources_[local_id];
+    if (it->is_software) {
+      resource = Resource(it->mailbox_holder.mailbox,
+                          it->size,
+                          Resource::Delegated,
+                          GL_LINEAR,
+                          it->is_repeated ? GL_REPEAT : GL_CLAMP_TO_EDGE);
+    } else {
+      resource = Resource(0,
+                          it->size,
+                          Resource::Delegated,
+                          it->mailbox_holder.texture_target,
+                          it->filter,
+                          0,
+                          it->is_repeated ? GL_REPEAT : GL_CLAMP_TO_EDGE,
+                          TextureHintImmutable,
+                          it->format);
+      resource.mailbox = TextureMailbox(it->mailbox_holder.mailbox,
+                                        it->mailbox_holder.texture_target,
+                                        it->mailbox_holder.sync_point);
+    }
+    resource.child_id = child;
+    // Don't allocate a texture for a child.
+    resource.allocated = true;
+    resource.imported_count = 1;
+    resource.allow_overlay = it->allow_overlay;
+    child_info.parent_to_child_map[local_id] = it->id;
+    child_info.child_to_parent_map[it->id] = local_id;
+  }
+}
+
+// Replaces the child's in-use set with |resources_from_child| and returns
+// every resource of that child which is no longer in use.
+void ResourceProvider::DeclareUsedResourcesFromChild(
+    int child,
+    const ResourceIdArray& resources_from_child) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  ChildMap::iterator child_it = children_.find(child);
+  DCHECK(child_it != children_.end());
+  Child& child_info = child_it->second;
+  DCHECK(!child_info.marked_for_deletion);
+
+  // Rebuild the in-use set from scratch from the declared list.
+  child_info.in_use_resources.clear();
+  for (size_t i = 0; i < resources_from_child.size(); ++i) {
+    ResourceIdMap::iterator map_it =
+        child_info.child_to_parent_map.find(resources_from_child[i]);
+    DCHECK(map_it != child_info.child_to_parent_map.end());
+
+    ResourceId local_id = map_it->second;
+    DCHECK(!resources_[local_id].marked_for_deletion);
+    child_info.in_use_resources.insert(local_id);
+  }
+
+  // Everything the child owns that is not in the new in-use set is unused.
+  ResourceIdArray unused;
+  for (ResourceIdMap::iterator map_it = child_info.child_to_parent_map.begin();
+       map_it != child_info.child_to_parent_map.end();
+       ++map_it) {
+    ResourceId local_id = map_it->second;
+    if (!child_info.in_use_resources.count(local_id))
+      unused.push_back(local_id);
+  }
+  DeleteAndReturnUnusedResourcesToChild(child_it, Normal, unused);
+}
+
+// static
+// Comparator used to group returned resources by their owning child, so each
+// child's resources can be handed back in a single batch.
+bool ResourceProvider::CompareResourceMapIteratorsByChildId(
+    const std::pair<ReturnedResource, ResourceMap::iterator>& a,
+    const std::pair<ReturnedResource, ResourceMap::iterator>& b) {
+  return a.second->second.child_id < b.second->second.child_id;
+}
+
+// Processes resources the parent compositor has returned: decrements export
+// counts, restores sync points and lost state, and for fully-returned
+// resources that were pending deletion either destroys them (ours) or batches
+// them back to the owning child. Returns are sorted by child id so each child
+// receives one batch.
+void ResourceProvider::ReceiveReturnsFromParent(
+    const ReturnedResourceArray& resources) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  GLES2Interface* gl = ContextGL();
+
+  int child_id = 0;
+  ResourceIdArray resources_for_child;
+
+  std::vector<std::pair<ReturnedResource, ResourceMap::iterator> >
+      sorted_resources;
+
+  for (ReturnedResourceArray::const_iterator it = resources.begin();
+       it != resources.end();
+       ++it) {
+    ResourceId local_id = it->id;
+    ResourceMap::iterator map_iterator = resources_.find(local_id);
+
+    // Resource was already lost (e.g. it belonged to a child that was
+    // destroyed).
+    if (map_iterator == resources_.end())
+      continue;
+
+    sorted_resources.push_back(
+        std::pair<ReturnedResource, ResourceMap::iterator>(*it, map_iterator));
+  }
+
+  // Group by child id so the per-child batches below are contiguous.
+  std::sort(sorted_resources.begin(),
+            sorted_resources.end(),
+            CompareResourceMapIteratorsByChildId);
+
+  ChildMap::iterator child_it = children_.end();
+  for (size_t i = 0; i < sorted_resources.size(); ++i) {
+    ReturnedResource& returned = sorted_resources[i].first;
+    ResourceMap::iterator& map_iterator = sorted_resources[i].second;
+    ResourceId local_id = map_iterator->first;
+    Resource* resource = &map_iterator->second;
+
+    CHECK_GE(resource->exported_count, returned.count);
+    resource->exported_count -= returned.count;
+    resource->lost |= returned.lost;
+    // Still exported elsewhere; nothing more to do for it yet.
+    if (resource->exported_count)
+      continue;
+
+    // Need to wait for the current read lock fence to pass before we can
+    // recycle this resource.
+    if (resource->read_lock_fences_enabled) {
+      if (current_read_lock_fence_.get())
+        current_read_lock_fence_->Set();
+      resource->read_lock_fence = current_read_lock_fence_;
+    }
+
+    if (returned.sync_point) {
+      DCHECK(!resource->has_shared_bitmap_id);
+      if (resource->origin == Resource::Internal) {
+        // Our own texture: make the context wait before reusing it.
+        DCHECK(resource->gl_id);
+        GLC(gl, gl->WaitSyncPointCHROMIUM(returned.sync_point));
+      } else {
+        // Delegated/external: remember the sync point on the mailbox.
+        DCHECK(!resource->gl_id);
+        resource->mailbox.set_sync_point(returned.sync_point);
+      }
+    }
+
+    if (!resource->marked_for_deletion)
+      continue;
+
+    if (!resource->child_id) {
+      // The resource belongs to this ResourceProvider, so it can be destroyed.
+      DeleteResourceInternal(map_iterator, Normal);
+      continue;
+    }
+
+    DCHECK(resource->origin == Resource::Delegated);
+    // Delete the resource and return it to the child it came from.
+    if (resource->child_id != child_id) {
+      // Child boundary: flush the batch accumulated for the previous child.
+      if (child_id) {
+        DCHECK_NE(resources_for_child.size(), 0u);
+        DCHECK(child_it != children_.end());
+        DeleteAndReturnUnusedResourcesToChild(
+            child_it, Normal, resources_for_child);
+        resources_for_child.clear();
+      }
+
+      child_it = children_.find(resource->child_id);
+      DCHECK(child_it != children_.end());
+      child_id = resource->child_id;
+    }
+    resources_for_child.push_back(local_id);
+  }
+
+  // Flush the final child's batch.
+  if (child_id) {
+    DCHECK_NE(resources_for_child.size(), 0u);
+    DCHECK(child_it != children_.end());
+    DeleteAndReturnUnusedResourcesToChild(
+        child_it, Normal, resources_for_child);
+  }
+}
+
+// Fills |resource| with everything needed to hand |id| to the parent. For
+// software resources this is the shared bitmap id; for GL resources a mailbox
+// is produced on first export and reused afterwards (forwarding the stored
+// sync point and then consuming it).
+void ResourceProvider::TransferResource(GLES2Interface* gl,
+                                        ResourceId id,
+                                        TransferableResource* resource) {
+  Resource* source = GetResource(id);
+  DCHECK(!source->locked_for_write);
+  DCHECK(!source->lock_for_read_count);
+  DCHECK(source->origin != Resource::External || source->mailbox.IsValid());
+  DCHECK(source->allocated);
+  resource->id = id;
+  resource->format = source->format;
+  resource->mailbox_holder.texture_target = source->target;
+  resource->filter = source->filter;
+  resource->size = source->size;
+  resource->is_repeated = (source->wrap_mode == GL_REPEAT);
+  resource->allow_overlay = source->allow_overlay;
+
+  if (source->type == Bitmap) {
+    // Software path: the shared bitmap id travels in the mailbox slot.
+    resource->mailbox_holder.mailbox = source->shared_bitmap_id;
+    resource->is_software = true;
+  } else if (!source->mailbox.IsValid()) {
+    LazyCreate(source);
+    DCHECK(source->gl_id);
+    DCHECK(source->origin == Resource::Internal);
+    GLC(gl,
+        gl->BindTexture(resource->mailbox_holder.texture_target,
+                        source->gl_id));
+    if (source->image_id) {
+      DCHECK(source->dirty_image);
+      BindImageForSampling(source);
+    }
+    // This is a resource allocated by the compositor, we need to produce it.
+    // Don't set a sync point, the caller will do it.
+    GLC(gl, gl->GenMailboxCHROMIUM(resource->mailbox_holder.mailbox.name));
+    GLC(gl,
+        gl->ProduceTextureCHROMIUM(resource->mailbox_holder.texture_target,
+                                   resource->mailbox_holder.mailbox.name));
+    // Remember the mailbox so future exports reuse it.
+    source->mailbox = TextureMailbox(resource->mailbox_holder);
+  } else {
+    DCHECK(source->mailbox.IsTexture());
+    if (source->image_id && source->dirty_image) {
+      DCHECK(source->gl_id);
+      DCHECK(source->origin == Resource::Internal);
+      GLC(gl,
+          gl->BindTexture(resource->mailbox_holder.texture_target,
+                          source->gl_id));
+      BindImageForSampling(source);
+    }
+    // This is either an external resource, or a compositor resource that we
+    // already exported. Make sure to forward the sync point that we were given.
+    resource->mailbox_holder.mailbox = source->mailbox.mailbox();
+    resource->mailbox_holder.texture_target = source->mailbox.target();
+    resource->mailbox_holder.sync_point = source->mailbox.sync_point();
+    // The sync point has been handed off; don't forward it a second time.
+    source->mailbox.set_sync_point(0);
+  }
+}
+
+// Deletes the resources in |unused| and sends the matching ReturnedResource
+// entries to the child's return callback. Resources still exported or
+// read-locked are deferred via marked_for_deletion (unless shutting down, in
+// which case they are returned as lost). Filters modified while the resource
+// was here are restored before returning, a single sync point is shared by
+// all GL returns that need one, and the Child entry itself is erased once it
+// was marked for deletion and its maps are empty.
+void ResourceProvider::DeleteAndReturnUnusedResourcesToChild(
+    ChildMap::iterator child_it,
+    DeleteStyle style,
+    const ResourceIdArray& unused) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(child_it != children_.end());
+  Child* child_info = &child_it->second;
+
+  if (unused.empty() && !child_info->marked_for_deletion)
+    return;
+
+  ReturnedResourceArray to_return;
+
+  GLES2Interface* gl = ContextGL();
+  bool need_sync_point = false;
+  for (size_t i = 0; i < unused.size(); ++i) {
+    ResourceId local_id = unused[i];
+
+    ResourceMap::iterator it = resources_.find(local_id);
+    CHECK(it != resources_.end());
+    Resource& resource = it->second;
+
+    DCHECK(!resource.locked_for_write);
+    DCHECK_EQ(0u, child_info->in_use_resources.count(local_id));
+    DCHECK(child_info->parent_to_child_map.count(local_id));
+
+    ResourceId child_id = child_info->parent_to_child_map[local_id];
+    DCHECK(child_info->child_to_parent_map.count(child_id));
+
+    bool is_lost =
+        resource.lost || (resource.type == GLTexture && lost_output_surface_);
+    if (resource.exported_count > 0 || resource.lock_for_read_count > 0) {
+      if (style != ForShutdown) {
+        // Defer this until we receive the resource back from the parent or
+        // the read lock is released.
+        resource.marked_for_deletion = true;
+        continue;
+      }
+
+      // We still have an exported_count, so we'll have to lose it.
+      is_lost = true;
+    }
+
+    if (gl && resource.filter != resource.original_filter) {
+      // Restore the filter we changed before giving the texture back.
+      DCHECK(resource.target);
+      DCHECK(resource.gl_id);
+
+      GLC(gl, gl->BindTexture(resource.target, resource.gl_id));
+      GLC(gl,
+          gl->TexParameteri(resource.target,
+                            GL_TEXTURE_MIN_FILTER,
+                            resource.original_filter));
+      GLC(gl,
+          gl->TexParameteri(resource.target,
+                            GL_TEXTURE_MAG_FILTER,
+                            resource.original_filter));
+    }
+
+    ReturnedResource returned;
+    returned.id = child_id;
+    returned.sync_point = resource.mailbox.sync_point();
+    if (!returned.sync_point && resource.type == GLTexture)
+      need_sync_point = true;
+    returned.count = resource.imported_count;
+    returned.lost = is_lost;
+    to_return.push_back(returned);
+
+    child_info->parent_to_child_map.erase(local_id);
+    child_info->child_to_parent_map.erase(child_id);
+    resource.imported_count = 0;
+    DeleteResourceInternal(it, style);
+  }
+  if (need_sync_point) {
+    // One sync point covers every GL resource returned in this batch.
+    DCHECK(gl);
+    GLuint sync_point = gl->InsertSyncPointCHROMIUM();
+    for (size_t i = 0; i < to_return.size(); ++i) {
+      if (!to_return[i].sync_point)
+        to_return[i].sync_point = sync_point;
+    }
+  }
+
+  if (!to_return.empty())
+    child_info->return_callback.Run(to_return,
+                                    blocking_main_thread_task_runner_);
+
+  if (child_info->marked_for_deletion &&
+      child_info->parent_to_child_map.empty()) {
+    DCHECK(child_info->child_to_parent_map.empty());
+    children_.erase(child_it);
+  }
+}
+
+// Allocates (or re-sizes) the GL pixel-transfer buffer backing |id|, sized
+// for the resource with rows rounded up to a 4-byte boundary. GL-only;
+// ETC1 resources are not supported by this upload path.
+void ResourceProvider::AcquirePixelBuffer(ResourceId id) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
+               "ResourceProvider::AcquirePixelBuffer");
+
+  Resource* resource = GetResource(id);
+  DCHECK(resource->origin == Resource::Internal);
+  DCHECK_EQ(resource->exported_count, 0);
+  DCHECK(!resource->image_id);
+  DCHECK_NE(ETC1, resource->format);
+
+  DCHECK_EQ(GLTexture, resource->type);
+  GLES2Interface* gl = ContextGL();
+  DCHECK(gl);
+  if (!resource->gl_pixel_buffer_id)
+    resource->gl_pixel_buffer_id = buffer_id_allocator_->NextId();
+  gl->BindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM,
+                 resource->gl_pixel_buffer_id);
+  unsigned bytes_per_pixel = BitsPerPixel(resource->format) / 8;
+  // Row stride is rounded up to 4 bytes to satisfy unpack alignment.
+  gl->BufferData(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM,
+                 resource->size.height() *
+                     RoundUp(bytes_per_pixel * resource->size.width(), 4u),
+                 NULL,
+                 GL_DYNAMIC_DRAW);
+  gl->BindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, 0);
+}
+
+// Shrinks the pixel-transfer buffer for |id| to zero bytes (the buffer id is
+// kept for reuse). Safe to call with a forced-complete upload still pending.
+void ResourceProvider::ReleasePixelBuffer(ResourceId id) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
+               "ResourceProvider::ReleasePixelBuffer");
+
+  Resource* resource = GetResource(id);
+  DCHECK(resource->origin == Resource::Internal);
+  DCHECK_EQ(resource->exported_count, 0);
+  DCHECK(!resource->image_id);
+
+  // The pixel buffer can be released while there is a pending "set pixels"
+  // if completion has been forced. Any shared memory associated with this
+  // pixel buffer will not be freed until the waitAsyncTexImage2DCHROMIUM
+  // command has been processed on the service side. It is also safe to
+  // reuse any query id associated with this resource before they complete
+  // as each new query has a unique submit count.
+  if (resource->pending_set_pixels) {
+    DCHECK(resource->set_pixels_completion_forced);
+    resource->pending_set_pixels = false;
+    resource->locked_for_write = false;
+  }
+
+  DCHECK_EQ(GLTexture, resource->type);
+  if (!resource->gl_pixel_buffer_id)
+    return;
+  GLES2Interface* gl = ContextGL();
+  DCHECK(gl);
+  gl->BindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM,
+                 resource->gl_pixel_buffer_id);
+  // BufferData with size 0 releases the storage while keeping the buffer id.
+  gl->BufferData(
+      GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, 0, NULL, GL_DYNAMIC_DRAW);
+  gl->BindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, 0);
+}
+
+// Maps the pixel-transfer buffer for |id| into client memory for writing and
+// returns the pointer (4-byte aligned). |stride| is set to 0, meaning the
+// caller should assume tightly packed rows.
+uint8_t* ResourceProvider::MapPixelBuffer(ResourceId id, int* stride) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
+               "ResourceProvider::MapPixelBuffer");
+
+  Resource* resource = GetResource(id);
+  DCHECK(resource->origin == Resource::Internal);
+  DCHECK_EQ(resource->exported_count, 0);
+  DCHECK(!resource->image_id);
+
+  *stride = 0;
+  DCHECK_EQ(GLTexture, resource->type);
+  GLES2Interface* gl = ContextGL();
+  DCHECK(gl);
+  DCHECK(resource->gl_pixel_buffer_id);
+  gl->BindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM,
+                 resource->gl_pixel_buffer_id);
+  uint8_t* image = static_cast<uint8_t*>(gl->MapBufferCHROMIUM(
+      GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, GL_WRITE_ONLY));
+  gl->BindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, 0);
+  // Buffer is required to be 4-byte aligned.
+  CHECK(!(reinterpret_cast<intptr_t>(image) & 3));
+  return image;
+}
+
+// Unmaps the pixel-transfer buffer previously mapped by MapPixelBuffer().
+void ResourceProvider::UnmapPixelBuffer(ResourceId id) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
+               "ResourceProvider::UnmapPixelBuffer");
+
+  Resource* resource = GetResource(id);
+  DCHECK(resource->origin == Resource::Internal);
+  DCHECK_EQ(resource->exported_count, 0);
+  DCHECK(!resource->image_id);
+
+  DCHECK_EQ(GLTexture, resource->type);
+  GLES2Interface* gl = ContextGL();
+  DCHECK(gl);
+  DCHECK(resource->gl_pixel_buffer_id);
+  gl->BindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM,
+                 resource->gl_pixel_buffer_id);
+  gl->UnmapBufferCHROMIUM(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM);
+  gl->BindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, 0);
+}
+
+// Binds |resource_id|'s texture on |unit| for sampling, updating its min/mag
+// filter if |filter| differs and rebinding a dirty zero-copy image if needed.
+// Returns the texture target bound. Caller must already hold a read lock.
+GLenum ResourceProvider::BindForSampling(ResourceId resource_id,
+                                         GLenum unit,
+                                         GLenum filter) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  GLES2Interface* gl = ContextGL();
+  ResourceMap::iterator it = resources_.find(resource_id);
+  DCHECK(it != resources_.end());
+  Resource* resource = &it->second;
+  DCHECK(resource->lock_for_read_count);
+  DCHECK(!resource->locked_for_write || resource->set_pixels_completion_forced);
+
+  // Restores the previously active texture unit when it goes out of scope.
+  ScopedSetActiveTexture scoped_active_tex(gl, unit);
+  GLenum target = resource->target;
+  GLC(gl, gl->BindTexture(target, resource->gl_id));
+  if (filter != resource->filter) {
+    GLC(gl, gl->TexParameteri(target, GL_TEXTURE_MIN_FILTER, filter));
+    GLC(gl, gl->TexParameteri(target, GL_TEXTURE_MAG_FILTER, filter));
+    resource->filter = filter;
+  }
+
+  if (resource->image_id && resource->dirty_image)
+    BindImageForSampling(resource);
+
+  return target;
+}
+
+// Kicks off an async upload from the resource's pixel buffer into its
+// texture, bracketed by an async-upload query so completion can be polled
+// via DidSetPixelsComplete(). The resource stays write-locked until then.
+void ResourceProvider::BeginSetPixels(ResourceId id) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
+               "ResourceProvider::BeginSetPixels");
+
+  Resource* resource = GetResource(id);
+  DCHECK(!resource->pending_set_pixels);
+
+  LazyCreate(resource);
+  DCHECK(resource->origin == Resource::Internal);
+  DCHECK(resource->gl_id || resource->allocated);
+  DCHECK(ReadLockFenceHasPassed(resource));
+  DCHECK(!resource->image_id);
+
+  // First upload must allocate the texture (AsyncTexImage2D); later uploads
+  // only update it (AsyncTexSubImage2D).
+  bool allocate = !resource->allocated;
+  resource->allocated = true;
+  LockForWrite(id);
+
+  DCHECK_EQ(GLTexture, resource->type);
+  DCHECK(resource->gl_id);
+  GLES2Interface* gl = ContextGL();
+  DCHECK(gl);
+  DCHECK(resource->gl_pixel_buffer_id);
+  DCHECK_EQ(resource->target, static_cast<GLenum>(GL_TEXTURE_2D));
+  gl->BindTexture(GL_TEXTURE_2D, resource->gl_id);
+  gl->BindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM,
+                 resource->gl_pixel_buffer_id);
+  if (!resource->gl_upload_query_id)
+    gl->GenQueriesEXT(1, &resource->gl_upload_query_id);
+  gl->BeginQueryEXT(GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM,
+                    resource->gl_upload_query_id);
+  if (allocate) {
+    gl->AsyncTexImage2DCHROMIUM(GL_TEXTURE_2D,
+                                0, /* level */
+                                GLInternalFormat(resource->format),
+                                resource->size.width(),
+                                resource->size.height(),
+                                0, /* border */
+                                GLDataFormat(resource->format),
+                                GLDataType(resource->format),
+                                NULL);
+  } else {
+    gl->AsyncTexSubImage2DCHROMIUM(GL_TEXTURE_2D,
+                                   0, /* level */
+                                   0, /* x */
+                                   0, /* y */
+                                   resource->size.width(),
+                                   resource->size.height(),
+                                   GLDataFormat(resource->format),
+                                   GLDataType(resource->format),
+                                   NULL);
+  }
+  gl->EndQueryEXT(GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM);
+  gl->BindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, 0);
+
+  resource->pending_set_pixels = true;
+  resource->set_pixels_completion_forced = false;
+}
+
+// Forces a pending async upload to complete by issuing a blocking
+// WaitAsyncTexImage2DCHROMIUM on the texture, then records that completion
+// was forced so the resource may be sampled while still write-locked.
+void ResourceProvider::ForceSetPixelsToComplete(ResourceId id) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
+               "ResourceProvider::ForceSetPixelsToComplete");
+
+  Resource* resource = GetResource(id);
+
+  DCHECK(resource->locked_for_write);
+  DCHECK(resource->pending_set_pixels);
+  DCHECK(!resource->set_pixels_completion_forced);
+
+  if (resource->gl_id) {
+    GLES2Interface* gl = ContextGL();
+    GLC(gl, gl->BindTexture(GL_TEXTURE_2D, resource->gl_id));
+    GLC(gl, gl->WaitAsyncTexImage2DCHROMIUM(GL_TEXTURE_2D));
+    GLC(gl, gl->BindTexture(GL_TEXTURE_2D, 0));
+  }
+
+  resource->set_pixels_completion_forced = true;
+}
+
+// Polls the async-upload query for |id|. Returns false while the upload is
+// still in flight; on completion clears the pending state, releases the
+// write lock, and returns true.
+bool ResourceProvider::DidSetPixelsComplete(ResourceId id) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
+               "ResourceProvider::DidSetPixelsComplete");
+
+  Resource* resource = GetResource(id);
+
+  DCHECK(resource->locked_for_write);
+  DCHECK(resource->pending_set_pixels);
+
+  if (resource->gl_id) {
+    GLES2Interface* gl = ContextGL();
+    DCHECK(gl);
+    DCHECK(resource->gl_upload_query_id);
+    GLuint complete = 1;
+    // Non-blocking availability check on the upload query.
+    gl->GetQueryObjectuivEXT(
+        resource->gl_upload_query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete);
+    if (!complete)
+      return false;
+  }
+
+  resource->pending_set_pixels = false;
+  UnlockForWrite(id);
+
+  return true;
+}
+
+// Test hook: forces the GL texture object for |id| to exist.
+void ResourceProvider::CreateForTesting(ResourceId id) {
+  Resource* resource = GetResource(id);
+  LazyCreate(resource);
+}
+
+// Test hook: exposes the GL texture target of |id|.
+GLenum ResourceProvider::TargetForTesting(ResourceId id) {
+  return GetResource(id)->target;
+}
+
+// Creates the GL texture object for |resource| if it does not exist yet and
+// sets its sampling/wrap/pool parameters. Only internal GLTexture resources
+// are affected; storage allocation is deferred to LazyAllocate().
+void ResourceProvider::LazyCreate(Resource* resource) {
+  if (resource->type != GLTexture || resource->origin != Resource::Internal)
+    return;
+
+  if (resource->gl_id)
+    return;
+
+  DCHECK(resource->texture_pool);
+  // Note: origin == Resource::Internal is already guaranteed by the early
+  // return above, so no separate assertion is needed here.
+  DCHECK(!resource->mailbox.IsValid());
+  resource->gl_id = texture_id_allocator_->NextId();
+
+  GLES2Interface* gl = ContextGL();
+  DCHECK(gl);
+
+  // Create and set texture properties. Allocation is delayed until needed.
+  GLC(gl, gl->BindTexture(resource->target, resource->gl_id));
+  GLC(gl,
+      gl->TexParameteri(
+          resource->target, GL_TEXTURE_MIN_FILTER, resource->original_filter));
+  GLC(gl,
+      gl->TexParameteri(
+          resource->target, GL_TEXTURE_MAG_FILTER, resource->original_filter));
+  GLC(gl,
+      gl->TexParameteri(
+          resource->target, GL_TEXTURE_WRAP_S, resource->wrap_mode));
+  GLC(gl,
+      gl->TexParameteri(
+          resource->target, GL_TEXTURE_WRAP_T, resource->wrap_mode));
+  GLC(gl,
+      gl->TexParameteri(
+          resource->target, GL_TEXTURE_POOL_CHROMIUM, resource->texture_pool));
+  if (use_texture_usage_hint_ && (resource->hint & TextureHintFramebuffer)) {
+    // Hint the driver that this texture will back a framebuffer attachment.
+    GLC(gl,
+        gl->TexParameteri(resource->target,
+                          GL_TEXTURE_USAGE_ANGLE,
+                          GL_FRAMEBUFFER_ATTACHMENT_ANGLE));
+  }
+}
+
+// Test hook: forces texture storage for |id| to be allocated.
+void ResourceProvider::AllocateForTesting(ResourceId id) {
+  Resource* resource = GetResource(id);
+  LazyAllocate(resource);
+}
+
+// Allocates texture storage for |resource| if not yet allocated. Immutable
+// resources use TexStorage2DEXT when the format/extension allow it; otherwise
+// a plain TexImage2D is issued (except ETC1, which cannot be preallocated).
+void ResourceProvider::LazyAllocate(Resource* resource) {
+  DCHECK(resource);
+  if (resource->allocated)
+    return;
+  LazyCreate(resource);
+  if (!resource->gl_id)
+    return;
+  resource->allocated = true;
+  GLES2Interface* gl = ContextGL();
+  // The size is only read here; bind it as const to make that explicit.
+  const gfx::Size& size = resource->size;
+  DCHECK_EQ(resource->target, static_cast<GLenum>(GL_TEXTURE_2D));
+  ResourceFormat format = resource->format;
+  GLC(gl, gl->BindTexture(GL_TEXTURE_2D, resource->gl_id));
+  if (use_texture_storage_ext_ &&
+      IsFormatSupportedForStorage(format, use_texture_format_bgra_) &&
+      (resource->hint & TextureHintImmutable)) {
+    GLenum storage_format = TextureToStorageFormat(format);
+    GLC(gl,
+        gl->TexStorage2DEXT(
+            GL_TEXTURE_2D, 1, storage_format, size.width(), size.height()));
+  } else {
+    // ETC1 does not support preallocation.
+    if (format != ETC1) {
+      GLC(gl,
+          gl->TexImage2D(GL_TEXTURE_2D,
+                         0,
+                         GLInternalFormat(format),
+                         size.width(),
+                         size.height(),
+                         0,
+                         GLDataFormat(format),
+                         GLDataType(format),
+                         NULL));
+    }
+  }
+}
+
+// (Re)binds the resource's zero-copy GL image to its currently bound texture,
+// releasing any previously bound image first, and clears the dirty flag.
+void ResourceProvider::BindImageForSampling(Resource* resource) {
+  GLES2Interface* gl = ContextGL();
+  DCHECK(resource->gl_id);
+  DCHECK(resource->image_id);
+
+  // Release image currently bound to texture.
+  if (resource->bound_image_id)
+    gl->ReleaseTexImage2DCHROMIUM(resource->target, resource->bound_image_id);
+  gl->BindTexImage2DCHROMIUM(resource->target, resource->image_id);
+  resource->bound_image_id = resource->image_id;
+  resource->dirty_image = false;
+}
+
+// Enables read-lock fencing for |id|: future reads will attach the current
+// fence so the resource is not recycled until the fence passes.
+void ResourceProvider::EnableReadLockFences(ResourceId id) {
+  GetResource(id)->read_lock_fences_enabled = true;
+}
+
+// Creates a mappable zero-copy GL image for |id| if one does not already
+// exist. The resource is considered allocated once the image is created.
+void ResourceProvider::AcquireImage(ResourceId id) {
+  Resource* resource = GetResource(id);
+  DCHECK(resource->origin == Resource::Internal);
+  DCHECK_EQ(resource->exported_count, 0);
+  DCHECK_EQ(GLTexture, resource->type);
+
+  if (resource->image_id)
+    return;
+
+  resource->allocated = true;
+  GLES2Interface* gl = ContextGL();
+  DCHECK(gl);
+  resource->image_id =
+      gl->CreateImageCHROMIUM(resource->size.width(),
+                              resource->size.height(),
+                              TextureToStorageFormat(resource->format),
+                              GL_IMAGE_MAP_CHROMIUM);
+  DCHECK(resource->image_id);
+}
+
+// Destroys the zero-copy GL image for |id| (if any) and resets the related
+// state; the resource becomes unallocated again.
+void ResourceProvider::ReleaseImage(ResourceId id) {
+  Resource* resource = GetResource(id);
+  DCHECK(resource->origin == Resource::Internal);
+  DCHECK_EQ(resource->exported_count, 0);
+  DCHECK_EQ(GLTexture, resource->type);
+
+  if (!resource->image_id)
+    return;
+
+  GLES2Interface* gl = ContextGL();
+  DCHECK(gl);
+  gl->DestroyImageCHROMIUM(resource->image_id);
+  resource->image_id = 0;
+  resource->bound_image_id = 0;
+  resource->dirty_image = false;
+  resource->allocated = false;
+}
+
+uint8_t* ResourceProvider::MapImage(ResourceId id, int* stride) {
+ Resource* resource = GetResource(id);
+ DCHECK(ReadLockFenceHasPassed(resource));
+ DCHECK(resource->origin == Resource::Internal);
+ DCHECK_EQ(resource->exported_count, 0);
+ DCHECK(resource->image_id);
+
+ LockForWrite(id);
+
+ GLES2Interface* gl = ContextGL();
+ DCHECK(gl);
+ // MapImageCHROMIUM should be called prior to GetImageParameterivCHROMIUM.
+ uint8_t* pixels =
+ static_cast<uint8_t*>(gl->MapImageCHROMIUM(resource->image_id));
+ gl->GetImageParameterivCHROMIUM(
+ resource->image_id, GL_IMAGE_ROWBYTES_CHROMIUM, stride);
+ return pixels;
+}
+
+void ResourceProvider::UnmapImage(ResourceId id) {
+ Resource* resource = GetResource(id);
+ DCHECK(resource->origin == Resource::Internal);
+ DCHECK_EQ(resource->exported_count, 0);
+ DCHECK(resource->image_id);
+ DCHECK(resource->locked_for_write);
+
+ GLES2Interface* gl = ContextGL();
+ DCHECK(gl);
+ gl->UnmapImageCHROMIUM(resource->image_id);
+ resource->dirty_image = true;
+
+ UnlockForWrite(id);
+}
+
+void ResourceProvider::AcquireSkSurface(ResourceId id) {
+ Resource* resource = GetResource(id);
+ DCHECK(resource->origin == Resource::Internal);
+ DCHECK_EQ(resource->exported_count, 0);
+ DCHECK_EQ(GLTexture, resource->type);
+
+ if (resource->sk_surface)
+ return;
+
+ class GrContext* gr_context = GrContext();
+ // TODO(alokp): Implement TestContextProvider::GrContext().
+ if (!gr_context)
+ return;
+
+ LazyAllocate(resource);
+
+ GrBackendTextureDesc desc;
+ desc.fFlags = kRenderTarget_GrBackendTextureFlag;
+ desc.fWidth = resource->size.width();
+ desc.fHeight = resource->size.height();
+ desc.fConfig = ToGrPixelConfig(resource->format);
+ desc.fOrigin = kTopLeft_GrSurfaceOrigin;
+ desc.fTextureHandle = resource->gl_id;
+ skia::RefPtr<GrTexture> gr_texture =
+ skia::AdoptRef(gr_context->wrapBackendTexture(desc));
+ SkSurface::TextRenderMode text_render_mode =
+ use_distance_field_text_ ? SkSurface::kDistanceField_TextRenderMode
+ : SkSurface::kStandard_TextRenderMode;
+ resource->sk_surface = skia::AdoptRef(SkSurface::NewRenderTargetDirect(
+ gr_texture->asRenderTarget(), text_render_mode));
+}
+
+void ResourceProvider::ReleaseSkSurface(ResourceId id) {
+ Resource* resource = GetResource(id);
+ DCHECK(resource->origin == Resource::Internal);
+ DCHECK_EQ(resource->exported_count, 0);
+ DCHECK_EQ(GLTexture, resource->type);
+
+ resource->sk_surface.clear();
+}
+
+SkSurface* ResourceProvider::LockForWriteToSkSurface(ResourceId id) {
+ Resource* resource = GetResource(id);
+ DCHECK(resource->origin == Resource::Internal);
+ DCHECK_EQ(resource->exported_count, 0);
+ DCHECK_EQ(GLTexture, resource->type);
+
+ LockForWrite(id);
+ return resource->sk_surface.get();
+}
+
+void ResourceProvider::UnlockForWriteToSkSurface(ResourceId id) {
+ UnlockForWrite(id);
+}
+
+void ResourceProvider::CopyResource(ResourceId source_id, ResourceId dest_id) {
+ TRACE_EVENT0("cc", "ResourceProvider::CopyResource");
+
+ Resource* source_resource = GetResource(source_id);
+ DCHECK(!source_resource->lock_for_read_count);
+ DCHECK(source_resource->origin == Resource::Internal);
+ DCHECK_EQ(source_resource->exported_count, 0);
+ DCHECK_EQ(GLTexture, source_resource->type);
+ DCHECK(source_resource->allocated);
+ LazyCreate(source_resource);
+
+ Resource* dest_resource = GetResource(dest_id);
+ DCHECK(!dest_resource->locked_for_write);
+ DCHECK(!dest_resource->lock_for_read_count);
+ DCHECK(dest_resource->origin == Resource::Internal);
+ DCHECK_EQ(dest_resource->exported_count, 0);
+ DCHECK_EQ(GLTexture, dest_resource->type);
+ LazyCreate(dest_resource);
+
+ DCHECK_EQ(source_resource->type, dest_resource->type);
+ DCHECK_EQ(source_resource->format, dest_resource->format);
+ DCHECK(source_resource->size == dest_resource->size);
+
+ GLES2Interface* gl = ContextGL();
+ DCHECK(gl);
+ if (source_resource->image_id && source_resource->dirty_image) {
+ gl->BindTexture(source_resource->target, source_resource->gl_id);
+ BindImageForSampling(source_resource);
+ }
+ DCHECK(use_sync_query_) << "CHROMIUM_sync_query extension missing";
+ if (!source_resource->gl_read_lock_query_id)
+ gl->GenQueriesEXT(1, &source_resource->gl_read_lock_query_id);
+ gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM,
+ source_resource->gl_read_lock_query_id);
+ DCHECK(!dest_resource->image_id);
+ dest_resource->allocated = true;
+ gl->CopyTextureCHROMIUM(dest_resource->target,
+ source_resource->gl_id,
+ dest_resource->gl_id,
+ 0,
+ GLInternalFormat(dest_resource->format),
+ GLDataType(dest_resource->format));
+ // End query and create a read lock fence that will prevent access to
+ // source resource until CopyTextureCHROMIUM command has completed.
+ gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
+ source_resource->read_lock_fence = make_scoped_refptr(
+ new QueryFence(gl, source_resource->gl_read_lock_query_id));
+}
+
+void ResourceProvider::WaitSyncPointIfNeeded(ResourceId id) {
+ Resource* resource = GetResource(id);
+ DCHECK_EQ(resource->exported_count, 0);
+ DCHECK(resource->allocated);
+ if (resource->type != GLTexture || resource->gl_id)
+ return;
+ if (!resource->mailbox.sync_point())
+ return;
+ DCHECK(resource->mailbox.IsValid());
+ GLES2Interface* gl = ContextGL();
+ DCHECK(gl);
+ GLC(gl, gl->WaitSyncPointCHROMIUM(resource->mailbox.sync_point()));
+ resource->mailbox.set_sync_point(0);
+}
+
+GLint ResourceProvider::GetActiveTextureUnit(GLES2Interface* gl) {
+ GLint active_unit = 0;
+ gl->GetIntegerv(GL_ACTIVE_TEXTURE, &active_unit);
+ return active_unit;
+}
+
+GLES2Interface* ResourceProvider::ContextGL() const {
+ ContextProvider* context_provider = output_surface_->context_provider();
+ return context_provider ? context_provider->ContextGL() : NULL;
+}
+
+class GrContext* ResourceProvider::GrContext() const {
+ ContextProvider* context_provider = output_surface_->context_provider();
+ return context_provider ? context_provider->GrContext() : NULL;
+}
+
+} // namespace cc
diff --git a/cc/resources/resource_provider.h b/cc/resources/resource_provider.h
new file mode 100644
index 0000000..fc2fc5a
--- /dev/null
+++ b/cc/resources/resource_provider.h
@@ -0,0 +1,597 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RESOURCE_PROVIDER_H_
+#define CC_RESOURCES_RESOURCE_PROVIDER_H_
+
+#include <deque>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread_checker.h"
+#include "cc/base/cc_export.h"
+#include "cc/output/context_provider.h"
+#include "cc/output/output_surface.h"
+#include "cc/resources/release_callback_impl.h"
+#include "cc/resources/resource_format.h"
+#include "cc/resources/return_callback.h"
+#include "cc/resources/shared_bitmap.h"
+#include "cc/resources/single_release_callback_impl.h"
+#include "cc/resources/texture_mailbox.h"
+#include "cc/resources/transferable_resource.h"
+#include "third_party/khronos/GLES2/gl2.h"
+#include "third_party/khronos/GLES2/gl2ext.h"
+#include "third_party/skia/include/core/SkBitmap.h"
+#include "third_party/skia/include/core/SkCanvas.h"
+#include "ui/gfx/size.h"
+
+class GrContext;
+
namespace gpu {
// Forward declaration. This must live in namespace gles2 (not "gles") to
// match the gpu::gles2::GLES2Interface references used by this header.
namespace gles2 {
class GLES2Interface;
}
}
+
+namespace gfx {
+class Rect;
+class Vector2d;
+}
+
+namespace cc {
+class BlockingTaskRunner;
+class IdAllocator;
+class SharedBitmap;
+class SharedBitmapManager;
+class TextureUploader;
+
+// This class is not thread-safe and can only be called from the thread it was
+// created on (in practice, the impl thread).
+class CC_EXPORT ResourceProvider {
+ public:
+ typedef unsigned ResourceId;
+ typedef std::vector<ResourceId> ResourceIdArray;
+ typedef std::set<ResourceId> ResourceIdSet;
+ typedef base::hash_map<ResourceId, ResourceId> ResourceIdMap;
+ enum TextureHint {
+ TextureHintDefault = 0x0,
+ TextureHintImmutable = 0x1,
+ TextureHintFramebuffer = 0x2,
+ TextureHintImmutableFramebuffer =
+ TextureHintImmutable | TextureHintFramebuffer
+ };
+ enum ResourceType {
+ InvalidType = 0,
+ GLTexture = 1,
+ Bitmap,
+ };
+
+ static scoped_ptr<ResourceProvider> Create(
+ OutputSurface* output_surface,
+ SharedBitmapManager* shared_bitmap_manager,
+ BlockingTaskRunner* blocking_main_thread_task_runner,
+ int highp_threshold_min,
+ bool use_rgba_4444_texture_format,
+ size_t id_allocation_chunk_size,
+ bool use_distance_field_text);
+ virtual ~ResourceProvider();
+
+ void InitializeSoftware();
+ void InitializeGL();
+
+ void DidLoseOutputSurface() { lost_output_surface_ = true; }
+
+ int max_texture_size() const { return max_texture_size_; }
+ ResourceFormat memory_efficient_texture_format() const {
+ return use_rgba_4444_texture_format_ ? RGBA_4444 : best_texture_format_;
+ }
+ ResourceFormat best_texture_format() const { return best_texture_format_; }
+ bool use_sync_query() const { return use_sync_query_; }
+ size_t num_resources() const { return resources_.size(); }
+
+ // Checks whether a resource is in use by a consumer.
+ bool InUseByConsumer(ResourceId id);
+
+ bool IsLost(ResourceId id);
+ bool AllowOverlay(ResourceId id);
+
+ // Producer interface.
+
+ ResourceType default_resource_type() const { return default_resource_type_; }
+ ResourceType GetResourceType(ResourceId id);
+
+ // Creates a resource of the default resource type.
+ ResourceId CreateResource(const gfx::Size& size,
+ GLint wrap_mode,
+ TextureHint hint,
+ ResourceFormat format);
+
+ // Creates a resource which is tagged as being managed for GPU memory
+ // accounting purposes.
+ ResourceId CreateManagedResource(const gfx::Size& size,
+ GLenum target,
+ GLint wrap_mode,
+ TextureHint hint,
+ ResourceFormat format);
+
+ // You can also explicitly create a specific resource type.
+ ResourceId CreateGLTexture(const gfx::Size& size,
+ GLenum target,
+ GLenum texture_pool,
+ GLint wrap_mode,
+ TextureHint hint,
+ ResourceFormat format);
+
+ ResourceId CreateBitmap(const gfx::Size& size, GLint wrap_mode);
+ // Wraps an IOSurface into a GL resource.
+ ResourceId CreateResourceFromIOSurface(const gfx::Size& size,
+ unsigned io_surface_id);
+
+ // Wraps an external texture mailbox into a GL resource.
+ ResourceId CreateResourceFromTextureMailbox(
+ const TextureMailbox& mailbox,
+ scoped_ptr<SingleReleaseCallbackImpl> release_callback_impl);
+
+ void DeleteResource(ResourceId id);
+
+ // Update pixels from image, copying source_rect (in image) to dest_offset (in
+ // the resource).
+ void SetPixels(ResourceId id,
+ const uint8_t* image,
+ const gfx::Rect& image_rect,
+ const gfx::Rect& source_rect,
+ const gfx::Vector2d& dest_offset);
+
+ // Check upload status.
+ size_t NumBlockingUploads();
+ void MarkPendingUploadsAsNonBlocking();
+ size_t EstimatedUploadsPerTick();
+ void FlushUploads();
+ void ReleaseCachedData();
+ base::TimeTicks EstimatedUploadCompletionTime(size_t uploads_per_tick);
+
+ // Only flush the command buffer if supported.
+ // Returns true if the shallow flush occurred, false otherwise.
+ bool ShallowFlushIfSupported();
+
+ // Creates accounting for a child. Returns a child ID.
+ int CreateChild(const ReturnCallback& return_callback);
+
+ // Destroys accounting for the child, deleting all accounted resources.
+ void DestroyChild(int child);
+
+ // Gets the child->parent resource ID map.
+ const ResourceIdMap& GetChildToParentMap(int child) const;
+
+ // Prepares resources to be transfered to the parent, moving them to
+ // mailboxes and serializing meta-data into TransferableResources.
+ // Resources are not removed from the ResourceProvider, but are marked as
+ // "in use".
+ void PrepareSendToParent(const ResourceIdArray& resources,
+ TransferableResourceArray* transferable_resources);
+
+ // Receives resources from a child, moving them from mailboxes. Resource IDs
+ // passed are in the child namespace, and will be translated to the parent
+ // namespace, added to the child->parent map.
+ // This adds the resources to the working set in the ResourceProvider without
+ // declaring which resources are in use. Use DeclareUsedResourcesFromChild
+ // after calling this method to do that. All calls to ReceiveFromChild should
+ // be followed by a DeclareUsedResourcesFromChild.
+ // NOTE: if the sync_point is set on any TransferableResource, this will
+ // wait on it.
+ void ReceiveFromChild(
+ int child, const TransferableResourceArray& transferable_resources);
+
+ // Once a set of resources have been received, they may or may not be used.
+ // This declares what set of resources are currently in use from the child,
+ // releasing any other resources back to the child.
+ void DeclareUsedResourcesFromChild(
+ int child,
+ const ResourceIdArray& resources_from_child);
+
+ // Receives resources from the parent, moving them from mailboxes. Resource
+ // IDs passed are in the child namespace.
+ // NOTE: if the sync_point is set on any TransferableResource, this will
+ // wait on it.
+ void ReceiveReturnsFromParent(
+ const ReturnedResourceArray& transferable_resources);
+
+ // The following lock classes are part of the ResourceProvider API and are
+ // needed to read and write the resource contents. The user must ensure
+ // that they only use GL locks on GL resources, etc, and this is enforced
+ // by assertions.
+ class CC_EXPORT ScopedReadLockGL {
+ public:
+ ScopedReadLockGL(ResourceProvider* resource_provider,
+ ResourceProvider::ResourceId resource_id);
+ virtual ~ScopedReadLockGL();
+
+ unsigned texture_id() const { return texture_id_; }
+
+ protected:
+ ResourceProvider* resource_provider_;
+ ResourceProvider::ResourceId resource_id_;
+
+ private:
+ unsigned texture_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedReadLockGL);
+ };
+
+ class CC_EXPORT ScopedSamplerGL : public ScopedReadLockGL {
+ public:
+ ScopedSamplerGL(ResourceProvider* resource_provider,
+ ResourceProvider::ResourceId resource_id,
+ GLenum filter);
+ ScopedSamplerGL(ResourceProvider* resource_provider,
+ ResourceProvider::ResourceId resource_id,
+ GLenum unit,
+ GLenum filter);
+ virtual ~ScopedSamplerGL();
+
+ GLenum target() const { return target_; }
+
+ private:
+ GLenum unit_;
+ GLenum target_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedSamplerGL);
+ };
+
+ class CC_EXPORT ScopedWriteLockGL {
+ public:
+ ScopedWriteLockGL(ResourceProvider* resource_provider,
+ ResourceProvider::ResourceId resource_id);
+ ~ScopedWriteLockGL();
+
+ unsigned texture_id() const { return texture_id_; }
+
+ private:
+ ResourceProvider* resource_provider_;
+ ResourceProvider::ResourceId resource_id_;
+ unsigned texture_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedWriteLockGL);
+ };
+
+ class CC_EXPORT ScopedReadLockSoftware {
+ public:
+ ScopedReadLockSoftware(ResourceProvider* resource_provider,
+ ResourceProvider::ResourceId resource_id);
+ ~ScopedReadLockSoftware();
+
+ const SkBitmap* sk_bitmap() const {
+ DCHECK(valid());
+ return &sk_bitmap_;
+ }
+ GLint wrap_mode() const { return wrap_mode_; }
+
+ bool valid() const { return !!sk_bitmap_.getPixels(); }
+
+ private:
+ ResourceProvider* resource_provider_;
+ ResourceProvider::ResourceId resource_id_;
+ SkBitmap sk_bitmap_;
+ GLint wrap_mode_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedReadLockSoftware);
+ };
+
+ class CC_EXPORT ScopedWriteLockSoftware {
+ public:
+ ScopedWriteLockSoftware(ResourceProvider* resource_provider,
+ ResourceProvider::ResourceId resource_id);
+ ~ScopedWriteLockSoftware();
+
+ SkCanvas* sk_canvas() { return sk_canvas_.get(); }
+ bool valid() const { return !!sk_bitmap_.getPixels(); }
+
+ private:
+ ResourceProvider* resource_provider_;
+ ResourceProvider::ResourceId resource_id_;
+ SkBitmap sk_bitmap_;
+ scoped_ptr<SkCanvas> sk_canvas_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedWriteLockSoftware);
+ };
+
+ class Fence : public base::RefCounted<Fence> {
+ public:
+ Fence() {}
+
+ virtual void Set() = 0;
+ virtual bool HasPassed() = 0;
+
+ protected:
+ friend class base::RefCounted<Fence>;
+ virtual ~Fence() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Fence);
+ };
+
+ // Acquire pixel buffer for resource. The pixel buffer can be used to
+ // set resource pixels without performing unnecessary copying.
+ void AcquirePixelBuffer(ResourceId resource);
+ void ReleasePixelBuffer(ResourceId resource);
+ // Map/unmap the acquired pixel buffer.
+ uint8_t* MapPixelBuffer(ResourceId id, int* stride);
+ void UnmapPixelBuffer(ResourceId id);
+ // Asynchronously update pixels from acquired pixel buffer.
+ void BeginSetPixels(ResourceId id);
+ void ForceSetPixelsToComplete(ResourceId id);
+ bool DidSetPixelsComplete(ResourceId id);
+
+ // Acquire and release an image. The image allows direct
+ // manipulation of texture memory.
+ void AcquireImage(ResourceId id);
+ void ReleaseImage(ResourceId id);
+ // Maps the acquired image so that its pixels could be modified.
+ // Unmap is called when all pixels are set.
+ uint8_t* MapImage(ResourceId id, int* stride);
+ void UnmapImage(ResourceId id);
+
+ // Acquire and release a SkSurface.
+ void AcquireSkSurface(ResourceId id);
+ void ReleaseSkSurface(ResourceId id);
+ // Lock/unlock resource for writing to SkSurface.
+ SkSurface* LockForWriteToSkSurface(ResourceId id);
+ void UnlockForWriteToSkSurface(ResourceId id);
+
+ // For tests only! This prevents detecting uninitialized reads.
+ // Use SetPixels or LockForWrite to allocate implicitly.
+ void AllocateForTesting(ResourceId id);
+
+ // For tests only!
+ void CreateForTesting(ResourceId id);
+
+ GLenum TargetForTesting(ResourceId id);
+
+ // Sets the current read fence. If a resource is locked for read
+ // and has read fences enabled, the resource will not allow writes
+ // until this fence has passed.
+ void SetReadLockFence(Fence* fence) { current_read_lock_fence_ = fence; }
+
+ // Enable read lock fences for a specific resource.
+ void EnableReadLockFences(ResourceId id);
+
+ // Indicates if we can currently lock this resource for write.
+ bool CanLockForWrite(ResourceId id);
+
+ // Copy pixels from source to destination.
+ void CopyResource(ResourceId source_id, ResourceId dest_id);
+
+ void WaitSyncPointIfNeeded(ResourceId id);
+
+ static GLint GetActiveTextureUnit(gpu::gles2::GLES2Interface* gl);
+
+ private:
+ struct Resource {
+ enum Origin { Internal, External, Delegated };
+
+ Resource();
+ ~Resource();
+ Resource(unsigned texture_id,
+ const gfx::Size& size,
+ Origin origin,
+ GLenum target,
+ GLenum filter,
+ GLenum texture_pool,
+ GLint wrap_mode,
+ TextureHint hint,
+ ResourceFormat format);
+ Resource(uint8_t* pixels,
+ SharedBitmap* bitmap,
+ const gfx::Size& size,
+ Origin origin,
+ GLenum filter,
+ GLint wrap_mode);
+ Resource(const SharedBitmapId& bitmap_id,
+ const gfx::Size& size,
+ Origin origin,
+ GLenum filter,
+ GLint wrap_mode);
+
+ int child_id;
+ unsigned gl_id;
+ // Pixel buffer used for set pixels without unnecessary copying.
+ unsigned gl_pixel_buffer_id;
+ // Query used to determine when asynchronous set pixels complete.
+ unsigned gl_upload_query_id;
+ // Query used to determine when read lock fence has passed.
+ unsigned gl_read_lock_query_id;
+ TextureMailbox mailbox;
+ ReleaseCallbackImpl release_callback_impl;
+ uint8_t* pixels;
+ int lock_for_read_count;
+ int imported_count;
+ int exported_count;
+ bool dirty_image : 1;
+ bool locked_for_write : 1;
+ bool lost : 1;
+ bool marked_for_deletion : 1;
+ bool pending_set_pixels : 1;
+ bool set_pixels_completion_forced : 1;
+ bool allocated : 1;
+ bool read_lock_fences_enabled : 1;
+ bool has_shared_bitmap_id : 1;
+ bool allow_overlay : 1;
+ scoped_refptr<Fence> read_lock_fence;
+ gfx::Size size;
+ Origin origin;
+ GLenum target;
+ // TODO(skyostil): Use a separate sampler object for filter state.
+ GLenum original_filter;
+ GLenum filter;
+ unsigned image_id;
+ unsigned bound_image_id;
+ GLenum texture_pool;
+ GLint wrap_mode;
+ TextureHint hint;
+ ResourceType type;
+ ResourceFormat format;
+ SharedBitmapId shared_bitmap_id;
+ SharedBitmap* shared_bitmap;
+ skia::RefPtr<SkSurface> sk_surface;
+ };
+ typedef base::hash_map<ResourceId, Resource> ResourceMap;
+
+ static bool CompareResourceMapIteratorsByChildId(
+ const std::pair<ReturnedResource, ResourceMap::iterator>& a,
+ const std::pair<ReturnedResource, ResourceMap::iterator>& b);
+
+ struct Child {
+ Child();
+ ~Child();
+
+ ResourceIdMap child_to_parent_map;
+ ResourceIdMap parent_to_child_map;
+ ReturnCallback return_callback;
+ ResourceIdSet in_use_resources;
+ bool marked_for_deletion;
+ };
+ typedef base::hash_map<int, Child> ChildMap;
+
+ bool ReadLockFenceHasPassed(const Resource* resource) {
+ return !resource->read_lock_fence.get() ||
+ resource->read_lock_fence->HasPassed();
+ }
+
+ ResourceProvider(OutputSurface* output_surface,
+ SharedBitmapManager* shared_bitmap_manager,
+ BlockingTaskRunner* blocking_main_thread_task_runner,
+ int highp_threshold_min,
+ bool use_rgba_4444_texture_format,
+ size_t id_allocation_chunk_size,
+ bool use_distance_field_text);
+
+ void CleanUpGLIfNeeded();
+
+ Resource* GetResource(ResourceId id);
+ const Resource* LockForRead(ResourceId id);
+ void UnlockForRead(ResourceId id);
+ const Resource* LockForWrite(ResourceId id);
+ void UnlockForWrite(ResourceId id);
+ static void PopulateSkBitmapWithResource(SkBitmap* sk_bitmap,
+ const Resource* resource);
+
+ void TransferResource(gpu::gles2::GLES2Interface* gl,
+ ResourceId id,
+ TransferableResource* resource);
+ enum DeleteStyle {
+ Normal,
+ ForShutdown,
+ };
+ void DeleteResourceInternal(ResourceMap::iterator it, DeleteStyle style);
+ void DeleteAndReturnUnusedResourcesToChild(ChildMap::iterator child_it,
+ DeleteStyle style,
+ const ResourceIdArray& unused);
+ void DestroyChildInternal(ChildMap::iterator it, DeleteStyle style);
+ void LazyCreate(Resource* resource);
+ void LazyAllocate(Resource* resource);
+
+ void BindImageForSampling(Resource* resource);
+ // Binds the given GL resource to a texture target for sampling using the
+ // specified filter for both minification and magnification. Returns the
+ // texture target used. The resource must be locked for reading.
+ GLenum BindForSampling(ResourceId resource_id, GLenum unit, GLenum filter);
+
+ // Returns NULL if the output_surface_ does not have a ContextProvider.
+ gpu::gles2::GLES2Interface* ContextGL() const;
+ class GrContext* GrContext() const;
+
+ OutputSurface* output_surface_;
+ SharedBitmapManager* shared_bitmap_manager_;
+ BlockingTaskRunner* blocking_main_thread_task_runner_;
+ bool lost_output_surface_;
+ int highp_threshold_min_;
+ ResourceId next_id_;
+ ResourceMap resources_;
+ int next_child_;
+ ChildMap children_;
+
+ ResourceType default_resource_type_;
+ bool use_texture_storage_ext_;
+ bool use_texture_format_bgra_;
+ bool use_texture_usage_hint_;
+ bool use_compressed_texture_etc1_;
+ scoped_ptr<TextureUploader> texture_uploader_;
+ int max_texture_size_;
+ ResourceFormat best_texture_format_;
+
+ base::ThreadChecker thread_checker_;
+
+ scoped_refptr<Fence> current_read_lock_fence_;
+ bool use_rgba_4444_texture_format_;
+
+ const size_t id_allocation_chunk_size_;
+ scoped_ptr<IdAllocator> texture_id_allocator_;
+ scoped_ptr<IdAllocator> buffer_id_allocator_;
+
+ bool use_sync_query_;
+
+ bool use_distance_field_text_;
+
+ DISALLOW_COPY_AND_ASSIGN(ResourceProvider);
+};
+
+
+// TODO(epenner): Move these format conversions to resource_format.h
+// once that builds on mac (npapi.h currently #includes OpenGL.h).
+inline unsigned BitsPerPixel(ResourceFormat format) {
+ DCHECK_LE(format, RESOURCE_FORMAT_MAX);
+ static const unsigned format_bits_per_pixel[RESOURCE_FORMAT_MAX + 1] = {
+ 32, // RGBA_8888
+ 16, // RGBA_4444
+ 32, // BGRA_8888
+ 8, // ALPHA_8
+ 8, // LUMINANCE_8
+ 16, // RGB_565,
+ 4 // ETC1
+ };
+ return format_bits_per_pixel[format];
+}
+
+inline GLenum GLDataType(ResourceFormat format) {
+ DCHECK_LE(format, RESOURCE_FORMAT_MAX);
+ static const unsigned format_gl_data_type[RESOURCE_FORMAT_MAX + 1] = {
+ GL_UNSIGNED_BYTE, // RGBA_8888
+ GL_UNSIGNED_SHORT_4_4_4_4, // RGBA_4444
+ GL_UNSIGNED_BYTE, // BGRA_8888
+ GL_UNSIGNED_BYTE, // ALPHA_8
+ GL_UNSIGNED_BYTE, // LUMINANCE_8
+ GL_UNSIGNED_SHORT_5_6_5, // RGB_565,
+ GL_UNSIGNED_BYTE // ETC1
+ };
+ return format_gl_data_type[format];
+}
+
+inline GLenum GLDataFormat(ResourceFormat format) {
+ DCHECK_LE(format, RESOURCE_FORMAT_MAX);
+ static const unsigned format_gl_data_format[RESOURCE_FORMAT_MAX + 1] = {
+ GL_RGBA, // RGBA_8888
+ GL_RGBA, // RGBA_4444
+ GL_BGRA_EXT, // BGRA_8888
+ GL_ALPHA, // ALPHA_8
+ GL_LUMINANCE, // LUMINANCE_8
+ GL_RGB, // RGB_565
+ GL_ETC1_RGB8_OES // ETC1
+ };
+ return format_gl_data_format[format];
+}
+
+inline GLenum GLInternalFormat(ResourceFormat format) {
+ return GLDataFormat(format);
+}
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RESOURCE_PROVIDER_H_
diff --git a/cc/resources/resource_provider_unittest.cc b/cc/resources/resource_provider_unittest.cc
new file mode 100644
index 0000000..04d88c6
--- /dev/null
+++ b/cc/resources/resource_provider_unittest.cc
@@ -0,0 +1,3686 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/resource_provider.h"
+
+#include <algorithm>
+#include <map>
+#include <set>
+
+#include "base/bind.h"
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "cc/base/scoped_ptr_deque.h"
+#include "cc/output/output_surface.h"
+#include "cc/resources/returned_resource.h"
+#include "cc/resources/shared_bitmap_manager.h"
+#include "cc/resources/single_release_callback.h"
+#include "cc/test/fake_output_surface.h"
+#include "cc/test/fake_output_surface_client.h"
+#include "cc/test/test_shared_bitmap_manager.h"
+#include "cc/test/test_texture.h"
+#include "cc/test/test_web_graphics_context_3d.h"
+#include "cc/trees/blocking_task_runner.h"
+#include "gpu/GLES2/gl2extchromium.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/khronos/GLES2/gl2.h"
+#include "third_party/khronos/GLES2/gl2ext.h"
+#include "ui/gfx/rect.h"
+
+using testing::Mock;
+using testing::NiceMock;
+using testing::Return;
+using testing::SetArgPointee;
+using testing::StrictMock;
+using testing::_;
+
+namespace cc {
+namespace {
+
+static void EmptyReleaseCallback(uint32 sync_point,
+ bool lost_resource,
+ BlockingTaskRunner* main_thread_task_runner) {
+}
+
+static void ReleaseCallback(
+ uint32* release_sync_point,
+ bool* release_lost_resource,
+ BlockingTaskRunner** release_main_thread_task_runner,
+ uint32 sync_point,
+ bool lost_resource,
+ BlockingTaskRunner* main_thread_task_runner) {
+ *release_sync_point = sync_point;
+ *release_lost_resource = lost_resource;
+ *release_main_thread_task_runner = main_thread_task_runner;
+}
+
+static void SharedMemoryReleaseCallback(
+ scoped_ptr<base::SharedMemory> memory,
+ uint32 sync_point,
+ bool lost_resource,
+ BlockingTaskRunner* main_thread_task_runner) {
+}
+
+static void ReleaseSharedMemoryCallback(
+ scoped_ptr<base::SharedMemory> shared_memory,
+ bool* release_called,
+ uint32* release_sync_point,
+ bool* lost_resource_result,
+ uint32 sync_point,
+ bool lost_resource,
+ BlockingTaskRunner* main_thread_task_runner) {
+ *release_called = true;
+ *release_sync_point = sync_point;
+ *lost_resource_result = lost_resource;
+}
+
+static scoped_ptr<base::SharedMemory> CreateAndFillSharedMemory(
+ const gfx::Size& size,
+ uint32_t value) {
+ scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory);
+ CHECK(shared_memory->CreateAndMapAnonymous(4 * size.GetArea()));
+ uint32_t* pixels = reinterpret_cast<uint32_t*>(shared_memory->memory());
+ CHECK(pixels);
+ std::fill_n(pixels, size.GetArea(), value);
+ return shared_memory.Pass();
+}
+
+class TextureStateTrackingContext : public TestWebGraphicsContext3D {
+ public:
+ MOCK_METHOD2(bindTexture, void(GLenum target, GLuint texture));
+ MOCK_METHOD3(texParameteri, void(GLenum target, GLenum pname, GLint param));
+ MOCK_METHOD1(waitSyncPoint, void(GLuint sync_point));
+ MOCK_METHOD0(insertSyncPoint, GLuint(void));
+ MOCK_METHOD2(produceTextureCHROMIUM,
+ void(GLenum target, const GLbyte* mailbox));
+ MOCK_METHOD2(consumeTextureCHROMIUM,
+ void(GLenum target, const GLbyte* mailbox));
+
+ // Force all textures to be consecutive numbers starting at "1",
+ // so we easily can test for them.
+ virtual GLuint NextTextureId() OVERRIDE {
+ base::AutoLock lock(namespace_->lock);
+ return namespace_->next_texture_id++;
+ }
+ virtual void RetireTextureId(GLuint) OVERRIDE {}
+};
+
+// Shared data between multiple ResourceProviderContext. This contains mailbox
+// contents as well as information about sync points.
+class ContextSharedData {
+ public:
+ static scoped_ptr<ContextSharedData> Create() {
+ return make_scoped_ptr(new ContextSharedData());
+ }
+
+ uint32 InsertSyncPoint() { return next_sync_point_++; }
+
+ void GenMailbox(GLbyte* mailbox) {
+ memset(mailbox, 0, GL_MAILBOX_SIZE_CHROMIUM);
+ memcpy(mailbox, &next_mailbox_, sizeof(next_mailbox_));
+ ++next_mailbox_;
+ }
+
+ void ProduceTexture(const GLbyte* mailbox_name,
+ uint32 sync_point,
+ scoped_refptr<TestTexture> texture) {
+ unsigned mailbox = 0;
+ memcpy(&mailbox, mailbox_name, sizeof(mailbox));
+ ASSERT_TRUE(mailbox && mailbox < next_mailbox_);
+ textures_[mailbox] = texture;
+ ASSERT_LT(sync_point_for_mailbox_[mailbox], sync_point);
+ sync_point_for_mailbox_[mailbox] = sync_point;
+ }
+
+ scoped_refptr<TestTexture> ConsumeTexture(const GLbyte* mailbox_name,
+ uint32 sync_point) {
+ unsigned mailbox = 0;
+ memcpy(&mailbox, mailbox_name, sizeof(mailbox));
+ DCHECK(mailbox && mailbox < next_mailbox_);
+
+ // If the latest sync point the context has waited on is before the sync
+ // point for when the mailbox was set, pretend we never saw that
+ // ProduceTexture.
+ if (sync_point_for_mailbox_[mailbox] > sync_point) {
+ NOTREACHED();
+ return scoped_refptr<TestTexture>();
+ }
+ return textures_[mailbox];
+ }
+
+ private:
+ ContextSharedData() : next_sync_point_(1), next_mailbox_(1) {}
+
+ uint32 next_sync_point_;
+ unsigned next_mailbox_;
+ typedef base::hash_map<unsigned, scoped_refptr<TestTexture> > TextureMap;
+ TextureMap textures_;
+ base::hash_map<unsigned, uint32> sync_point_for_mailbox_;
+};
+
+// Fake GL context used by the ResourceProvider tests. Textures live in the
+// TestWebGraphicsContext3D shared namespace, while mailbox produce/consume
+// and sync points are routed through a ContextSharedData shared by several
+// contexts, so cross-context ordering can be verified.
+// NOTE(review): this only approximates real CHROMIUM_texture_mailbox
+// semantics -- confirm against the extension spec before extending it.
+class ResourceProviderContext : public TestWebGraphicsContext3D {
+ public:
+ static scoped_ptr<ResourceProviderContext> Create(
+ ContextSharedData* shared_data) {
+ return make_scoped_ptr(new ResourceProviderContext(shared_data));
+ }
+
+ // Creates a new sync point and flushes all produceTextureCHROMIUM calls
+ // queued since the last insertSyncPoint into the shared data, associating
+ // them with that sync point.
+ virtual GLuint insertSyncPoint() OVERRIDE {
+ uint32 sync_point = shared_data_->InsertSyncPoint();
+ // Commit the produceTextureCHROMIUM calls at this point, so that
+ // they're associated with the sync point.
+ for (PendingProduceTextureList::iterator it =
+ pending_produce_textures_.begin();
+ it != pending_produce_textures_.end();
+ ++it) {
+ shared_data_->ProduceTexture(
+ (*it)->mailbox, sync_point, (*it)->texture);
+ }
+ pending_produce_textures_.clear();
+ return sync_point;
+ }
+
+ // Only records the most recent sync point waited on;
+ // consumeTextureCHROMIUM uses it to check the consumer waited far enough.
+ virtual void waitSyncPoint(GLuint sync_point) OVERRIDE {
+ last_waited_sync_point_ = std::max(sync_point, last_waited_sync_point_);
+ }
+
+ unsigned last_waited_sync_point() const { return last_waited_sync_point_; }
+
+ // Immutable allocation path: maps the internal format to a ResourceFormat
+ // and allocates backing storage for the bound GL_TEXTURE_2D texture.
+ virtual void texStorage2DEXT(GLenum target,
+ GLint levels,
+ GLuint internalformat,
+ GLint width,
+ GLint height) OVERRIDE {
+ CheckTextureIsBound(target);
+ ASSERT_EQ(static_cast<unsigned>(GL_TEXTURE_2D), target);
+ ASSERT_EQ(1, levels);
+ GLenum format = GL_RGBA;
+ switch (internalformat) {
+ case GL_RGBA8_OES:
+ break;
+ case GL_BGRA8_EXT:
+ format = GL_BGRA_EXT;
+ break;
+ default:
+ NOTREACHED();
+ }
+ AllocateTexture(gfx::Size(width, height), format);
+ }
+
+ // (Re)allocates the bound texture and, if |pixels| is non-NULL, uploads a
+ // full-size copy of the data.
+ virtual void texImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE {
+ CheckTextureIsBound(target);
+ ASSERT_EQ(static_cast<unsigned>(GL_TEXTURE_2D), target);
+ ASSERT_FALSE(level);
+ ASSERT_EQ(internalformat, format);
+ ASSERT_FALSE(border);
+ ASSERT_EQ(static_cast<unsigned>(GL_UNSIGNED_BYTE), type);
+ AllocateTexture(gfx::Size(width, height), format);
+ if (pixels)
+ SetPixels(0, 0, width, height, pixels);
+ }
+
+ // Partial upload into an already-allocated bound texture; |format| must
+ // match the texture's existing format.
+ virtual void texSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE {
+ CheckTextureIsBound(target);
+ ASSERT_EQ(static_cast<unsigned>(GL_TEXTURE_2D), target);
+ ASSERT_FALSE(level);
+ ASSERT_EQ(static_cast<unsigned>(GL_UNSIGNED_BYTE), type);
+ {
+ base::AutoLock lock_for_texture_access(namespace_->lock);
+ ASSERT_EQ(GLDataFormat(BoundTexture(target)->format), format);
+ }
+ ASSERT_TRUE(pixels);
+ SetPixels(xoffset, yoffset, width, height, pixels);
+ }
+
+ virtual void genMailboxCHROMIUM(GLbyte* mailbox) OVERRIDE {
+ return shared_data_->GenMailbox(mailbox);
+ }
+
+ virtual void produceTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE {
+ CheckTextureIsBound(target);
+
+ // Delay moving the texture into the mailbox until the next
+ // InsertSyncPoint, so that it is not visible to other contexts that
+ // haven't waited on that sync point.
+ scoped_ptr<PendingProduceTexture> pending(new PendingProduceTexture);
+ memcpy(pending->mailbox, mailbox, sizeof(pending->mailbox));
+ base::AutoLock lock_for_texture_access(namespace_->lock);
+ pending->texture = BoundTexture(target);
+ pending_produce_textures_.push_back(pending.Pass());
+ }
+
+ // Replaces the bound texture with the mailbox's texture; passes the latest
+ // waited-on sync point so ContextSharedData can validate ordering.
+ virtual void consumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE {
+ CheckTextureIsBound(target);
+ base::AutoLock lock_for_texture_access(namespace_->lock);
+ scoped_refptr<TestTexture> texture =
+ shared_data_->ConsumeTexture(mailbox, last_waited_sync_point_);
+ namespace_->textures.Replace(BoundTextureId(target), texture);
+ }
+
+ // Test helper: copies the bound GL_TEXTURE_2D contents into |pixels|,
+ // asserting the texture matches the expected |size| and |format|.
+ void GetPixels(const gfx::Size& size,
+ ResourceFormat format,
+ uint8_t* pixels) {
+ CheckTextureIsBound(GL_TEXTURE_2D);
+ base::AutoLock lock_for_texture_access(namespace_->lock);
+ scoped_refptr<TestTexture> texture = BoundTexture(GL_TEXTURE_2D);
+ ASSERT_EQ(texture->size, size);
+ ASSERT_EQ(texture->format, format);
+ memcpy(pixels, texture->data.get(), TextureSizeBytes(size, format));
+ }
+
+ protected:
+ explicit ResourceProviderContext(ContextSharedData* shared_data)
+ : shared_data_(shared_data),
+ last_waited_sync_point_(0) {}
+
+ private:
+ // Maps a GL format to a ResourceFormat and reallocates the bound texture's
+ // backing store. Unknown formats silently fall back to RGBA_8888.
+ void AllocateTexture(const gfx::Size& size, GLenum format) {
+ CheckTextureIsBound(GL_TEXTURE_2D);
+ ResourceFormat texture_format = RGBA_8888;
+ switch (format) {
+ case GL_RGBA:
+ texture_format = RGBA_8888;
+ break;
+ case GL_BGRA_EXT:
+ texture_format = BGRA_8888;
+ break;
+ }
+ base::AutoLock lock_for_texture_access(namespace_->lock);
+ BoundTexture(GL_TEXTURE_2D)->Reallocate(size, texture_format);
+ }
+
+ // Row-by-row copy of |pixels| into the bound texture's data at
+ // (xoffset, yoffset); bounds are asserted against the texture size.
+ void SetPixels(int xoffset,
+ int yoffset,
+ int width,
+ int height,
+ const void* pixels) {
+ CheckTextureIsBound(GL_TEXTURE_2D);
+ base::AutoLock lock_for_texture_access(namespace_->lock);
+ scoped_refptr<TestTexture> texture = BoundTexture(GL_TEXTURE_2D);
+ ASSERT_TRUE(texture->data.get());
+ ASSERT_TRUE(xoffset >= 0 && xoffset + width <= texture->size.width());
+ ASSERT_TRUE(yoffset >= 0 && yoffset + height <= texture->size.height());
+ ASSERT_TRUE(pixels);
+ size_t in_pitch = TextureSizeBytes(gfx::Size(width, 1), texture->format);
+ size_t out_pitch =
+ TextureSizeBytes(gfx::Size(texture->size.width(), 1), texture->format);
+ uint8_t* dest = texture->data.get() + yoffset * out_pitch +
+ TextureSizeBytes(gfx::Size(xoffset, 1), texture->format);
+ const uint8_t* src = static_cast<const uint8_t*>(pixels);
+ for (int i = 0; i < height; ++i) {
+ memcpy(dest, src, in_pitch);
+ dest += out_pitch;
+ src += in_pitch;
+ }
+ }
+
+ // A produceTextureCHROMIUM call buffered until the next insertSyncPoint.
+ struct PendingProduceTexture {
+ GLbyte mailbox[GL_MAILBOX_SIZE_CHROMIUM];
+ scoped_refptr<TestTexture> texture;
+ };
+ typedef ScopedPtrDeque<PendingProduceTexture> PendingProduceTextureList;
+ ContextSharedData* shared_data_;
+ GLuint last_waited_sync_point_;
+ PendingProduceTextureList pending_produce_textures_;
+};
+
+// Reads back the contents of resource |id| into |pixels|, dispatching on the
+// provider's default resource type: a GL texture readback for GLTexture, or
+// a memcpy out of the locked SkBitmap for Bitmap. Waits on the resource's
+// sync point first so the read observes the producer's writes.
+void GetResourcePixels(ResourceProvider* resource_provider,
+ ResourceProviderContext* context,
+ ResourceProvider::ResourceId id,
+ const gfx::Size& size,
+ ResourceFormat format,
+ uint8_t* pixels) {
+ resource_provider->WaitSyncPointIfNeeded(id);
+ switch (resource_provider->default_resource_type()) {
+ case ResourceProvider::GLTexture: {
+ ResourceProvider::ScopedReadLockGL lock_gl(resource_provider, id);
+ ASSERT_NE(0U, lock_gl.texture_id());
+ context->bindTexture(GL_TEXTURE_2D, lock_gl.texture_id());
+ context->GetPixels(size, format, pixels);
+ break;
+ }
+ case ResourceProvider::Bitmap: {
+ ResourceProvider::ScopedReadLockSoftware lock_software(resource_provider,
+ id);
+ memcpy(pixels,
+ lock_software.sk_bitmap()->getPixels(),
+ lock_software.sk_bitmap()->getSize());
+ break;
+ }
+ case ResourceProvider::InvalidType:
+ NOTREACHED();
+ break;
+ }
+}
+
+// Fixture parameterized on the resource type (GLTexture or Bitmap). Builds a
+// "parent" and a "child" ResourceProvider, each on its own output surface;
+// for the GL path both contexts share one ContextSharedData so mailboxes can
+// be transferred between them.
+class ResourceProviderTest
+    : public testing::TestWithParam<ResourceProvider::ResourceType> {
+ public:
+ ResourceProviderTest()
+ : shared_data_(ContextSharedData::Create()),
+ context3d_(NULL),
+ child_context_(NULL),
+ main_thread_task_runner_(BlockingTaskRunner::Create(NULL)) {
+ switch (GetParam()) {
+ case ResourceProvider::GLTexture: {
+ scoped_ptr<ResourceProviderContext> context3d(
+ ResourceProviderContext::Create(shared_data_.get()));
+ // Keep raw pointers to the contexts; ownership moves to the output
+ // surfaces below.
+ context3d_ = context3d.get();
+
+ scoped_refptr<TestContextProvider> context_provider =
+ TestContextProvider::Create(context3d.Pass());
+
+ output_surface_ = FakeOutputSurface::Create3d(context_provider);
+
+ scoped_ptr<ResourceProviderContext> child_context_owned =
+ ResourceProviderContext::Create(shared_data_.get());
+ child_context_ = child_context_owned.get();
+ child_output_surface_ =
+ FakeOutputSurface::Create3d(child_context_owned.Pass());
+ break;
+ }
+ case ResourceProvider::Bitmap:
+ output_surface_ = FakeOutputSurface::CreateSoftware(
+ make_scoped_ptr(new SoftwareOutputDevice));
+ child_output_surface_ = FakeOutputSurface::CreateSoftware(
+ make_scoped_ptr(new SoftwareOutputDevice));
+ break;
+ case ResourceProvider::InvalidType:
+ NOTREACHED();
+ break;
+ }
+ CHECK(output_surface_->BindToClient(&output_surface_client_));
+ CHECK(child_output_surface_->BindToClient(&child_output_surface_client_));
+
+ // Both providers share one bitmap manager, mirroring how software
+ // resources are shared across processes in production.
+ shared_bitmap_manager_.reset(new TestSharedBitmapManager());
+
+ resource_provider_ =
+ ResourceProvider::Create(output_surface_.get(),
+ shared_bitmap_manager_.get(),
+ main_thread_task_runner_.get(),
+ 0,
+ false,
+ 1,
+ false);
+ child_resource_provider_ =
+ ResourceProvider::Create(child_output_surface_.get(),
+ shared_bitmap_manager_.get(),
+ main_thread_task_runner_.get(),
+ 0,
+ false,
+ 1,
+ false);
+ }
+
+ // Return callback that appends returned resources to |array|; the task
+ // runner argument is intentionally unused here.
+ static void CollectResources(ReturnedResourceArray* array,
+ const ReturnedResourceArray& returned,
+ BlockingTaskRunner* main_thread_task_runner) {
+ array->insert(array->end(), returned.begin(), returned.end());
+ }
+
+ static ReturnCallback GetReturnCallback(ReturnedResourceArray* array) {
+ return base::Bind(&ResourceProviderTest::CollectResources, array);
+ }
+
+ // Binds |id| with the given filter via a ScopedSamplerGL; the sampler's
+ // side effect (setting the filter) is all this helper is for.
+ static void SetResourceFilter(ResourceProvider* resource_provider,
+ ResourceProvider::ResourceId id,
+ GLenum filter) {
+ ResourceProvider::ScopedSamplerGL sampler(
+ resource_provider, id, GL_TEXTURE_2D, filter);
+ }
+
+ ResourceProviderContext* context() { return context3d_; }
+
+ // Creates a mailbox-backed resource in the child provider: a GL texture
+ // mailbox on the GL path, a shared-memory-backed mailbox otherwise. The
+ // release callback reports into the caller's out-params.
+ ResourceProvider::ResourceId CreateChildMailbox(uint32* release_sync_point,
+ bool* lost_resource,
+ bool* release_called,
+ uint32* sync_point) {
+ if (GetParam() == ResourceProvider::GLTexture) {
+ unsigned texture = child_context_->createTexture();
+ gpu::Mailbox gpu_mailbox;
+ child_context_->bindTexture(GL_TEXTURE_2D, texture);
+ child_context_->genMailboxCHROMIUM(gpu_mailbox.name);
+ child_context_->produceTextureCHROMIUM(GL_TEXTURE_2D, gpu_mailbox.name);
+ *sync_point = child_context_->insertSyncPoint();
+ EXPECT_LT(0u, *sync_point);
+
+ // |shared_memory| stays empty on the GL path but is still bound into
+ // the callback so both branches share one callback signature.
+ scoped_ptr<base::SharedMemory> shared_memory;
+ scoped_ptr<SingleReleaseCallbackImpl> callback =
+ SingleReleaseCallbackImpl::Create(
+ base::Bind(ReleaseSharedMemoryCallback,
+ base::Passed(&shared_memory),
+ release_called,
+ release_sync_point,
+ lost_resource));
+ return child_resource_provider_->CreateResourceFromTextureMailbox(
+ TextureMailbox(gpu_mailbox, GL_TEXTURE_2D, *sync_point),
+ callback.Pass());
+ } else {
+ gfx::Size size(64, 64);
+ scoped_ptr<base::SharedMemory> shared_memory(
+ CreateAndFillSharedMemory(size, 0));
+
+ base::SharedMemory* shared_memory_ptr = shared_memory.get();
+ scoped_ptr<SingleReleaseCallbackImpl> callback =
+ SingleReleaseCallbackImpl::Create(
+ base::Bind(ReleaseSharedMemoryCallback,
+ base::Passed(&shared_memory),
+ release_called,
+ release_sync_point,
+ lost_resource));
+ return child_resource_provider_->CreateResourceFromTextureMailbox(
+ TextureMailbox(shared_memory_ptr, size), callback.Pass());
+ }
+ }
+
+ protected:
+ scoped_ptr<ContextSharedData> shared_data_;
+ ResourceProviderContext* context3d_;
+ ResourceProviderContext* child_context_;
+ FakeOutputSurfaceClient output_surface_client_;
+ FakeOutputSurfaceClient child_output_surface_client_;
+ scoped_ptr<OutputSurface> output_surface_;
+ scoped_ptr<OutputSurface> child_output_surface_;
+ scoped_ptr<BlockingTaskRunner> main_thread_task_runner_;
+ scoped_ptr<ResourceProvider> resource_provider_;
+ scoped_ptr<ResourceProvider> child_resource_provider_;
+ scoped_ptr<TestSharedBitmapManager> shared_bitmap_manager_;
+};
+
+// Exercises the create/upload/readback/delete lifecycle of a single 1x1
+// resource, and (on the GL path) checks that the backing texture is
+// allocated lazily on first upload and destroyed with the resource.
+void CheckCreateResource(ResourceProvider::ResourceType expected_default_type,
+ ResourceProvider* resource_provider,
+ ResourceProviderContext* context) {
+ DCHECK_EQ(expected_default_type, resource_provider->default_resource_type());
+
+ gfx::Size size(1, 1);
+ ResourceFormat format = RGBA_8888;
+ size_t pixel_size = TextureSizeBytes(size, format);
+ ASSERT_EQ(4U, pixel_size);
+
+ ResourceProvider::ResourceId id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ EXPECT_EQ(1, static_cast<int>(resource_provider->num_resources()));
+ // No GL texture should exist until pixels are actually uploaded.
+ if (expected_default_type == ResourceProvider::GLTexture)
+ EXPECT_EQ(0u, context->NumTextures());
+
+ uint8_t data[4] = { 1, 2, 3, 4 };
+ gfx::Rect rect(size);
+ resource_provider->SetPixels(id, data, rect, rect, gfx::Vector2d());
+ if (expected_default_type == ResourceProvider::GLTexture)
+ EXPECT_EQ(1u, context->NumTextures());
+
+ uint8_t result[4] = { 0 };
+ GetResourcePixels(resource_provider, context, id, size, format, result);
+ EXPECT_EQ(0, memcmp(data, result, pixel_size));
+
+ resource_provider->DeleteResource(id);
+ EXPECT_EQ(0, static_cast<int>(resource_provider->num_resources()));
+ if (expected_default_type == ResourceProvider::GLTexture)
+ EXPECT_EQ(0u, context->NumTextures());
+}
+
+// Smoke test: full resource lifecycle on the parameterized default type.
+TEST_P(ResourceProviderTest, Basic) {
+ CheckCreateResource(GetParam(), resource_provider_.get(), context());
+}
+
+// Verifies partial uploads into a 2x2 RGBA resource: each case uploads a
+// one-pixel sub-rect from the source image at a destination offset and
+// checks the full readback against hand-computed expected bytes.
+TEST_P(ResourceProviderTest, Upload) {
+ gfx::Size size(2, 2);
+ ResourceFormat format = RGBA_8888;
+ size_t pixel_size = TextureSizeBytes(size, format);
+ ASSERT_EQ(16U, pixel_size);
+
+ ResourceProvider::ResourceId id = resource_provider_->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+
+ // Initial full upload of all zeros.
+ uint8_t image[16] = { 0 };
+ gfx::Rect image_rect(size);
+ resource_provider_->SetPixels(
+ id, image, image_rect, image_rect, gfx::Vector2d());
+
+ // Fill the source image with the byte pattern 0..15.
+ for (uint8_t i = 0; i < pixel_size; ++i)
+ image[i] = i;
+
+ uint8_t result[16] = { 0 };
+ {
+ // Copy source pixel (0,0) to destination (0,0).
+ gfx::Rect source_rect(0, 0, 1, 1);
+ gfx::Vector2d dest_offset(0, 0);
+ resource_provider_->SetPixels(
+ id, image, image_rect, source_rect, dest_offset);
+
+ uint8_t expected[16] = { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ GetResourcePixels(
+ resource_provider_.get(), context(), id, size, format, result);
+ EXPECT_EQ(0, memcmp(expected, result, pixel_size));
+ }
+ {
+ // Copy source pixel (0,0) to destination (1,1).
+ gfx::Rect source_rect(0, 0, 1, 1);
+ gfx::Vector2d dest_offset(1, 1);
+ resource_provider_->SetPixels(
+ id, image, image_rect, source_rect, dest_offset);
+
+ uint8_t expected[16] = { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 };
+ GetResourcePixels(
+ resource_provider_.get(), context(), id, size, format, result);
+ EXPECT_EQ(0, memcmp(expected, result, pixel_size));
+ }
+ {
+ // Copy source pixel (1,0) to destination (0,1).
+ gfx::Rect source_rect(1, 0, 1, 1);
+ gfx::Vector2d dest_offset(0, 1);
+ resource_provider_->SetPixels(
+ id, image, image_rect, source_rect, dest_offset);
+
+ uint8_t expected[16] = { 0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3 };
+ GetResourcePixels(
+ resource_provider_.get(), context(), id, size, format, result);
+ EXPECT_EQ(0, memcmp(expected, result, pixel_size));
+ }
+ {
+ // Source rect expressed in an image coordinate space offset by
+ // (100,100): still selects the image's top-left pixel.
+ gfx::Rect offset_image_rect(gfx::Point(100, 100), size);
+ gfx::Rect source_rect(100, 100, 1, 1);
+ gfx::Vector2d dest_offset(1, 0);
+ resource_provider_->SetPixels(
+ id, image, offset_image_rect, source_rect, dest_offset);
+
+ uint8_t expected[16] = { 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3 };
+ GetResourcePixels(
+ resource_provider_.get(), context(), id, size, format, result);
+ EXPECT_EQ(0, memcmp(expected, result, pixel_size));
+ }
+
+ resource_provider_->DeleteResource(id);
+}
+
+// GL-only: transfers four kinds of child resources (two uploaded textures,
+// an image-backed texture, an external texture mailbox) to the parent,
+// returns them, re-reads them in the child, transfers again, and finally
+// destroys the child -- checking sync points, texture targets, in-use
+// bookkeeping and pixel contents at each step.
+TEST_P(ResourceProviderTest, TransferGLResources) {
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+ gfx::Size size(1, 1);
+ ResourceFormat format = RGBA_8888;
+ size_t pixel_size = TextureSizeBytes(size, format);
+ ASSERT_EQ(4U, pixel_size);
+
+ ResourceProvider::ResourceId id1 = child_resource_provider_->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ uint8_t data1[4] = { 1, 2, 3, 4 };
+ gfx::Rect rect(size);
+ child_resource_provider_->SetPixels(id1, data1, rect, rect, gfx::Vector2d());
+
+ ResourceProvider::ResourceId id2 = child_resource_provider_->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ uint8_t data2[4] = { 5, 5, 5, 5 };
+ child_resource_provider_->SetPixels(id2, data2, rect, rect, gfx::Vector2d());
+
+ // id3 is backed by a GPU-memory image (map/unmap) rather than SetPixels.
+ ResourceProvider::ResourceId id3 = child_resource_provider_->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ child_resource_provider_->AcquireImage(id3);
+ int stride;
+ child_resource_provider_->MapImage(id3, &stride);
+ child_resource_provider_->UnmapImage(id3);
+
+ // id4 wraps an externally-produced GL_TEXTURE_EXTERNAL_OES mailbox.
+ GLuint external_texture_id = child_context_->createExternalTexture();
+ child_context_->bindTexture(GL_TEXTURE_EXTERNAL_OES, external_texture_id);
+
+ gpu::Mailbox external_mailbox;
+ child_context_->genMailboxCHROMIUM(external_mailbox.name);
+ child_context_->produceTextureCHROMIUM(GL_TEXTURE_EXTERNAL_OES,
+ external_mailbox.name);
+ const GLuint external_sync_point = child_context_->insertSyncPoint();
+ ResourceProvider::ResourceId id4 =
+ child_resource_provider_->CreateResourceFromTextureMailbox(
+ TextureMailbox(
+ external_mailbox, GL_TEXTURE_EXTERNAL_OES, external_sync_point),
+ SingleReleaseCallbackImpl::Create(base::Bind(&EmptyReleaseCallback)));
+
+ ReturnedResourceArray returned_to_child;
+ int child_id =
+ resource_provider_->CreateChild(GetReturnCallback(&returned_to_child));
+ {
+ // Transfer some resources to the parent.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(id1);
+ resource_ids_to_transfer.push_back(id2);
+ resource_ids_to_transfer.push_back(id3);
+ resource_ids_to_transfer.push_back(id4);
+ TransferableResourceArray list;
+ child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+ &list);
+ ASSERT_EQ(4u, list.size());
+ // The three locally-created resources share one freshly-inserted sync
+ // point; the external mailbox keeps the sync point it was created with.
+ EXPECT_NE(0u, list[0].mailbox_holder.sync_point);
+ EXPECT_NE(0u, list[1].mailbox_holder.sync_point);
+ EXPECT_EQ(list[0].mailbox_holder.sync_point,
+ list[1].mailbox_holder.sync_point);
+ EXPECT_NE(0u, list[2].mailbox_holder.sync_point);
+ EXPECT_EQ(list[0].mailbox_holder.sync_point,
+ list[2].mailbox_holder.sync_point);
+ EXPECT_EQ(external_sync_point, list[3].mailbox_holder.sync_point);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D),
+ list[0].mailbox_holder.texture_target);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D),
+ list[1].mailbox_holder.texture_target);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D),
+ list[2].mailbox_holder.texture_target);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_EXTERNAL_OES),
+ list[3].mailbox_holder.texture_target);
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id1));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id2));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id3));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id4));
+ resource_provider_->ReceiveFromChild(child_id, list);
+ // The sync point wait is deferred: it only happens on first read access
+ // (WaitSyncPointIfNeeded), not on receive.
+ EXPECT_NE(list[0].mailbox_holder.sync_point,
+ context3d_->last_waited_sync_point());
+ {
+ resource_provider_->WaitSyncPointIfNeeded(list[0].id);
+ ResourceProvider::ScopedReadLockGL lock(resource_provider_.get(),
+ list[0].id);
+ }
+ EXPECT_EQ(list[0].mailbox_holder.sync_point,
+ context3d_->last_waited_sync_point());
+ resource_provider_->DeclareUsedResourcesFromChild(child_id,
+ resource_ids_to_transfer);
+ }
+
+ EXPECT_EQ(4u, resource_provider_->num_resources());
+ ResourceProvider::ResourceIdMap resource_map =
+ resource_provider_->GetChildToParentMap(child_id);
+ ResourceProvider::ResourceId mapped_id1 = resource_map[id1];
+ ResourceProvider::ResourceId mapped_id2 = resource_map[id2];
+ ResourceProvider::ResourceId mapped_id3 = resource_map[id3];
+ ResourceProvider::ResourceId mapped_id4 = resource_map[id4];
+ EXPECT_NE(0u, mapped_id1);
+ EXPECT_NE(0u, mapped_id2);
+ EXPECT_NE(0u, mapped_id3);
+ EXPECT_NE(0u, mapped_id4);
+ // The child ids are not in use *in the parent's* id space.
+ EXPECT_FALSE(resource_provider_->InUseByConsumer(id1));
+ EXPECT_FALSE(resource_provider_->InUseByConsumer(id2));
+ EXPECT_FALSE(resource_provider_->InUseByConsumer(id3));
+ EXPECT_FALSE(resource_provider_->InUseByConsumer(id4));
+
+ // The transferred pixel data must be readable through the parent's ids.
+ uint8_t result[4] = { 0 };
+ GetResourcePixels(
+ resource_provider_.get(), context(), mapped_id1, size, format, result);
+ EXPECT_EQ(0, memcmp(data1, result, pixel_size));
+
+ GetResourcePixels(
+ resource_provider_.get(), context(), mapped_id2, size, format, result);
+ EXPECT_EQ(0, memcmp(data2, result, pixel_size));
+
+ {
+ // Check that transferring again the same resource from the child to the
+ // parent works.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(id1);
+ resource_ids_to_transfer.push_back(id2);
+ resource_ids_to_transfer.push_back(id3);
+ TransferableResourceArray list;
+ child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+ &list);
+ EXPECT_EQ(3u, list.size());
+ EXPECT_EQ(id1, list[0].id);
+ EXPECT_EQ(id2, list[1].id);
+ EXPECT_EQ(id3, list[2].id);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D),
+ list[0].mailbox_holder.texture_target);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D),
+ list[1].mailbox_holder.texture_target);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D),
+ list[2].mailbox_holder.texture_target);
+ ReturnedResourceArray returned;
+ TransferableResource::ReturnResources(list, &returned);
+ child_resource_provider_->ReceiveReturnsFromParent(returned);
+ // ids were exported twice, we returned them only once, they should still
+ // be in-use.
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id1));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id2));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id3));
+ }
+ {
+ EXPECT_EQ(0u, returned_to_child.size());
+
+ // Transfer resources back from the parent to the child. Set no resources as
+ // being in use.
+ ResourceProvider::ResourceIdArray no_resources;
+ resource_provider_->DeclareUsedResourcesFromChild(child_id, no_resources);
+
+ ASSERT_EQ(4u, returned_to_child.size());
+ EXPECT_NE(0u, returned_to_child[0].sync_point);
+ EXPECT_NE(0u, returned_to_child[1].sync_point);
+ EXPECT_NE(0u, returned_to_child[2].sync_point);
+ EXPECT_NE(0u, returned_to_child[3].sync_point);
+ EXPECT_FALSE(returned_to_child[0].lost);
+ EXPECT_FALSE(returned_to_child[1].lost);
+ EXPECT_FALSE(returned_to_child[2].lost);
+ EXPECT_FALSE(returned_to_child[3].lost);
+ child_resource_provider_->ReceiveReturnsFromParent(returned_to_child);
+ returned_to_child.clear();
+ }
+ EXPECT_FALSE(child_resource_provider_->InUseByConsumer(id1));
+ EXPECT_FALSE(child_resource_provider_->InUseByConsumer(id2));
+ EXPECT_FALSE(child_resource_provider_->InUseByConsumer(id3));
+ EXPECT_FALSE(child_resource_provider_->InUseByConsumer(id4));
+
+ // After the round trip the child can still read its own pixel data back.
+ {
+ child_resource_provider_->WaitSyncPointIfNeeded(id1);
+ ResourceProvider::ScopedReadLockGL lock(child_resource_provider_.get(),
+ id1);
+ ASSERT_NE(0U, lock.texture_id());
+ child_context_->bindTexture(GL_TEXTURE_2D, lock.texture_id());
+ child_context_->GetPixels(size, format, result);
+ EXPECT_EQ(0, memcmp(data1, result, pixel_size));
+ }
+ {
+ child_resource_provider_->WaitSyncPointIfNeeded(id2);
+ ResourceProvider::ScopedReadLockGL lock(child_resource_provider_.get(),
+ id2);
+ ASSERT_NE(0U, lock.texture_id());
+ child_context_->bindTexture(GL_TEXTURE_2D, lock.texture_id());
+ child_context_->GetPixels(size, format, result);
+ EXPECT_EQ(0, memcmp(data2, result, pixel_size));
+ }
+ {
+ child_resource_provider_->WaitSyncPointIfNeeded(id3);
+ ResourceProvider::ScopedReadLockGL lock(child_resource_provider_.get(),
+ id3);
+ ASSERT_NE(0U, lock.texture_id());
+ child_context_->bindTexture(GL_TEXTURE_2D, lock.texture_id());
+ }
+ {
+ // Transfer resources to the parent again.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(id1);
+ resource_ids_to_transfer.push_back(id2);
+ resource_ids_to_transfer.push_back(id3);
+ resource_ids_to_transfer.push_back(id4);
+ TransferableResourceArray list;
+ child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+ &list);
+ ASSERT_EQ(4u, list.size());
+ EXPECT_EQ(id1, list[0].id);
+ EXPECT_EQ(id2, list[1].id);
+ EXPECT_EQ(id3, list[2].id);
+ EXPECT_EQ(id4, list[3].id);
+ EXPECT_NE(0u, list[0].mailbox_holder.sync_point);
+ EXPECT_NE(0u, list[1].mailbox_holder.sync_point);
+ EXPECT_NE(0u, list[2].mailbox_holder.sync_point);
+ EXPECT_NE(0u, list[3].mailbox_holder.sync_point);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D),
+ list[0].mailbox_holder.texture_target);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D),
+ list[1].mailbox_holder.texture_target);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D),
+ list[2].mailbox_holder.texture_target);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_EXTERNAL_OES),
+ list[3].mailbox_holder.texture_target);
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id1));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id2));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id3));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id4));
+ resource_provider_->ReceiveFromChild(child_id, list);
+ resource_provider_->DeclareUsedResourcesFromChild(child_id,
+ resource_ids_to_transfer);
+ }
+
+ EXPECT_EQ(0u, returned_to_child.size());
+
+ // Destroying the child must return everything, with valid sync points and
+ // nothing marked lost.
+ EXPECT_EQ(4u, resource_provider_->num_resources());
+ resource_provider_->DestroyChild(child_id);
+ EXPECT_EQ(0u, resource_provider_->num_resources());
+
+ ASSERT_EQ(4u, returned_to_child.size());
+ EXPECT_NE(0u, returned_to_child[0].sync_point);
+ EXPECT_NE(0u, returned_to_child[1].sync_point);
+ EXPECT_NE(0u, returned_to_child[2].sync_point);
+ EXPECT_NE(0u, returned_to_child[3].sync_point);
+ EXPECT_FALSE(returned_to_child[0].lost);
+ EXPECT_FALSE(returned_to_child[1].lost);
+ EXPECT_FALSE(returned_to_child[2].lost);
+ EXPECT_FALSE(returned_to_child[3].lost);
+}
+
+// GL-only: a live ScopedReadLockGL must defer both the return-to-child (in
+// the parent) and the actual deletion (in the child) until the lock goes out
+// of scope.
+TEST_P(ResourceProviderTest, ReadLockCountStopsReturnToChildOrDelete) {
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+ gfx::Size size(1, 1);
+ ResourceFormat format = RGBA_8888;
+
+ ResourceProvider::ResourceId id1 = child_resource_provider_->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ uint8_t data1[4] = {1, 2, 3, 4};
+ gfx::Rect rect(size);
+ child_resource_provider_->SetPixels(id1, data1, rect, rect, gfx::Vector2d());
+
+ ReturnedResourceArray returned_to_child;
+ int child_id =
+ resource_provider_->CreateChild(GetReturnCallback(&returned_to_child));
+ {
+ // Transfer some resources to the parent.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(id1);
+ TransferableResourceArray list;
+ child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+ &list);
+ ASSERT_EQ(1u, list.size());
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id1));
+
+ resource_provider_->ReceiveFromChild(child_id, list);
+
+ resource_provider_->WaitSyncPointIfNeeded(list[0].id);
+ ResourceProvider::ScopedReadLockGL lock(resource_provider_.get(),
+ list[0].id);
+
+ // While the parent holds a read lock, declaring the resource unused
+ // must NOT return it to the child yet.
+ resource_provider_->DeclareUsedResourcesFromChild(
+ child_id, ResourceProvider::ResourceIdArray());
+ EXPECT_EQ(0u, returned_to_child.size());
+ }
+
+ // Lock released: the resource is returned now.
+ EXPECT_EQ(1u, returned_to_child.size());
+ child_resource_provider_->ReceiveReturnsFromParent(returned_to_child);
+
+ {
+ child_resource_provider_->WaitSyncPointIfNeeded(id1);
+ ResourceProvider::ScopedReadLockGL lock(child_resource_provider_.get(),
+ id1);
+ // DeleteResource while read-locked must defer the actual deletion.
+ child_resource_provider_->DeleteResource(id1);
+ EXPECT_EQ(1u, child_resource_provider_->num_resources());
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id1));
+ }
+
+ EXPECT_EQ(0u, child_resource_provider_->num_resources());
+ resource_provider_->DestroyChild(child_id);
+}
+
+// GL-only: the allow_overlay bit set on a TextureMailbox must survive the
+// transfer to the parent and be queryable per resource there.
+TEST_P(ResourceProviderTest, AllowOverlayTransfersToParent) {
+ // Overlays only supported on the GL path.
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+
+ uint32 sync_point = 0;
+ TextureMailbox mailbox(gpu::Mailbox::Generate(), GL_TEXTURE_2D, sync_point);
+ mailbox.set_allow_overlay(true);
+ scoped_ptr<SingleReleaseCallbackImpl> release_callback =
+ SingleReleaseCallbackImpl::Create(base::Bind(&EmptyReleaseCallback));
+ ResourceProvider::ResourceId id1 =
+ child_resource_provider_->CreateResourceFromTextureMailbox(
+ mailbox, release_callback.Pass());
+
+ TextureMailbox mailbox2(gpu::Mailbox::Generate(), GL_TEXTURE_2D, sync_point);
+ mailbox2.set_allow_overlay(false);
+ scoped_ptr<SingleReleaseCallbackImpl> release_callback2 =
+ SingleReleaseCallbackImpl::Create(base::Bind(&EmptyReleaseCallback));
+ ResourceProvider::ResourceId id2 =
+ child_resource_provider_->CreateResourceFromTextureMailbox(
+ mailbox2, release_callback2.Pass());
+
+ ReturnedResourceArray returned_to_child;
+ int child_id =
+ resource_provider_->CreateChild(GetReturnCallback(&returned_to_child));
+
+ // Transfer some resources to the parent.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(id1);
+ resource_ids_to_transfer.push_back(id2);
+ TransferableResourceArray list;
+ child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+ &list);
+ ASSERT_EQ(2u, list.size());
+ resource_provider_->ReceiveFromChild(child_id, list);
+ // The overlay flag must match what each mailbox declared.
+ EXPECT_TRUE(resource_provider_->AllowOverlay(list[0].id));
+ EXPECT_FALSE(resource_provider_->AllowOverlay(list[1].id));
+
+ resource_provider_->DeclareUsedResourcesFromChild(
+ child_id, ResourceProvider::ResourceIdArray());
+
+ EXPECT_EQ(2u, returned_to_child.size());
+ child_resource_provider_->ReceiveReturnsFromParent(returned_to_child);
+
+ child_resource_provider_->DeleteResource(id1);
+ child_resource_provider_->DeleteResource(id2);
+ EXPECT_EQ(0u, child_resource_provider_->num_resources());
+
+ resource_provider_->DestroyChild(child_id);
+}
+
+TEST_P(ResourceProviderTest, TransferSoftwareResources) {
+ if (GetParam() != ResourceProvider::Bitmap)
+ return;
+
+ gfx::Size size(1, 1);
+ ResourceFormat format = RGBA_8888;
+ size_t pixel_size = TextureSizeBytes(size, format);
+ ASSERT_EQ(4U, pixel_size);
+
+ ResourceProvider::ResourceId id1 = child_resource_provider_->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ uint8_t data1[4] = { 1, 2, 3, 4 };
+ gfx::Rect rect(size);
+ child_resource_provider_->SetPixels(id1, data1, rect, rect, gfx::Vector2d());
+
+ ResourceProvider::ResourceId id2 = child_resource_provider_->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ uint8_t data2[4] = { 5, 5, 5, 5 };
+ child_resource_provider_->SetPixels(id2, data2, rect, rect, gfx::Vector2d());
+
+ scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
+ shared_memory->CreateAndMapAnonymous(1);
+ base::SharedMemory* shared_memory_ptr = shared_memory.get();
+ ResourceProvider::ResourceId id3 =
+ child_resource_provider_->CreateResourceFromTextureMailbox(
+ TextureMailbox(shared_memory_ptr, gfx::Size(1, 1)),
+ SingleReleaseCallbackImpl::Create(base::Bind(
+ &SharedMemoryReleaseCallback, base::Passed(&shared_memory))));
+
+ ReturnedResourceArray returned_to_child;
+ int child_id =
+ resource_provider_->CreateChild(GetReturnCallback(&returned_to_child));
+ {
+ // Transfer some resources to the parent.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(id1);
+ resource_ids_to_transfer.push_back(id2);
+ resource_ids_to_transfer.push_back(id3);
+ TransferableResourceArray list;
+ child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+ &list);
+ ASSERT_EQ(3u, list.size());
+ EXPECT_EQ(0u, list[0].mailbox_holder.sync_point);
+ EXPECT_EQ(0u, list[1].mailbox_holder.sync_point);
+ EXPECT_EQ(0u, list[2].mailbox_holder.sync_point);
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id1));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id2));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id3));
+ resource_provider_->ReceiveFromChild(child_id, list);
+ resource_provider_->DeclareUsedResourcesFromChild(child_id,
+ resource_ids_to_transfer);
+ }
+
+ EXPECT_EQ(3u, resource_provider_->num_resources());
+ ResourceProvider::ResourceIdMap resource_map =
+ resource_provider_->GetChildToParentMap(child_id);
+ ResourceProvider::ResourceId mapped_id1 = resource_map[id1];
+ ResourceProvider::ResourceId mapped_id2 = resource_map[id2];
+ ResourceProvider::ResourceId mapped_id3 = resource_map[id3];
+ EXPECT_NE(0u, mapped_id1);
+ EXPECT_NE(0u, mapped_id2);
+ EXPECT_NE(0u, mapped_id3);
+ EXPECT_FALSE(resource_provider_->InUseByConsumer(id1));
+ EXPECT_FALSE(resource_provider_->InUseByConsumer(id2));
+ EXPECT_FALSE(resource_provider_->InUseByConsumer(id3));
+
+ uint8_t result[4] = { 0 };
+ GetResourcePixels(
+ resource_provider_.get(), context(), mapped_id1, size, format, result);
+ EXPECT_EQ(0, memcmp(data1, result, pixel_size));
+
+ GetResourcePixels(
+ resource_provider_.get(), context(), mapped_id2, size, format, result);
+ EXPECT_EQ(0, memcmp(data2, result, pixel_size));
+
+ {
+ // Check that transfering again the same resource from the child to the
+ // parent works.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(id1);
+ resource_ids_to_transfer.push_back(id2);
+ TransferableResourceArray list;
+ child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+ &list);
+ EXPECT_EQ(2u, list.size());
+ EXPECT_EQ(id1, list[0].id);
+ EXPECT_EQ(id2, list[1].id);
+ ReturnedResourceArray returned;
+ TransferableResource::ReturnResources(list, &returned);
+ child_resource_provider_->ReceiveReturnsFromParent(returned);
+ // ids were exported twice, we returned them only once, they should still
+ // be in-use.
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id1));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id2));
+ }
+ {
+ EXPECT_EQ(0u, returned_to_child.size());
+
+ // Transfer resources back from the parent to the child. Set no resources as
+ // being in use.
+ ResourceProvider::ResourceIdArray no_resources;
+ resource_provider_->DeclareUsedResourcesFromChild(child_id, no_resources);
+
+ ASSERT_EQ(3u, returned_to_child.size());
+ EXPECT_EQ(0u, returned_to_child[0].sync_point);
+ EXPECT_EQ(0u, returned_to_child[1].sync_point);
+ EXPECT_EQ(0u, returned_to_child[2].sync_point);
+ std::set<ResourceProvider::ResourceId> expected_ids;
+ expected_ids.insert(id1);
+ expected_ids.insert(id2);
+ expected_ids.insert(id3);
+ std::set<ResourceProvider::ResourceId> returned_ids;
+ for (unsigned i = 0; i < 3; i++)
+ returned_ids.insert(returned_to_child[i].id);
+ EXPECT_EQ(expected_ids, returned_ids);
+ EXPECT_FALSE(returned_to_child[0].lost);
+ EXPECT_FALSE(returned_to_child[1].lost);
+ EXPECT_FALSE(returned_to_child[2].lost);
+ child_resource_provider_->ReceiveReturnsFromParent(returned_to_child);
+ returned_to_child.clear();
+ }
+ EXPECT_FALSE(child_resource_provider_->InUseByConsumer(id1));
+ EXPECT_FALSE(child_resource_provider_->InUseByConsumer(id2));
+ EXPECT_FALSE(child_resource_provider_->InUseByConsumer(id3));
+
+ {
+ ResourceProvider::ScopedReadLockSoftware lock(
+ child_resource_provider_.get(), id1);
+ const SkBitmap* sk_bitmap = lock.sk_bitmap();
+ EXPECT_EQ(sk_bitmap->width(), size.width());
+ EXPECT_EQ(sk_bitmap->height(), size.height());
+ EXPECT_EQ(0, memcmp(data1, sk_bitmap->getPixels(), pixel_size));
+ }
+ {
+ ResourceProvider::ScopedReadLockSoftware lock(
+ child_resource_provider_.get(), id2);
+ const SkBitmap* sk_bitmap = lock.sk_bitmap();
+ EXPECT_EQ(sk_bitmap->width(), size.width());
+ EXPECT_EQ(sk_bitmap->height(), size.height());
+ EXPECT_EQ(0, memcmp(data2, sk_bitmap->getPixels(), pixel_size));
+ }
+ {
+ // Transfer resources to the parent again.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(id1);
+ resource_ids_to_transfer.push_back(id2);
+ resource_ids_to_transfer.push_back(id3);
+ TransferableResourceArray list;
+ child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+ &list);
+ ASSERT_EQ(3u, list.size());
+ EXPECT_EQ(id1, list[0].id);
+ EXPECT_EQ(id2, list[1].id);
+ EXPECT_EQ(id3, list[2].id);
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id1));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id2));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id3));
+ resource_provider_->ReceiveFromChild(child_id, list);
+ resource_provider_->DeclareUsedResourcesFromChild(child_id,
+ resource_ids_to_transfer);
+ }
+
+ EXPECT_EQ(0u, returned_to_child.size());
+
+ EXPECT_EQ(3u, resource_provider_->num_resources());
+ resource_provider_->DestroyChild(child_id);
+ EXPECT_EQ(0u, resource_provider_->num_resources());
+
+ ASSERT_EQ(3u, returned_to_child.size());
+ EXPECT_EQ(0u, returned_to_child[0].sync_point);
+ EXPECT_EQ(0u, returned_to_child[1].sync_point);
+ EXPECT_EQ(0u, returned_to_child[2].sync_point);
+ std::set<ResourceProvider::ResourceId> expected_ids;
+ expected_ids.insert(id1);
+ expected_ids.insert(id2);
+ expected_ids.insert(id3);
+ std::set<ResourceProvider::ResourceId> returned_ids;
+ for (unsigned i = 0; i < 3; i++)
+ returned_ids.insert(returned_to_child[i].id);
+ EXPECT_EQ(expected_ids, returned_ids);
+ EXPECT_FALSE(returned_to_child[0].lost);
+ EXPECT_FALSE(returned_to_child[1].lost);
+ EXPECT_FALSE(returned_to_child[2].lost);
+}
+
+TEST_P(ResourceProviderTest, TransferGLToSoftware) {
+ if (GetParam() != ResourceProvider::Bitmap)
+ return;
+
+ scoped_ptr<ResourceProviderContext> child_context_owned(
+ ResourceProviderContext::Create(shared_data_.get()));
+
+ FakeOutputSurfaceClient child_output_surface_client;
+ scoped_ptr<OutputSurface> child_output_surface(
+ FakeOutputSurface::Create3d(child_context_owned.Pass()));
+ CHECK(child_output_surface->BindToClient(&child_output_surface_client));
+
+ scoped_ptr<ResourceProvider> child_resource_provider(
+ ResourceProvider::Create(child_output_surface.get(),
+ shared_bitmap_manager_.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+
+ gfx::Size size(1, 1);
+ ResourceFormat format = RGBA_8888;
+ size_t pixel_size = TextureSizeBytes(size, format);
+ ASSERT_EQ(4U, pixel_size);
+
+ ResourceProvider::ResourceId id1 = child_resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ uint8_t data1[4] = { 1, 2, 3, 4 };
+ gfx::Rect rect(size);
+ child_resource_provider->SetPixels(id1, data1, rect, rect, gfx::Vector2d());
+
+ ReturnedResourceArray returned_to_child;
+ int child_id =
+ resource_provider_->CreateChild(GetReturnCallback(&returned_to_child));
+ {
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(id1);
+ TransferableResourceArray list;
+ child_resource_provider->PrepareSendToParent(resource_ids_to_transfer,
+ &list);
+ ASSERT_EQ(1u, list.size());
+ EXPECT_NE(0u, list[0].mailbox_holder.sync_point);
+ EXPECT_EQ(static_cast<GLenum>(GL_TEXTURE_2D),
+ list[0].mailbox_holder.texture_target);
+ EXPECT_TRUE(child_resource_provider->InUseByConsumer(id1));
+ resource_provider_->ReceiveFromChild(child_id, list);
+ }
+
+ EXPECT_EQ(0u, resource_provider_->num_resources());
+ ASSERT_EQ(1u, returned_to_child.size());
+ EXPECT_EQ(returned_to_child[0].id, id1);
+ ResourceProvider::ResourceIdMap resource_map =
+ resource_provider_->GetChildToParentMap(child_id);
+ ResourceProvider::ResourceId mapped_id1 = resource_map[id1];
+ EXPECT_EQ(0u, mapped_id1);
+
+ resource_provider_->DestroyChild(child_id);
+ EXPECT_EQ(0u, resource_provider_->num_resources());
+
+ ASSERT_EQ(1u, returned_to_child.size());
+ EXPECT_FALSE(returned_to_child[0].lost);
+}
+
+TEST_P(ResourceProviderTest, TransferInvalidSoftware) {
+ if (GetParam() != ResourceProvider::Bitmap)
+ return;
+
+ gfx::Size size(1, 1);
+ ResourceFormat format = RGBA_8888;
+ size_t pixel_size = TextureSizeBytes(size, format);
+ ASSERT_EQ(4U, pixel_size);
+
+ ResourceProvider::ResourceId id1 = child_resource_provider_->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ uint8_t data1[4] = { 1, 2, 3, 4 };
+ gfx::Rect rect(size);
+ child_resource_provider_->SetPixels(id1, data1, rect, rect, gfx::Vector2d());
+
+ ReturnedResourceArray returned_to_child;
+ int child_id =
+ resource_provider_->CreateChild(GetReturnCallback(&returned_to_child));
+ {
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(id1);
+ TransferableResourceArray list;
+ child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+ &list);
+ ASSERT_EQ(1u, list.size());
+ // Make invalid.
+ list[0].mailbox_holder.mailbox.name[1] = 5;
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id1));
+ resource_provider_->ReceiveFromChild(child_id, list);
+ }
+
+ EXPECT_EQ(1u, resource_provider_->num_resources());
+ EXPECT_EQ(0u, returned_to_child.size());
+
+ ResourceProvider::ResourceIdMap resource_map =
+ resource_provider_->GetChildToParentMap(child_id);
+ ResourceProvider::ResourceId mapped_id1 = resource_map[id1];
+ EXPECT_NE(0u, mapped_id1);
+ {
+ ResourceProvider::ScopedReadLockSoftware lock(resource_provider_.get(),
+ mapped_id1);
+ EXPECT_FALSE(lock.valid());
+ }
+
+ resource_provider_->DestroyChild(child_id);
+ EXPECT_EQ(0u, resource_provider_->num_resources());
+
+ ASSERT_EQ(1u, returned_to_child.size());
+ EXPECT_FALSE(returned_to_child[0].lost);
+}
+
+TEST_P(ResourceProviderTest, DeleteExportedResources) {
+ gfx::Size size(1, 1);
+ ResourceFormat format = RGBA_8888;
+ size_t pixel_size = TextureSizeBytes(size, format);
+ ASSERT_EQ(4U, pixel_size);
+
+ ResourceProvider::ResourceId id1 = child_resource_provider_->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ uint8_t data1[4] = { 1, 2, 3, 4 };
+ gfx::Rect rect(size);
+ child_resource_provider_->SetPixels(id1, data1, rect, rect, gfx::Vector2d());
+
+ ResourceProvider::ResourceId id2 = child_resource_provider_->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ uint8_t data2[4] = {5, 5, 5, 5};
+ child_resource_provider_->SetPixels(id2, data2, rect, rect, gfx::Vector2d());
+
+ ReturnedResourceArray returned_to_child;
+ int child_id =
+ resource_provider_->CreateChild(GetReturnCallback(&returned_to_child));
+ {
+ // Transfer some resources to the parent.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(id1);
+ resource_ids_to_transfer.push_back(id2);
+ TransferableResourceArray list;
+ child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+ &list);
+ ASSERT_EQ(2u, list.size());
+ if (GetParam() == ResourceProvider::GLTexture) {
+ EXPECT_NE(0u, list[0].mailbox_holder.sync_point);
+ EXPECT_NE(0u, list[1].mailbox_holder.sync_point);
+ }
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id1));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id2));
+ resource_provider_->ReceiveFromChild(child_id, list);
+ resource_provider_->DeclareUsedResourcesFromChild(child_id,
+ resource_ids_to_transfer);
+ }
+
+ EXPECT_EQ(2u, resource_provider_->num_resources());
+ ResourceProvider::ResourceIdMap resource_map =
+ resource_provider_->GetChildToParentMap(child_id);
+ ResourceProvider::ResourceId mapped_id1 = resource_map[id1];
+ ResourceProvider::ResourceId mapped_id2 = resource_map[id2];
+ EXPECT_NE(0u, mapped_id1);
+ EXPECT_NE(0u, mapped_id2);
+ EXPECT_FALSE(resource_provider_->InUseByConsumer(id1));
+ EXPECT_FALSE(resource_provider_->InUseByConsumer(id2));
+
+ {
+ // The parent transfers the resources to the grandparent.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(mapped_id1);
+ resource_ids_to_transfer.push_back(mapped_id2);
+ TransferableResourceArray list;
+ resource_provider_->PrepareSendToParent(resource_ids_to_transfer, &list);
+
+ ASSERT_EQ(2u, list.size());
+ if (GetParam() == ResourceProvider::GLTexture) {
+ EXPECT_NE(0u, list[0].mailbox_holder.sync_point);
+ EXPECT_NE(0u, list[1].mailbox_holder.sync_point);
+ }
+ EXPECT_TRUE(resource_provider_->InUseByConsumer(id1));
+ EXPECT_TRUE(resource_provider_->InUseByConsumer(id2));
+
+ // Release the resource in the parent. Set no resources as being in use. The
+ // resources are exported so that can't be transferred back yet.
+ ResourceProvider::ResourceIdArray no_resources;
+ resource_provider_->DeclareUsedResourcesFromChild(child_id, no_resources);
+
+ EXPECT_EQ(0u, returned_to_child.size());
+ EXPECT_EQ(2u, resource_provider_->num_resources());
+
+ // Return the resources from the grandparent to the parent. They should be
+ // returned to the child then.
+ EXPECT_EQ(2u, list.size());
+ EXPECT_EQ(mapped_id1, list[0].id);
+ EXPECT_EQ(mapped_id2, list[1].id);
+ ReturnedResourceArray returned;
+ TransferableResource::ReturnResources(list, &returned);
+ resource_provider_->ReceiveReturnsFromParent(returned);
+
+ EXPECT_EQ(0u, resource_provider_->num_resources());
+ ASSERT_EQ(2u, returned_to_child.size());
+ if (GetParam() == ResourceProvider::GLTexture) {
+ EXPECT_NE(0u, returned_to_child[0].sync_point);
+ EXPECT_NE(0u, returned_to_child[1].sync_point);
+ }
+ EXPECT_FALSE(returned_to_child[0].lost);
+ EXPECT_FALSE(returned_to_child[1].lost);
+ }
+}
+
+TEST_P(ResourceProviderTest, DestroyChildWithExportedResources) {
+ gfx::Size size(1, 1);
+ ResourceFormat format = RGBA_8888;
+ size_t pixel_size = TextureSizeBytes(size, format);
+ ASSERT_EQ(4U, pixel_size);
+
+ ResourceProvider::ResourceId id1 = child_resource_provider_->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ uint8_t data1[4] = {1, 2, 3, 4};
+ gfx::Rect rect(size);
+ child_resource_provider_->SetPixels(id1, data1, rect, rect, gfx::Vector2d());
+
+ ResourceProvider::ResourceId id2 = child_resource_provider_->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ uint8_t data2[4] = {5, 5, 5, 5};
+ child_resource_provider_->SetPixels(id2, data2, rect, rect, gfx::Vector2d());
+
+ ReturnedResourceArray returned_to_child;
+ int child_id =
+ resource_provider_->CreateChild(GetReturnCallback(&returned_to_child));
+ {
+ // Transfer some resources to the parent.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(id1);
+ resource_ids_to_transfer.push_back(id2);
+ TransferableResourceArray list;
+ child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+ &list);
+ ASSERT_EQ(2u, list.size());
+ if (GetParam() == ResourceProvider::GLTexture) {
+ EXPECT_NE(0u, list[0].mailbox_holder.sync_point);
+ EXPECT_NE(0u, list[1].mailbox_holder.sync_point);
+ }
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id1));
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id2));
+ resource_provider_->ReceiveFromChild(child_id, list);
+ resource_provider_->DeclareUsedResourcesFromChild(child_id,
+ resource_ids_to_transfer);
+ }
+
+ EXPECT_EQ(2u, resource_provider_->num_resources());
+ ResourceProvider::ResourceIdMap resource_map =
+ resource_provider_->GetChildToParentMap(child_id);
+ ResourceProvider::ResourceId mapped_id1 = resource_map[id1];
+ ResourceProvider::ResourceId mapped_id2 = resource_map[id2];
+ EXPECT_NE(0u, mapped_id1);
+ EXPECT_NE(0u, mapped_id2);
+ EXPECT_FALSE(resource_provider_->InUseByConsumer(id1));
+ EXPECT_FALSE(resource_provider_->InUseByConsumer(id2));
+
+ {
+ // The parent transfers the resources to the grandparent.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(mapped_id1);
+ resource_ids_to_transfer.push_back(mapped_id2);
+ TransferableResourceArray list;
+ resource_provider_->PrepareSendToParent(resource_ids_to_transfer, &list);
+
+ ASSERT_EQ(2u, list.size());
+ if (GetParam() == ResourceProvider::GLTexture) {
+ EXPECT_NE(0u, list[0].mailbox_holder.sync_point);
+ EXPECT_NE(0u, list[1].mailbox_holder.sync_point);
+ }
+ EXPECT_TRUE(resource_provider_->InUseByConsumer(id1));
+ EXPECT_TRUE(resource_provider_->InUseByConsumer(id2));
+
+ // Release the resource in the parent. Set no resources as being in use. The
+ // resources are exported so that can't be transferred back yet.
+ ResourceProvider::ResourceIdArray no_resources;
+ resource_provider_->DeclareUsedResourcesFromChild(child_id, no_resources);
+
+ // Destroy the child, the resources should not be returned yet.
+ EXPECT_EQ(0u, returned_to_child.size());
+ EXPECT_EQ(2u, resource_provider_->num_resources());
+
+ resource_provider_->DestroyChild(child_id);
+
+ EXPECT_EQ(2u, resource_provider_->num_resources());
+ ASSERT_EQ(0u, returned_to_child.size());
+
+ // Return a resource from the grandparent, it should be returned at this
+ // point.
+ EXPECT_EQ(2u, list.size());
+ EXPECT_EQ(mapped_id1, list[0].id);
+ EXPECT_EQ(mapped_id2, list[1].id);
+ TransferableResourceArray return_list;
+ return_list.push_back(list[1]);
+ list.pop_back();
+ ReturnedResourceArray returned;
+ TransferableResource::ReturnResources(return_list, &returned);
+ resource_provider_->ReceiveReturnsFromParent(returned);
+
+ EXPECT_EQ(1u, resource_provider_->num_resources());
+ ASSERT_EQ(1u, returned_to_child.size());
+ if (GetParam() == ResourceProvider::GLTexture) {
+ EXPECT_NE(0u, returned_to_child[0].sync_point);
+ }
+ EXPECT_FALSE(returned_to_child[0].lost);
+ returned_to_child.clear();
+
+ // Destroy the parent resource provider. The resource that's left should be
+ // lost at this point, and returned.
+ resource_provider_ = nullptr;
+ ASSERT_EQ(1u, returned_to_child.size());
+ if (GetParam() == ResourceProvider::GLTexture) {
+ EXPECT_NE(0u, returned_to_child[0].sync_point);
+ }
+ EXPECT_TRUE(returned_to_child[0].lost);
+ }
+}
+
+TEST_P(ResourceProviderTest, DeleteTransferredResources) {
+ gfx::Size size(1, 1);
+ ResourceFormat format = RGBA_8888;
+ size_t pixel_size = TextureSizeBytes(size, format);
+ ASSERT_EQ(4U, pixel_size);
+
+ ResourceProvider::ResourceId id = child_resource_provider_->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ uint8_t data[4] = { 1, 2, 3, 4 };
+ gfx::Rect rect(size);
+ child_resource_provider_->SetPixels(id, data, rect, rect, gfx::Vector2d());
+
+ ReturnedResourceArray returned_to_child;
+ int child_id =
+ resource_provider_->CreateChild(GetReturnCallback(&returned_to_child));
+ {
+ // Transfer some resource to the parent.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(id);
+ TransferableResourceArray list;
+ child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+ &list);
+ ASSERT_EQ(1u, list.size());
+ if (GetParam() == ResourceProvider::GLTexture)
+ EXPECT_NE(0u, list[0].mailbox_holder.sync_point);
+ EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id));
+ resource_provider_->ReceiveFromChild(child_id, list);
+ resource_provider_->DeclareUsedResourcesFromChild(child_id,
+ resource_ids_to_transfer);
+ }
+
+ // Delete textures in the child, while they are transfered.
+ child_resource_provider_->DeleteResource(id);
+ EXPECT_EQ(1u, child_resource_provider_->num_resources());
+ {
+ EXPECT_EQ(0u, returned_to_child.size());
+
+ // Transfer resources back from the parent to the child. Set no resources as
+ // being in use.
+ ResourceProvider::ResourceIdArray no_resources;
+ resource_provider_->DeclareUsedResourcesFromChild(child_id, no_resources);
+
+ ASSERT_EQ(1u, returned_to_child.size());
+ if (GetParam() == ResourceProvider::GLTexture)
+ EXPECT_NE(0u, returned_to_child[0].sync_point);
+ child_resource_provider_->ReceiveReturnsFromParent(returned_to_child);
+ }
+ EXPECT_EQ(0u, child_resource_provider_->num_resources());
+}
+
// Exercises the export reference counting across three levels: a resource is
// exported child->parent twice and parent->top-level twice; it must only be
// returned to the child when BOTH the parent no longer declares it used AND
// the top-level has returned every export, and the return then carries the
// aggregate export count.
TEST_P(ResourceProviderTest, UnuseTransferredResources) {
  gfx::Size size(1, 1);
  ResourceFormat format = RGBA_8888;
  size_t pixel_size = TextureSizeBytes(size, format);
  ASSERT_EQ(4U, pixel_size);

  ResourceProvider::ResourceId id = child_resource_provider_->CreateResource(
      size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
  uint8_t data[4] = {1, 2, 3, 4};
  gfx::Rect rect(size);
  child_resource_provider_->SetPixels(id, data, rect, rect, gfx::Vector2d());

  ReturnedResourceArray returned_to_child;
  int child_id =
      resource_provider_->CreateChild(GetReturnCallback(&returned_to_child));
  // Reference (not a copy): reflects the live child-id -> parent-id mapping.
  const ResourceProvider::ResourceIdMap& map =
      resource_provider_->GetChildToParentMap(child_id);
  {
    // Transfer some resource to the parent.
    ResourceProvider::ResourceIdArray resource_ids_to_transfer;
    resource_ids_to_transfer.push_back(id);
    TransferableResourceArray list;
    child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
                                                  &list);
    EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id));
    resource_provider_->ReceiveFromChild(child_id, list);
    resource_provider_->DeclareUsedResourcesFromChild(child_id,
                                                      resource_ids_to_transfer);
  }
  TransferableResourceArray sent_to_top_level;
  {
    // Parent transfers to top-level.
    ASSERT_TRUE(map.find(id) != map.end());
    ResourceProvider::ResourceId parent_id = map.find(id)->second;
    ResourceProvider::ResourceIdArray resource_ids_to_transfer;
    resource_ids_to_transfer.push_back(parent_id);
    resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
                                            &sent_to_top_level);
    EXPECT_TRUE(resource_provider_->InUseByConsumer(parent_id));
  }
  {
    // Stop using resource.
    ResourceProvider::ResourceIdArray empty;
    resource_provider_->DeclareUsedResourcesFromChild(child_id, empty);
    // Resource is not yet returned to the child, since it's in use by the
    // top-level.
    EXPECT_TRUE(returned_to_child.empty());
  }
  {
    // Send the resource to the parent again.
    // This is the child's second export of |id|; the eventual return must
    // account for both.
    ResourceProvider::ResourceIdArray resource_ids_to_transfer;
    resource_ids_to_transfer.push_back(id);
    TransferableResourceArray list;
    child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
                                                  &list);
    EXPECT_TRUE(child_resource_provider_->InUseByConsumer(id));
    resource_provider_->ReceiveFromChild(child_id, list);
    resource_provider_->DeclareUsedResourcesFromChild(child_id,
                                                      resource_ids_to_transfer);
  }
  {
    // Receive returns back from top-level.
    ReturnedResourceArray returned;
    TransferableResource::ReturnResources(sent_to_top_level, &returned);
    resource_provider_->ReceiveReturnsFromParent(returned);
    // Resource is still not yet returned to the child, since it's declared used
    // in the parent.
    EXPECT_TRUE(returned_to_child.empty());
    ASSERT_TRUE(map.find(id) != map.end());
    ResourceProvider::ResourceId parent_id = map.find(id)->second;
    EXPECT_FALSE(resource_provider_->InUseByConsumer(parent_id));
  }
  {
    sent_to_top_level.clear();
    // Parent transfers again to top-level.
    ASSERT_TRUE(map.find(id) != map.end());
    ResourceProvider::ResourceId parent_id = map.find(id)->second;
    ResourceProvider::ResourceIdArray resource_ids_to_transfer;
    resource_ids_to_transfer.push_back(parent_id);
    resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
                                            &sent_to_top_level);
    EXPECT_TRUE(resource_provider_->InUseByConsumer(parent_id));
  }
  {
    // Receive returns back from top-level.
    ReturnedResourceArray returned;
    TransferableResource::ReturnResources(sent_to_top_level, &returned);
    resource_provider_->ReceiveReturnsFromParent(returned);
    // Resource is still not yet returned to the child, since it's still
    // declared used in the parent.
    EXPECT_TRUE(returned_to_child.empty());
    ASSERT_TRUE(map.find(id) != map.end());
    ResourceProvider::ResourceId parent_id = map.find(id)->second;
    EXPECT_FALSE(resource_provider_->InUseByConsumer(parent_id));
  }
  {
    // Stop using resource.
    ResourceProvider::ResourceIdArray empty;
    resource_provider_->DeclareUsedResourcesFromChild(child_id, empty);
    // Resource should have been returned to the child, since it's no longer in
    // use by the top-level.
    ASSERT_EQ(1u, returned_to_child.size());
    EXPECT_EQ(id, returned_to_child[0].id);
    // The child exported |id| twice, so the single return carries count 2.
    EXPECT_EQ(2, returned_to_child[0].count);
    child_resource_provider_->ReceiveReturnsFromParent(returned_to_child);
    returned_to_child.clear();
    EXPECT_FALSE(child_resource_provider_->InUseByConsumer(id));
  }
}
+
// Verifies GL filter-state bookkeeping across a child/parent transfer using
// mock GL contexts: the child sets |child_filter|, the parent overrides it
// with |parent_filter| after receiving the resource, and on return the parent
// must restore |child_filter| (the state the resource was received with) so
// the child's cached filter state stays accurate. EXPECT_CALL ordering is the
// essence of this test; statements must not be reordered.
class ResourceProviderTestTextureFilters : public ResourceProviderTest {
 public:
  static void RunTest(GLenum child_filter, GLenum parent_filter) {
    scoped_ptr<TextureStateTrackingContext> child_context_owned(
        new TextureStateTrackingContext);
    TextureStateTrackingContext* child_context = child_context_owned.get();

    FakeOutputSurfaceClient child_output_surface_client;
    scoped_ptr<OutputSurface> child_output_surface(
        FakeOutputSurface::Create3d(child_context_owned.Pass()));
    CHECK(child_output_surface->BindToClient(&child_output_surface_client));
    scoped_ptr<SharedBitmapManager> shared_bitmap_manager(
        new TestSharedBitmapManager());

    scoped_ptr<ResourceProvider> child_resource_provider(
        ResourceProvider::Create(child_output_surface.get(),
                                 shared_bitmap_manager.get(),
                                 NULL,
                                 0,
                                 false,
                                 1,
                                 false));

    scoped_ptr<TextureStateTrackingContext> parent_context_owned(
        new TextureStateTrackingContext);
    TextureStateTrackingContext* parent_context = parent_context_owned.get();

    FakeOutputSurfaceClient parent_output_surface_client;
    scoped_ptr<OutputSurface> parent_output_surface(
        FakeOutputSurface::Create3d(parent_context_owned.Pass()));
    CHECK(parent_output_surface->BindToClient(&parent_output_surface_client));

    scoped_ptr<ResourceProvider> parent_resource_provider(
        ResourceProvider::Create(parent_output_surface.get(),
                                 shared_bitmap_manager.get(),
                                 NULL,
                                 0,
                                 false,
                                 1,
                                 false));

    gfx::Size size(1, 1);
    ResourceFormat format = RGBA_8888;
    // Expected GL texture ids in each mock context — presumably the first ids
    // TextureStateTrackingContext hands out; TODO confirm against the mock.
    int child_texture_id = 1;
    int parent_texture_id = 2;

    size_t pixel_size = TextureSizeBytes(size, format);
    ASSERT_EQ(4U, pixel_size);

    ResourceProvider::ResourceId id = child_resource_provider->CreateResource(
        size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);

    // The new texture is created with GL_LINEAR.
    EXPECT_CALL(*child_context, bindTexture(GL_TEXTURE_2D, child_texture_id))
        .Times(2); // Once to create and once to allocate.
    EXPECT_CALL(*child_context,
                texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
    EXPECT_CALL(*child_context,
                texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
    EXPECT_CALL(
        *child_context,
        texParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
    EXPECT_CALL(
        *child_context,
        texParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
    EXPECT_CALL(*child_context,
                texParameteri(GL_TEXTURE_2D,
                              GL_TEXTURE_POOL_CHROMIUM,
                              GL_TEXTURE_POOL_UNMANAGED_CHROMIUM));
    child_resource_provider->AllocateForTesting(id);
    Mock::VerifyAndClearExpectations(child_context);

    uint8_t data[4] = { 1, 2, 3, 4 };
    gfx::Rect rect(size);

    EXPECT_CALL(*child_context, bindTexture(GL_TEXTURE_2D, child_texture_id));
    child_resource_provider->SetPixels(id, data, rect, rect, gfx::Vector2d());
    Mock::VerifyAndClearExpectations(child_context);

    // The texture is set to |child_filter| in the child.
    EXPECT_CALL(*child_context, bindTexture(GL_TEXTURE_2D, child_texture_id));
    if (child_filter != GL_LINEAR) {
      // No texParameteri calls expected when the filter is already GL_LINEAR:
      // the provider skips redundant state changes.
      EXPECT_CALL(
          *child_context,
          texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, child_filter));
      EXPECT_CALL(
          *child_context,
          texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, child_filter));
    }
    SetResourceFilter(child_resource_provider.get(), id, child_filter);
    Mock::VerifyAndClearExpectations(child_context);

    ReturnedResourceArray returned_to_child;
    int child_id = parent_resource_provider->CreateChild(
        GetReturnCallback(&returned_to_child));
    {
      // Transfer some resource to the parent.
      ResourceProvider::ResourceIdArray resource_ids_to_transfer;
      resource_ids_to_transfer.push_back(id);
      TransferableResourceArray list;

      // Export: the child publishes the texture into a mailbox and inserts a
      // sync point for the parent to wait on.
      EXPECT_CALL(*child_context, bindTexture(GL_TEXTURE_2D, child_texture_id));
      EXPECT_CALL(*child_context,
                  produceTextureCHROMIUM(GL_TEXTURE_2D, _));
      EXPECT_CALL(*child_context, insertSyncPoint());
      child_resource_provider->PrepareSendToParent(resource_ids_to_transfer,
                                                   &list);
      Mock::VerifyAndClearExpectations(child_context);

      ASSERT_EQ(1u, list.size());
      // The transferable advertises the filter the child last set.
      EXPECT_EQ(static_cast<unsigned>(child_filter), list[0].filter);

      // Import: the parent consumes the mailbox into its own texture.
      EXPECT_CALL(*parent_context,
                  bindTexture(GL_TEXTURE_2D, parent_texture_id));
      EXPECT_CALL(*parent_context, consumeTextureCHROMIUM(GL_TEXTURE_2D, _));
      parent_resource_provider->ReceiveFromChild(child_id, list);
      {
        parent_resource_provider->WaitSyncPointIfNeeded(list[0].id);
        ResourceProvider::ScopedReadLockGL lock(parent_resource_provider.get(),
                                                list[0].id);
      }
      Mock::VerifyAndClearExpectations(parent_context);

      parent_resource_provider->DeclareUsedResourcesFromChild(
          child_id, resource_ids_to_transfer);
      Mock::VerifyAndClearExpectations(parent_context);
    }
    ResourceProvider::ResourceIdMap resource_map =
        parent_resource_provider->GetChildToParentMap(child_id);
    ResourceProvider::ResourceId mapped_id = resource_map[id];
    EXPECT_NE(0u, mapped_id);

    // The texture is set to |parent_filter| in the parent.
    EXPECT_CALL(*parent_context, bindTexture(GL_TEXTURE_2D, parent_texture_id));
    EXPECT_CALL(
        *parent_context,
        texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, parent_filter));
    EXPECT_CALL(
        *parent_context,
        texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, parent_filter));
    SetResourceFilter(parent_resource_provider.get(), mapped_id, parent_filter);
    Mock::VerifyAndClearExpectations(parent_context);

    // The texture should be reset to |child_filter| in the parent when it is
    // returned, since that is how it was received.
    EXPECT_CALL(*parent_context, bindTexture(GL_TEXTURE_2D, parent_texture_id));
    EXPECT_CALL(
        *parent_context,
        texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, child_filter));
    EXPECT_CALL(
        *parent_context,
        texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, child_filter));

    {
      EXPECT_EQ(0u, returned_to_child.size());

      // Transfer resources back from the parent to the child. Set no resources
      // as being in use.
      ResourceProvider::ResourceIdArray no_resources;
      EXPECT_CALL(*parent_context, insertSyncPoint());
      parent_resource_provider->DeclareUsedResourcesFromChild(child_id,
                                                              no_resources);
      Mock::VerifyAndClearExpectations(parent_context);

      ASSERT_EQ(1u, returned_to_child.size());
      child_resource_provider->ReceiveReturnsFromParent(returned_to_child);
    }

    // The child remembers the texture filter is set to |child_filter|.
    // Setting the same filter again must only bind, not re-issue
    // texParameteri calls.
    EXPECT_CALL(*child_context, bindTexture(GL_TEXTURE_2D, child_texture_id));
    SetResourceFilter(child_resource_provider.get(), id, child_filter);
    Mock::VerifyAndClearExpectations(child_context);
  }
};
+
+TEST_P(ResourceProviderTest, TextureFilters_ChildNearestParentLinear) {
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+ ResourceProviderTestTextureFilters::RunTest(GL_NEAREST, GL_LINEAR);
+}
+
+TEST_P(ResourceProviderTest, TextureFilters_ChildLinearParentNearest) {
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+ ResourceProviderTestTextureFilters::RunTest(GL_LINEAR, GL_NEAREST);
+}
+
+TEST_P(ResourceProviderTest, TransferMailboxResources) {
+ // Other mailbox transfers tested elsewhere.
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+ unsigned texture = context()->createTexture();
+ context()->bindTexture(GL_TEXTURE_2D, texture);
+ uint8_t data[4] = { 1, 2, 3, 4 };
+ context()->texImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, &data);
+ gpu::Mailbox mailbox;
+ context()->genMailboxCHROMIUM(mailbox.name);
+ context()->produceTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
+ uint32 sync_point = context()->insertSyncPoint();
+
+ // All the logic below assumes that the sync points are all positive.
+ EXPECT_LT(0u, sync_point);
+
+ uint32 release_sync_point = 0;
+ bool lost_resource = false;
+ BlockingTaskRunner* main_thread_task_runner = NULL;
+ ReleaseCallbackImpl callback = base::Bind(ReleaseCallback,
+ &release_sync_point,
+ &lost_resource,
+ &main_thread_task_runner);
+ ResourceProvider::ResourceId resource =
+ resource_provider_->CreateResourceFromTextureMailbox(
+ TextureMailbox(mailbox, GL_TEXTURE_2D, sync_point),
+ SingleReleaseCallbackImpl::Create(callback));
+ EXPECT_EQ(1u, context()->NumTextures());
+ EXPECT_EQ(0u, release_sync_point);
+ {
+ // Transfer the resource, expect the sync points to be consistent.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(resource);
+ TransferableResourceArray list;
+ resource_provider_->PrepareSendToParent(resource_ids_to_transfer, &list);
+ ASSERT_EQ(1u, list.size());
+ EXPECT_LE(sync_point, list[0].mailbox_holder.sync_point);
+ EXPECT_EQ(0,
+ memcmp(mailbox.name,
+ list[0].mailbox_holder.mailbox.name,
+ sizeof(mailbox.name)));
+ EXPECT_EQ(0u, release_sync_point);
+
+ context()->waitSyncPoint(list[0].mailbox_holder.sync_point);
+ unsigned other_texture = context()->createTexture();
+ context()->bindTexture(GL_TEXTURE_2D, other_texture);
+ context()->consumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
+ uint8_t test_data[4] = { 0 };
+ context()->GetPixels(
+ gfx::Size(1, 1), RGBA_8888, test_data);
+ EXPECT_EQ(0, memcmp(data, test_data, sizeof(data)));
+ context()->produceTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
+ context()->deleteTexture(other_texture);
+ list[0].mailbox_holder.sync_point = context()->insertSyncPoint();
+ EXPECT_LT(0u, list[0].mailbox_holder.sync_point);
+
+ // Receive the resource, then delete it, expect the sync points to be
+ // consistent.
+ ReturnedResourceArray returned;
+ TransferableResource::ReturnResources(list, &returned);
+ resource_provider_->ReceiveReturnsFromParent(returned);
+ EXPECT_EQ(1u, context()->NumTextures());
+ EXPECT_EQ(0u, release_sync_point);
+
+ resource_provider_->DeleteResource(resource);
+ EXPECT_LE(list[0].mailbox_holder.sync_point, release_sync_point);
+ EXPECT_FALSE(lost_resource);
+ EXPECT_EQ(main_thread_task_runner_.get(), main_thread_task_runner);
+ }
+
+ // We're going to do the same thing as above, but testing the case where we
+ // delete the resource before we receive it back.
+ sync_point = release_sync_point;
+ EXPECT_LT(0u, sync_point);
+ release_sync_point = 0;
+ resource = resource_provider_->CreateResourceFromTextureMailbox(
+ TextureMailbox(mailbox, GL_TEXTURE_2D, sync_point),
+ SingleReleaseCallbackImpl::Create(callback));
+ EXPECT_EQ(1u, context()->NumTextures());
+ EXPECT_EQ(0u, release_sync_point);
+ {
+ // Transfer the resource, expect the sync points to be consistent.
+ ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+ resource_ids_to_transfer.push_back(resource);
+ TransferableResourceArray list;
+ resource_provider_->PrepareSendToParent(resource_ids_to_transfer, &list);
+ ASSERT_EQ(1u, list.size());
+ EXPECT_LE(sync_point, list[0].mailbox_holder.sync_point);
+ EXPECT_EQ(0,
+ memcmp(mailbox.name,
+ list[0].mailbox_holder.mailbox.name,
+ sizeof(mailbox.name)));
+ EXPECT_EQ(0u, release_sync_point);
+
+ context()->waitSyncPoint(list[0].mailbox_holder.sync_point);
+ unsigned other_texture = context()->createTexture();
+ context()->bindTexture(GL_TEXTURE_2D, other_texture);
+ context()->consumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
+ uint8_t test_data[4] = { 0 };
+ context()->GetPixels(
+ gfx::Size(1, 1), RGBA_8888, test_data);
+ EXPECT_EQ(0, memcmp(data, test_data, sizeof(data)));
+ context()->produceTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
+ context()->deleteTexture(other_texture);
+ list[0].mailbox_holder.sync_point = context()->insertSyncPoint();
+ EXPECT_LT(0u, list[0].mailbox_holder.sync_point);
+
+ // Delete the resource, which shouldn't do anything.
+ resource_provider_->DeleteResource(resource);
+ EXPECT_EQ(1u, context()->NumTextures());
+ EXPECT_EQ(0u, release_sync_point);
+
+ // Then receive the resource which should release the mailbox, expect the
+ // sync points to be consistent.
+ ReturnedResourceArray returned;
+ TransferableResource::ReturnResources(list, &returned);
+ resource_provider_->ReceiveReturnsFromParent(returned);
+ EXPECT_LE(list[0].mailbox_holder.sync_point, release_sync_point);
+ EXPECT_FALSE(lost_resource);
+ EXPECT_EQ(main_thread_task_runner_.get(), main_thread_task_runner);
+ }
+
+ context()->waitSyncPoint(release_sync_point);
+ context()->bindTexture(GL_TEXTURE_2D, texture);
+ context()->consumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
+ context()->deleteTexture(texture);
+}
+
+// Verify that losing the output surface in the parent compositor marks a
+// transferred resource as lost in the child — but only for GL-backed
+// resources; software bitmaps survive an output surface loss.
+TEST_P(ResourceProviderTest, LostResourceInParent) {
+  gfx::Size size(1, 1);
+  ResourceFormat format = RGBA_8888;
+  ResourceProvider::ResourceId resource_id =
+      child_resource_provider_->CreateResource(
+          size,
+          GL_CLAMP_TO_EDGE,
+          ResourceProvider::TextureHintImmutable,
+          format);
+  child_resource_provider_->AllocateForTesting(resource_id);
+  // Only a GL texture is expected to be lost by a context loss.
+  const bool expect_lost = GetParam() == ResourceProvider::GLTexture;
+
+  ReturnedResourceArray returned;
+  int child = resource_provider_->CreateChild(GetReturnCallback(&returned));
+  {
+    // Hand the resource off to the parent provider.
+    ResourceProvider::ResourceIdArray transfer_ids;
+    transfer_ids.push_back(resource_id);
+    TransferableResourceArray transfer_list;
+    child_resource_provider_->PrepareSendToParent(transfer_ids,
+                                                  &transfer_list);
+    EXPECT_EQ(1u, transfer_list.size());
+
+    resource_provider_->ReceiveFromChild(child, transfer_list);
+    resource_provider_->DeclareUsedResourcesFromChild(child, transfer_ids);
+  }
+
+  // Simulate a context loss on the parent's output surface.
+  resource_provider_->DidLoseOutputSurface();
+
+  {
+    EXPECT_EQ(0u, returned.size());
+
+    // Declaring no resources as in use sends everything back to the child.
+    ResourceProvider::ResourceIdArray unused;
+    resource_provider_->DeclareUsedResourcesFromChild(child, unused);
+
+    // A GL resource comes back flagged as lost.
+    ASSERT_EQ(1u, returned.size());
+    EXPECT_EQ(expect_lost, returned[0].lost);
+    child_resource_provider_->ReceiveReturnsFromParent(returned);
+    returned.clear();
+  }
+
+  // The child now considers a GL resource lost.
+  EXPECT_EQ(expect_lost, child_resource_provider_->IsLost(resource_id));
+
+  // Lost resources stay in use in the parent forever.
+  EXPECT_EQ(expect_lost,
+            child_resource_provider_->InUseByConsumer(resource_id));
+}
+
+// Verify that a loss reported by a grandparent compositor propagates down
+// through the parent to the child that originally created the resource.
+// Unlike LostResourceInParent, the loss here is injected via the returned
+// resource's |lost| flag, so it applies regardless of the backing type.
+TEST_P(ResourceProviderTest, LostResourceInGrandParent) {
+  gfx::Size size(1, 1);
+  ResourceFormat format = RGBA_8888;
+  ResourceProvider::ResourceId resource =
+      child_resource_provider_->CreateResource(
+          size,
+          GL_CLAMP_TO_EDGE,
+          ResourceProvider::TextureHintImmutable,
+          format);
+  child_resource_provider_->AllocateForTesting(resource);
+
+  ReturnedResourceArray returned_to_child;
+  int child_id =
+      resource_provider_->CreateChild(GetReturnCallback(&returned_to_child));
+  {
+    // Transfer the resource to the parent.
+    ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+    resource_ids_to_transfer.push_back(resource);
+    TransferableResourceArray list;
+    child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+                                                  &list);
+    EXPECT_EQ(1u, list.size());
+
+    resource_provider_->ReceiveFromChild(child_id, list);
+    resource_provider_->DeclareUsedResourcesFromChild(child_id,
+                                                      resource_ids_to_transfer);
+  }
+
+  {
+    // Look up the parent-side id the child resource was mapped to.
+    ResourceProvider::ResourceIdMap resource_map =
+        resource_provider_->GetChildToParentMap(child_id);
+    ResourceProvider::ResourceId parent_resource = resource_map[resource];
+    EXPECT_NE(0u, parent_resource);
+
+    // Transfer to a grandparent.
+    ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+    resource_ids_to_transfer.push_back(parent_resource);
+    TransferableResourceArray list;
+    resource_provider_->PrepareSendToParent(resource_ids_to_transfer, &list);
+
+    // Receive back a lost resource from the grandparent.
+    EXPECT_EQ(1u, list.size());
+    EXPECT_EQ(parent_resource, list[0].id);
+    ReturnedResourceArray returned;
+    TransferableResource::ReturnResources(list, &returned);
+    EXPECT_EQ(1u, returned.size());
+    EXPECT_EQ(parent_resource, returned[0].id);
+    returned[0].lost = true;
+    resource_provider_->ReceiveReturnsFromParent(returned);
+
+    // The resource should be lost.
+    EXPECT_TRUE(resource_provider_->IsLost(parent_resource));
+
+    // Lost resources stay in use in the parent forever.
+    EXPECT_TRUE(resource_provider_->InUseByConsumer(parent_resource));
+  }
+
+  {
+    EXPECT_EQ(0u, returned_to_child.size());
+
+    // Transfer resources back from the parent to the child. Set no resources as
+    // being in use.
+    ResourceProvider::ResourceIdArray no_resources;
+    resource_provider_->DeclareUsedResourcesFromChild(child_id, no_resources);
+
+    // Expect the resource to be lost.
+    ASSERT_EQ(1u, returned_to_child.size());
+    EXPECT_TRUE(returned_to_child[0].lost);
+    child_resource_provider_->ReceiveReturnsFromParent(returned_to_child);
+    returned_to_child.clear();
+  }
+
+  // The resource should be lost.
+  EXPECT_TRUE(child_resource_provider_->IsLost(resource));
+
+  // Lost resources stay in use in the parent forever.
+  EXPECT_TRUE(child_resource_provider_->InUseByConsumer(resource));
+}
+
+// Verify that a mailbox-backed resource transferred to a parent that loses
+// its output surface is reported to the release callback as lost — for GL
+// textures only, since only hardware resources are affected by the loss.
+TEST_P(ResourceProviderTest, LostMailboxInParent) {
+  uint32 release_sync_point = 0;
+  bool lost_resource = false;
+  bool release_called = false;
+  uint32 sync_point = 0;
+  ResourceProvider::ResourceId resource = CreateChildMailbox(
+      &release_sync_point, &lost_resource, &release_called, &sync_point);
+
+  ReturnedResourceArray returned_to_child;
+  int child_id =
+      resource_provider_->CreateChild(GetReturnCallback(&returned_to_child));
+  {
+    // Transfer the resource to the parent.
+    ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+    resource_ids_to_transfer.push_back(resource);
+    TransferableResourceArray list;
+    child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+                                                  &list);
+    EXPECT_EQ(1u, list.size());
+
+    resource_provider_->ReceiveFromChild(child_id, list);
+    resource_provider_->DeclareUsedResourcesFromChild(child_id,
+                                                      resource_ids_to_transfer);
+  }
+
+  // Lose the output surface in the parent.
+  resource_provider_->DidLoseOutputSurface();
+
+  {
+    EXPECT_EQ(0u, returned_to_child.size());
+
+    // Transfer resources back from the parent to the child. Set no resources as
+    // being in use.
+    ResourceProvider::ResourceIdArray no_resources;
+    resource_provider_->DeclareUsedResourcesFromChild(child_id, no_resources);
+
+    ASSERT_EQ(1u, returned_to_child.size());
+    // Losing an output surface only loses hardware resources.
+    EXPECT_EQ(returned_to_child[0].lost,
+              GetParam() == ResourceProvider::GLTexture);
+    child_resource_provider_->ReceiveReturnsFromParent(returned_to_child);
+    returned_to_child.clear();
+  }
+
+  // Delete the resource in the child. Expect the resource to be lost if it's
+  // a GL texture.
+  child_resource_provider_->DeleteResource(resource);
+  EXPECT_EQ(lost_resource, GetParam() == ResourceProvider::GLTexture);
+}
+
+// Verify that a mailbox-backed resource marked lost by a grandparent is
+// reported as lost to the child's release callback once the child deletes
+// it. The loss is injected via the returned resource's |lost| flag, so it
+// applies regardless of backing type.
+TEST_P(ResourceProviderTest, LostMailboxInGrandParent) {
+  uint32 release_sync_point = 0;
+  bool lost_resource = false;
+  bool release_called = false;
+  uint32 sync_point = 0;
+  ResourceProvider::ResourceId resource = CreateChildMailbox(
+      &release_sync_point, &lost_resource, &release_called, &sync_point);
+
+  ReturnedResourceArray returned_to_child;
+  int child_id =
+      resource_provider_->CreateChild(GetReturnCallback(&returned_to_child));
+  {
+    // Transfer the resource to the parent.
+    ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+    resource_ids_to_transfer.push_back(resource);
+    TransferableResourceArray list;
+    child_resource_provider_->PrepareSendToParent(resource_ids_to_transfer,
+                                                  &list);
+    EXPECT_EQ(1u, list.size());
+
+    resource_provider_->ReceiveFromChild(child_id, list);
+    resource_provider_->DeclareUsedResourcesFromChild(child_id,
+                                                      resource_ids_to_transfer);
+  }
+
+  {
+    // Look up the parent-side id the child resource was mapped to.
+    ResourceProvider::ResourceIdMap resource_map =
+        resource_provider_->GetChildToParentMap(child_id);
+    ResourceProvider::ResourceId parent_resource = resource_map[resource];
+    EXPECT_NE(0u, parent_resource);
+
+    // Transfer to a grandparent.
+    ResourceProvider::ResourceIdArray resource_ids_to_transfer;
+    resource_ids_to_transfer.push_back(parent_resource);
+    TransferableResourceArray list;
+    resource_provider_->PrepareSendToParent(resource_ids_to_transfer, &list);
+
+    // Receive back a lost resource from the grandparent.
+    EXPECT_EQ(1u, list.size());
+    EXPECT_EQ(parent_resource, list[0].id);
+    ReturnedResourceArray returned;
+    TransferableResource::ReturnResources(list, &returned);
+    EXPECT_EQ(1u, returned.size());
+    EXPECT_EQ(parent_resource, returned[0].id);
+    returned[0].lost = true;
+    resource_provider_->ReceiveReturnsFromParent(returned);
+  }
+
+  {
+    EXPECT_EQ(0u, returned_to_child.size());
+
+    // Transfer resources back from the parent to the child. Set no resources as
+    // being in use.
+    ResourceProvider::ResourceIdArray no_resources;
+    resource_provider_->DeclareUsedResourcesFromChild(child_id, no_resources);
+
+    // Expect the resource to be lost.
+    ASSERT_EQ(1u, returned_to_child.size());
+    EXPECT_TRUE(returned_to_child[0].lost);
+    child_resource_provider_->ReceiveReturnsFromParent(returned_to_child);
+    returned_to_child.clear();
+  }
+
+  // Delete the resource in the child. Expect the resource to be lost.
+  child_resource_provider_->DeleteResource(resource);
+  EXPECT_TRUE(lost_resource);
+}
+
+// Verify that destroying the child resource provider releases a mailbox
+// cleanly: the release callback runs and the resource is not marked lost.
+TEST_P(ResourceProviderTest, Shutdown) {
+  uint32 release_point = 0;
+  bool was_lost = false;
+  bool callback_ran = false;
+  uint32 creation_point = 0;
+  CreateChildMailbox(&release_point, &was_lost, &callback_ran,
+                     &creation_point);
+
+  // Nothing has been released yet.
+  EXPECT_EQ(0u, release_point);
+  EXPECT_FALSE(was_lost);
+
+  child_resource_provider_ = nullptr;
+
+  // For GL textures, the release sync point must not precede creation's.
+  if (GetParam() == ResourceProvider::GLTexture) {
+    EXPECT_LE(creation_point, release_point);
+  }
+  EXPECT_TRUE(callback_ran);
+  EXPECT_FALSE(was_lost);
+}
+
+// Verify that shutting down the child provider while a mailbox resource is
+// still exported to the parent reports the resource as lost, since the
+// child can no longer release it properly.
+TEST_P(ResourceProviderTest, ShutdownWithExportedResource) {
+  uint32 release_point = 0;
+  bool was_lost = false;
+  bool callback_ran = false;
+  uint32 creation_point = 0;
+  ResourceProvider::ResourceId id = CreateChildMailbox(
+      &release_point, &was_lost, &callback_ran, &creation_point);
+
+  // Export the resource so it cannot be released properly on shutdown.
+  ResourceProvider::ResourceIdArray ids_to_send;
+  ids_to_send.push_back(id);
+  TransferableResourceArray transfer_list;
+  child_resource_provider_->PrepareSendToParent(ids_to_send, &transfer_list);
+
+  // Nothing has been released yet.
+  EXPECT_EQ(0u, release_point);
+  EXPECT_FALSE(was_lost);
+
+  child_resource_provider_ = nullptr;
+
+  // Since the resource is in the parent, the child considers it lost.
+  EXPECT_EQ(0u, release_point);
+  EXPECT_TRUE(was_lost);
+}
+
+// Verify that destroying the resource provider after a context loss runs
+// the mailbox release callback with |lost_resource| true, a valid sync
+// point, and on the main thread task runner.
+TEST_P(ResourceProviderTest, LostContext) {
+  // TextureMailbox callbacks only exist for GL textures for now.
+  if (GetParam() != ResourceProvider::GLTexture)
+    return;
+  // Produce a texture into a mailbox directly on the GL context.
+  unsigned texture = context()->createTexture();
+  context()->bindTexture(GL_TEXTURE_2D, texture);
+  gpu::Mailbox mailbox;
+  context()->genMailboxCHROMIUM(mailbox.name);
+  context()->produceTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
+  uint32 sync_point = context()->insertSyncPoint();
+
+  EXPECT_LT(0u, sync_point);
+
+  uint32 release_sync_point = 0;
+  bool lost_resource = false;
+  BlockingTaskRunner* main_thread_task_runner = NULL;
+  scoped_ptr<SingleReleaseCallbackImpl> callback =
+      SingleReleaseCallbackImpl::Create(base::Bind(ReleaseCallback,
+                                                   &release_sync_point,
+                                                   &lost_resource,
+                                                   &main_thread_task_runner));
+  resource_provider_->CreateResourceFromTextureMailbox(
+      TextureMailbox(mailbox, GL_TEXTURE_2D, sync_point), callback.Pass());
+
+  // The callback must not run before the provider is torn down.
+  EXPECT_EQ(0u, release_sync_point);
+  EXPECT_FALSE(lost_resource);
+  EXPECT_EQ(NULL, main_thread_task_runner);
+
+  resource_provider_->DidLoseOutputSurface();
+  resource_provider_ = nullptr;
+
+  // Destruction after the loss releases the mailbox as lost.
+  EXPECT_LE(sync_point, release_sync_point);
+  EXPECT_TRUE(lost_resource);
+  EXPECT_EQ(main_thread_task_runner_.get(), main_thread_task_runner);
+}
+
+// Verify, via a mock GL context, that ScopedSamplerGL only issues
+// texParameteri calls when the requested filter differs from the texture's
+// current filter state.
+TEST_P(ResourceProviderTest, ScopedSampler) {
+  // Sampling is only supported for GL textures.
+  if (GetParam() != ResourceProvider::GLTexture)
+    return;
+
+  scoped_ptr<TextureStateTrackingContext> context_owned(
+      new TextureStateTrackingContext);
+  TextureStateTrackingContext* context = context_owned.get();
+
+  FakeOutputSurfaceClient output_surface_client;
+  scoped_ptr<OutputSurface> output_surface(
+      FakeOutputSurface::Create3d(context_owned.Pass()));
+  CHECK(output_surface->BindToClient(&output_surface_client));
+
+  scoped_ptr<ResourceProvider> resource_provider(
+      ResourceProvider::Create(output_surface.get(),
+                               shared_bitmap_manager_.get(),
+                               NULL,
+                               0,
+                               false,
+                               1,
+                               false));
+
+  gfx::Size size(1, 1);
+  ResourceFormat format = RGBA_8888;
+  int texture_id = 1;
+
+  ResourceProvider::ResourceId id = resource_provider->CreateResource(
+      size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+
+  // Check that the texture gets created with the right sampler settings.
+  EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id))
+      .Times(2);  // Once to create and once to allocate.
+  EXPECT_CALL(*context,
+              texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
+  EXPECT_CALL(*context,
+              texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
+  EXPECT_CALL(
+      *context,
+      texParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
+  EXPECT_CALL(
+      *context,
+      texParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
+  EXPECT_CALL(*context,
+              texParameteri(GL_TEXTURE_2D,
+                            GL_TEXTURE_POOL_CHROMIUM,
+                            GL_TEXTURE_POOL_UNMANAGED_CHROMIUM));
+
+  resource_provider->AllocateForTesting(id);
+  Mock::VerifyAndClearExpectations(context);
+
+  // Creating a sampler with the default filter should not change any texture
+  // parameters.
+  {
+    EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id));
+    ResourceProvider::ScopedSamplerGL sampler(
+        resource_provider.get(), id, GL_TEXTURE_2D, GL_LINEAR);
+    Mock::VerifyAndClearExpectations(context);
+  }
+
+  // Using a different filter should be reflected in the texture parameters.
+  {
+    EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id));
+    EXPECT_CALL(
+        *context,
+        texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
+    EXPECT_CALL(
+        *context,
+        texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
+    ResourceProvider::ScopedSamplerGL sampler(
+        resource_provider.get(), id, GL_TEXTURE_2D, GL_NEAREST);
+    Mock::VerifyAndClearExpectations(context);
+  }
+
+  // Test resetting to the default filter.
+  {
+    EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id));
+    EXPECT_CALL(*context,
+                texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
+    EXPECT_CALL(*context,
+                texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
+    ResourceProvider::ScopedSamplerGL sampler(
+        resource_provider.get(), id, GL_TEXTURE_2D, GL_LINEAR);
+    Mock::VerifyAndClearExpectations(context);
+  }
+}
+
+// Verify that CreateManagedResource produces a GL texture bound with the
+// GL_TEXTURE_POOL_MANAGED_CHROMIUM pool and default sampler parameters.
+TEST_P(ResourceProviderTest, ManagedResource) {
+  // Sampling is only supported for GL textures.
+  if (GetParam() != ResourceProvider::GLTexture)
+    return;
+
+  scoped_ptr<TextureStateTrackingContext> context_owned(
+      new TextureStateTrackingContext);
+  TextureStateTrackingContext* context = context_owned.get();
+
+  FakeOutputSurfaceClient output_surface_client;
+  scoped_ptr<OutputSurface> output_surface(
+      FakeOutputSurface::Create3d(context_owned.Pass()));
+  CHECK(output_surface->BindToClient(&output_surface_client));
+
+  scoped_ptr<ResourceProvider> resource_provider(
+      ResourceProvider::Create(output_surface.get(),
+                               shared_bitmap_manager_.get(),
+                               NULL,
+                               0,
+                               false,
+                               1,
+                               false));
+
+  gfx::Size size(1, 1);
+  ResourceFormat format = RGBA_8888;
+  int texture_id = 1;
+
+  // Check that the texture gets created with the right sampler settings.
+  ResourceProvider::ResourceId id = resource_provider->CreateManagedResource(
+      size,
+      GL_TEXTURE_2D,
+      GL_CLAMP_TO_EDGE,
+      ResourceProvider::TextureHintImmutable,
+      format);
+  EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id));
+  EXPECT_CALL(*context,
+              texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
+  EXPECT_CALL(*context,
+              texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
+  EXPECT_CALL(
+      *context,
+      texParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
+  EXPECT_CALL(
+      *context,
+      texParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
+  // The managed pool is what distinguishes this from a default resource.
+  EXPECT_CALL(*context,
+              texParameteri(GL_TEXTURE_2D,
+                            GL_TEXTURE_POOL_CHROMIUM,
+                            GL_TEXTURE_POOL_MANAGED_CHROMIUM));
+  resource_provider->CreateForTesting(id);
+  EXPECT_NE(0u, id);
+
+  Mock::VerifyAndClearExpectations(context);
+}
+
+// Verify that the wrap mode passed to CreateGLTexture (GL_CLAMP_TO_EDGE or
+// GL_REPEAT) is applied to both GL_TEXTURE_WRAP_S and GL_TEXTURE_WRAP_T.
+TEST_P(ResourceProviderTest, TextureWrapMode) {
+  // Sampling is only supported for GL textures.
+  if (GetParam() != ResourceProvider::GLTexture)
+    return;
+
+  scoped_ptr<TextureStateTrackingContext> context_owned(
+      new TextureStateTrackingContext);
+  TextureStateTrackingContext* context = context_owned.get();
+
+  FakeOutputSurfaceClient output_surface_client;
+  scoped_ptr<OutputSurface> output_surface(
+      FakeOutputSurface::Create3d(context_owned.Pass()));
+  CHECK(output_surface->BindToClient(&output_surface_client));
+
+  scoped_ptr<ResourceProvider> resource_provider(
+      ResourceProvider::Create(output_surface.get(),
+                               shared_bitmap_manager_.get(),
+                               NULL,
+                               0,
+                               false,
+                               1,
+                               false));
+
+  gfx::Size size(1, 1);
+  ResourceFormat format = RGBA_8888;
+  GLenum texture_pool = GL_TEXTURE_POOL_UNMANAGED_CHROMIUM;
+
+  // First iteration uses GL_CLAMP_TO_EDGE, second uses GL_REPEAT; the mock
+  // context hands out sequential texture ids starting at 1.
+  for (int texture_id = 1; texture_id <= 2; ++texture_id) {
+    GLint wrap_mode = texture_id == 1 ? GL_CLAMP_TO_EDGE : GL_REPEAT;
+    // Check that the texture gets created with the right sampler settings.
+    ResourceProvider::ResourceId id = resource_provider->CreateGLTexture(
+        size,
+        GL_TEXTURE_2D,
+        texture_pool,
+        wrap_mode,
+        ResourceProvider::TextureHintImmutable,
+        format);
+    EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id));
+    EXPECT_CALL(*context,
+                texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
+    EXPECT_CALL(*context,
+                texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
+    EXPECT_CALL(*context,
+                texParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, wrap_mode));
+    EXPECT_CALL(*context,
+                texParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, wrap_mode));
+    EXPECT_CALL(*context,
+                texParameteri(GL_TEXTURE_2D,
+                              GL_TEXTURE_POOL_CHROMIUM,
+                              GL_TEXTURE_POOL_UNMANAGED_CHROMIUM));
+    resource_provider->CreateForTesting(id);
+    EXPECT_NE(0u, id);
+
+    Mock::VerifyAndClearExpectations(context);
+  }
+}
+
+// Verify that each TextureHint creates a texture with the expected GL
+// parameters, and that only hints including TextureHintFramebuffer set
+// GL_TEXTURE_USAGE_ANGLE to GL_FRAMEBUFFER_ATTACHMENT_ANGLE.
+TEST_P(ResourceProviderTest, TextureHint) {
+  // Sampling is only supported for GL textures.
+  if (GetParam() != ResourceProvider::GLTexture)
+    return;
+
+  scoped_ptr<TextureStateTrackingContext> context_owned(
+      new TextureStateTrackingContext);
+  TextureStateTrackingContext* context = context_owned.get();
+  context->set_support_texture_storage(true);
+  context->set_support_texture_usage(true);
+
+  FakeOutputSurfaceClient output_surface_client;
+  scoped_ptr<OutputSurface> output_surface(
+      FakeOutputSurface::Create3d(context_owned.Pass()));
+  CHECK(output_surface->BindToClient(&output_surface_client));
+
+  scoped_ptr<ResourceProvider> resource_provider(
+      ResourceProvider::Create(output_surface.get(),
+                               shared_bitmap_manager_.get(),
+                               NULL,
+                               0,
+                               false,
+                               1,
+                               false));
+
+  gfx::Size size(1, 1);
+  ResourceFormat format = RGBA_8888;
+  GLenum texture_pool = GL_TEXTURE_POOL_UNMANAGED_CHROMIUM;
+
+  const ResourceProvider::TextureHint hints[4] = {
+      ResourceProvider::TextureHintDefault,
+      ResourceProvider::TextureHintImmutable,
+      ResourceProvider::TextureHintFramebuffer,
+      ResourceProvider::TextureHintImmutableFramebuffer,
+  };
+  // The mock context hands out sequential texture ids, so texture_id doubles
+  // as a 1-based index into |hints|.
+  for (GLuint texture_id = 1; texture_id <= arraysize(hints); ++texture_id) {
+    // Check that the texture gets created with the right sampler settings.
+    ResourceProvider::ResourceId id =
+        resource_provider->CreateGLTexture(size,
+                                           GL_TEXTURE_2D,
+                                           texture_pool,
+                                           GL_CLAMP_TO_EDGE,
+                                           hints[texture_id - 1],
+                                           format);
+    EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id));
+    EXPECT_CALL(*context,
+                texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
+    EXPECT_CALL(*context,
+                texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
+    EXPECT_CALL(
+        *context,
+        texParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
+    EXPECT_CALL(
+        *context,
+        texParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
+    EXPECT_CALL(*context,
+                texParameteri(GL_TEXTURE_2D,
+                              GL_TEXTURE_POOL_CHROMIUM,
+                              GL_TEXTURE_POOL_UNMANAGED_CHROMIUM));
+    // Check only TextureHintFramebuffer set GL_TEXTURE_USAGE_ANGLE.
+    bool is_framebuffer_hint =
+        hints[texture_id - 1] & ResourceProvider::TextureHintFramebuffer;
+    EXPECT_CALL(*context,
+                texParameteri(GL_TEXTURE_2D,
+                              GL_TEXTURE_USAGE_ANGLE,
+                              GL_FRAMEBUFFER_ATTACHMENT_ANGLE))
+        .Times(is_framebuffer_hint ? 1 : 0);
+    resource_provider->CreateForTesting(id);
+    EXPECT_NE(0u, id);
+
+    Mock::VerifyAndClearExpectations(context);
+  }
+}
+
+// Verify that a shared-memory-backed TextureMailbox can be wrapped as a
+// software resource, read back through ScopedReadLockSoftware, and that
+// deleting it runs the release callback on the main thread, not lost.
+TEST_P(ResourceProviderTest, TextureMailbox_SharedMemory) {
+  if (GetParam() != ResourceProvider::Bitmap)
+    return;
+
+  gfx::Size size(64, 64);
+  const uint32_t kBadBeef = 0xbadbeef;
+  scoped_ptr<base::SharedMemory> shared_memory(
+      CreateAndFillSharedMemory(size, kBadBeef));
+
+  FakeOutputSurfaceClient output_surface_client;
+  scoped_ptr<OutputSurface> output_surface(
+      FakeOutputSurface::CreateSoftware(make_scoped_ptr(
+          new SoftwareOutputDevice)));
+  CHECK(output_surface->BindToClient(&output_surface_client));
+
+  scoped_ptr<ResourceProvider> resource_provider(
+      ResourceProvider::Create(output_surface.get(),
+                               shared_bitmap_manager_.get(),
+                               main_thread_task_runner_.get(),
+                               0,
+                               false,
+                               1,
+                               false));
+
+  uint32 release_sync_point = 0;
+  bool lost_resource = false;
+  BlockingTaskRunner* main_thread_task_runner = NULL;
+  scoped_ptr<SingleReleaseCallbackImpl> callback =
+      SingleReleaseCallbackImpl::Create(base::Bind(&ReleaseCallback,
+                                                   &release_sync_point,
+                                                   &lost_resource,
+                                                   &main_thread_task_runner));
+  TextureMailbox mailbox(shared_memory.get(), size);
+
+  ResourceProvider::ResourceId id =
+      resource_provider->CreateResourceFromTextureMailbox(
+          mailbox, callback.Pass());
+  EXPECT_NE(0u, id);
+
+  {
+    // Reading the resource back should see the fill pattern.
+    ResourceProvider::ScopedReadLockSoftware lock(resource_provider.get(), id);
+    const SkBitmap* sk_bitmap = lock.sk_bitmap();
+    EXPECT_EQ(sk_bitmap->width(), size.width());
+    EXPECT_EQ(sk_bitmap->height(), size.height());
+    EXPECT_EQ(*sk_bitmap->getAddr32(16, 16), kBadBeef);
+  }
+
+  // Deleting runs the release callback; no sync point for software, and the
+  // resource was never exported so it cannot be lost.
+  resource_provider->DeleteResource(id);
+  EXPECT_EQ(0u, release_sync_point);
+  EXPECT_FALSE(lost_resource);
+  EXPECT_EQ(main_thread_task_runner_.get(), main_thread_task_runner);
+}
+
+// Verify the exact GL call sequence for using a GL_TEXTURE_2D mailbox
+// resource: waitSyncPoint on first use, bind + consume while read-locked,
+// and an insertSyncPoint (no produce) when the lock is released.
+TEST_P(ResourceProviderTest, TextureMailbox_GLTexture2D) {
+  // Mailboxing is only supported for GL textures.
+  if (GetParam() != ResourceProvider::GLTexture)
+    return;
+
+  scoped_ptr<TextureStateTrackingContext> context_owned(
+      new TextureStateTrackingContext);
+  TextureStateTrackingContext* context = context_owned.get();
+
+  FakeOutputSurfaceClient output_surface_client;
+  scoped_ptr<OutputSurface> output_surface(
+      FakeOutputSurface::Create3d(context_owned.Pass()));
+  CHECK(output_surface->BindToClient(&output_surface_client));
+
+  scoped_ptr<ResourceProvider> resource_provider(
+      ResourceProvider::Create(output_surface.get(),
+                               shared_bitmap_manager_.get(),
+                               main_thread_task_runner_.get(),
+                               0,
+                               false,
+                               1,
+                               false));
+
+  unsigned texture_id = 1;
+  uint32 sync_point = 30;
+  unsigned target = GL_TEXTURE_2D;
+
+  // Creating the resource from the mailbox must touch no GL state.
+  EXPECT_CALL(*context, bindTexture(_, _)).Times(0);
+  EXPECT_CALL(*context, waitSyncPoint(_)).Times(0);
+  EXPECT_CALL(*context, insertSyncPoint()).Times(0);
+  EXPECT_CALL(*context, produceTextureCHROMIUM(_, _)).Times(0);
+  EXPECT_CALL(*context, consumeTextureCHROMIUM(_, _)).Times(0);
+
+  gpu::Mailbox gpu_mailbox;
+  memcpy(gpu_mailbox.name, "Hello world", strlen("Hello world") + 1);
+  uint32 release_sync_point = 0;
+  bool lost_resource = false;
+  BlockingTaskRunner* main_thread_task_runner = NULL;
+  scoped_ptr<SingleReleaseCallbackImpl> callback =
+      SingleReleaseCallbackImpl::Create(base::Bind(&ReleaseCallback,
+                                                   &release_sync_point,
+                                                   &lost_resource,
+                                                   &main_thread_task_runner));
+
+  TextureMailbox mailbox(gpu_mailbox, target, sync_point);
+
+  ResourceProvider::ResourceId id =
+      resource_provider->CreateResourceFromTextureMailbox(
+          mailbox, callback.Pass());
+  EXPECT_NE(0u, id);
+
+  Mock::VerifyAndClearExpectations(context);
+
+  {
+    // Mailbox sync point WaitSyncPoint before using the texture.
+    EXPECT_CALL(*context, waitSyncPoint(sync_point));
+    resource_provider->WaitSyncPointIfNeeded(id);
+    Mock::VerifyAndClearExpectations(context);
+
+    // Using the texture does a consume of the mailbox.
+    EXPECT_CALL(*context, bindTexture(target, texture_id));
+    EXPECT_CALL(*context, consumeTextureCHROMIUM(target, _));
+
+    EXPECT_CALL(*context, insertSyncPoint()).Times(0);
+    EXPECT_CALL(*context, produceTextureCHROMIUM(_, _)).Times(0);
+
+    ResourceProvider::ScopedReadLockGL lock(resource_provider.get(), id);
+    Mock::VerifyAndClearExpectations(context);
+
+    // When done with it, a sync point should be inserted, but no produce is
+    // necessary.
+    EXPECT_CALL(*context, bindTexture(_, _)).Times(0);
+    EXPECT_CALL(*context, insertSyncPoint());
+    EXPECT_CALL(*context, produceTextureCHROMIUM(_, _)).Times(0);
+
+    EXPECT_CALL(*context, waitSyncPoint(_)).Times(0);
+    EXPECT_CALL(*context, consumeTextureCHROMIUM(_, _)).Times(0);
+  }
+
+  // Deleting runs the release callback on the main thread; the resource was
+  // never exported, so it is not lost. The mock insertSyncPoint returns 0,
+  // hence release_sync_point stays 0.
+  resource_provider->DeleteResource(id);
+  EXPECT_EQ(0u, release_sync_point);
+  EXPECT_FALSE(lost_resource);
+  EXPECT_EQ(main_thread_task_runner_.get(), main_thread_task_runner);
+}
+
+// Same GL call-sequence check as TextureMailbox_GLTexture2D, but with a
+// GL_TEXTURE_EXTERNAL_OES target: wait, bind + consume under the read lock,
+// then an insertSyncPoint with no produce on unlock.
+TEST_P(ResourceProviderTest, TextureMailbox_GLTextureExternalOES) {
+  // Mailboxing is only supported for GL textures.
+  if (GetParam() != ResourceProvider::GLTexture)
+    return;
+
+  scoped_ptr<TextureStateTrackingContext> context_owned(
+      new TextureStateTrackingContext);
+  TextureStateTrackingContext* context = context_owned.get();
+
+  FakeOutputSurfaceClient output_surface_client;
+  scoped_ptr<OutputSurface> output_surface(
+      FakeOutputSurface::Create3d(context_owned.Pass()));
+  CHECK(output_surface->BindToClient(&output_surface_client));
+
+  scoped_ptr<ResourceProvider> resource_provider(
+      ResourceProvider::Create(output_surface.get(),
+                               shared_bitmap_manager_.get(),
+                               NULL,
+                               0,
+                               false,
+                               1,
+                               false));
+
+  unsigned texture_id = 1;
+  uint32 sync_point = 30;
+  unsigned target = GL_TEXTURE_EXTERNAL_OES;
+
+  // Creating the resource from the mailbox must touch no GL state.
+  EXPECT_CALL(*context, bindTexture(_, _)).Times(0);
+  EXPECT_CALL(*context, waitSyncPoint(_)).Times(0);
+  EXPECT_CALL(*context, insertSyncPoint()).Times(0);
+  EXPECT_CALL(*context, produceTextureCHROMIUM(_, _)).Times(0);
+  EXPECT_CALL(*context, consumeTextureCHROMIUM(_, _)).Times(0);
+
+  gpu::Mailbox gpu_mailbox;
+  memcpy(gpu_mailbox.name, "Hello world", strlen("Hello world") + 1);
+  scoped_ptr<SingleReleaseCallbackImpl> callback =
+      SingleReleaseCallbackImpl::Create(base::Bind(&EmptyReleaseCallback));
+
+  TextureMailbox mailbox(gpu_mailbox, target, sync_point);
+
+  ResourceProvider::ResourceId id =
+      resource_provider->CreateResourceFromTextureMailbox(
+          mailbox, callback.Pass());
+  EXPECT_NE(0u, id);
+
+  Mock::VerifyAndClearExpectations(context);
+
+  {
+    // Mailbox sync point WaitSyncPoint before using the texture.
+    EXPECT_CALL(*context, waitSyncPoint(sync_point));
+    resource_provider->WaitSyncPointIfNeeded(id);
+    Mock::VerifyAndClearExpectations(context);
+
+    // Using the texture does a consume of the mailbox.
+    EXPECT_CALL(*context, bindTexture(target, texture_id));
+    EXPECT_CALL(*context, consumeTextureCHROMIUM(target, _));
+
+    EXPECT_CALL(*context, insertSyncPoint()).Times(0);
+    EXPECT_CALL(*context, produceTextureCHROMIUM(_, _)).Times(0);
+
+    ResourceProvider::ScopedReadLockGL lock(resource_provider.get(), id);
+    Mock::VerifyAndClearExpectations(context);
+
+    // When done with it, a sync point should be inserted, but no produce is
+    // necessary.
+    EXPECT_CALL(*context, bindTexture(_, _)).Times(0);
+    EXPECT_CALL(*context, insertSyncPoint());
+    EXPECT_CALL(*context, produceTextureCHROMIUM(_, _)).Times(0);
+
+    EXPECT_CALL(*context, waitSyncPoint(_)).Times(0);
+    EXPECT_CALL(*context, consumeTextureCHROMIUM(_, _)).Times(0);
+  }
+}
+
+// Verify that WaitSyncPointIfNeeded issues waitSyncPoint exactly once for a
+// mailbox created with a non-zero sync point: the first call waits, later
+// calls are no-ops.
+TEST_P(ResourceProviderTest,
+       TextureMailbox_WaitSyncPointIfNeeded_WithSyncPoint) {
+  // Mailboxing is only supported for GL textures.
+  if (GetParam() != ResourceProvider::GLTexture)
+    return;
+
+  scoped_ptr<TextureStateTrackingContext> context_owned(
+      new TextureStateTrackingContext);
+  TextureStateTrackingContext* context = context_owned.get();
+
+  FakeOutputSurfaceClient output_surface_client;
+  scoped_ptr<OutputSurface> output_surface(
+      FakeOutputSurface::Create3d(context_owned.Pass()));
+  CHECK(output_surface->BindToClient(&output_surface_client));
+
+  scoped_ptr<ResourceProvider> resource_provider(
+      ResourceProvider::Create(output_surface.get(),
+                               shared_bitmap_manager_.get(),
+                               NULL,
+                               0,
+                               false,
+                               1,
+                               false));
+
+  uint32 sync_point = 30;
+  unsigned target = GL_TEXTURE_2D;
+
+  // Creating the resource from the mailbox must touch no GL state.
+  EXPECT_CALL(*context, bindTexture(_, _)).Times(0);
+  EXPECT_CALL(*context, waitSyncPoint(_)).Times(0);
+  EXPECT_CALL(*context, insertSyncPoint()).Times(0);
+  EXPECT_CALL(*context, produceTextureCHROMIUM(_, _)).Times(0);
+  EXPECT_CALL(*context, consumeTextureCHROMIUM(_, _)).Times(0);
+
+  gpu::Mailbox gpu_mailbox;
+  memcpy(gpu_mailbox.name, "Hello world", strlen("Hello world") + 1);
+  scoped_ptr<SingleReleaseCallbackImpl> callback =
+      SingleReleaseCallbackImpl::Create(base::Bind(&EmptyReleaseCallback));
+
+  TextureMailbox mailbox(gpu_mailbox, target, sync_point);
+
+  ResourceProvider::ResourceId id =
+      resource_provider->CreateResourceFromTextureMailbox(mailbox,
+                                                          callback.Pass());
+  EXPECT_NE(0u, id);
+
+  Mock::VerifyAndClearExpectations(context);
+
+  {
+    // First call to WaitSyncPointIfNeeded should call waitSyncPoint.
+    EXPECT_CALL(*context, waitSyncPoint(sync_point));
+    resource_provider->WaitSyncPointIfNeeded(id);
+    Mock::VerifyAndClearExpectations(context);
+
+    // Subsequent calls to WaitSyncPointIfNeeded shouldn't call waitSyncPoint.
+    EXPECT_CALL(*context, waitSyncPoint(_)).Times(0);
+    resource_provider->WaitSyncPointIfNeeded(id);
+    Mock::VerifyAndClearExpectations(context);
+  }
+}
+
+// Verifies that WaitSyncPointIfNeeded() is a no-op for a mailbox resource
+// whose sync point is 0 (i.e. there is nothing to wait on).
+TEST_P(ResourceProviderTest, TextureMailbox_WaitSyncPointIfNeeded_NoSyncPoint) {
+ // Mailboxing is only supported for GL textures.
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+
+ scoped_ptr<TextureStateTrackingContext> context_owned(
+ new TextureStateTrackingContext);
+ TextureStateTrackingContext* context = context_owned.get();
+
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(
+ FakeOutputSurface::Create3d(context_owned.Pass()));
+ CHECK(output_surface->BindToClient(&output_surface_client));
+
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager_.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+
+ // A sync point of 0 means "no sync point"; nothing should be waited on.
+ uint32 sync_point = 0;
+ unsigned target = GL_TEXTURE_2D;
+
+ EXPECT_CALL(*context, bindTexture(_, _)).Times(0);
+ EXPECT_CALL(*context, waitSyncPoint(_)).Times(0);
+ EXPECT_CALL(*context, insertSyncPoint()).Times(0);
+ EXPECT_CALL(*context, produceTextureCHROMIUM(_, _)).Times(0);
+ EXPECT_CALL(*context, consumeTextureCHROMIUM(_, _)).Times(0);
+
+ gpu::Mailbox gpu_mailbox;
+ memcpy(gpu_mailbox.name, "Hello world", strlen("Hello world") + 1);
+ scoped_ptr<SingleReleaseCallbackImpl> callback =
+ SingleReleaseCallbackImpl::Create(base::Bind(&EmptyReleaseCallback));
+
+ TextureMailbox mailbox(gpu_mailbox, target, sync_point);
+
+ ResourceProvider::ResourceId id =
+ resource_provider->CreateResourceFromTextureMailbox(mailbox,
+ callback.Pass());
+ EXPECT_NE(0u, id);
+
+ Mock::VerifyAndClearExpectations(context);
+
+ {
+ // WaitSyncPointIfNeeded with sync_point == 0 shouldn't call waitSyncPoint.
+ EXPECT_CALL(*context, waitSyncPoint(_)).Times(0);
+ resource_provider->WaitSyncPointIfNeeded(id);
+ Mock::VerifyAndClearExpectations(context);
+ }
+}
+
+// Mock GL context that tracks texture id allocation and every texture
+// upload/allocation entry point (texImage2D, texStorage2DEXT, async and
+// compressed variants) as well as the CHROMIUM image APIs. Used by the
+// allocation-oriented tests below to assert exactly which GL calls a
+// ResourceProvider operation performs.
+class AllocationTrackingContext3D : public TestWebGraphicsContext3D {
+ public:
+ MOCK_METHOD0(NextTextureId, GLuint());
+ MOCK_METHOD1(RetireTextureId, void(GLuint id));
+ MOCK_METHOD2(bindTexture, void(GLenum target, GLuint texture));
+ MOCK_METHOD5(texStorage2DEXT,
+ void(GLenum target,
+ GLint levels,
+ GLuint internalformat,
+ GLint width,
+ GLint height));
+ MOCK_METHOD9(texImage2D,
+ void(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels));
+ MOCK_METHOD9(texSubImage2D,
+ void(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels));
+ MOCK_METHOD9(asyncTexImage2DCHROMIUM,
+ void(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels));
+ MOCK_METHOD9(asyncTexSubImage2DCHROMIUM,
+ void(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels));
+ MOCK_METHOD8(compressedTexImage2D,
+ void(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei image_size,
+ const void* data));
+ MOCK_METHOD1(waitAsyncTexImage2DCHROMIUM, void(GLenum));
+ MOCK_METHOD4(createImageCHROMIUM, GLuint(GLsizei, GLsizei, GLenum, GLenum));
+ MOCK_METHOD1(destroyImageCHROMIUM, void(GLuint));
+ MOCK_METHOD1(mapImageCHROMIUM, void*(GLuint));
+ MOCK_METHOD3(getImageParameterivCHROMIUM, void(GLuint, GLenum, GLint*));
+ MOCK_METHOD1(unmapImageCHROMIUM, void(GLuint));
+ MOCK_METHOD2(bindTexImage2DCHROMIUM, void(GLenum, GLint));
+ MOCK_METHOD2(releaseTexImage2DCHROMIUM, void(GLenum, GLint));
+
+ // We're mocking bindTexture, so we override
+ // TestWebGraphicsContext3D::texParameteri to avoid assertions related to the
+ // currently bound texture.
+ virtual void texParameteri(GLenum target, GLenum pname, GLint param) OVERRIDE {}
+};
+
+// Verifies the ResourceProvider's lazy texture allocation: creating a
+// resource performs no GL work; storage is allocated on first use
+// (CreateForTesting / SetPixels / BeginSetPixels), and deleting the
+// resource retires the texture id.
+TEST_P(ResourceProviderTest, TextureAllocation) {
+ // Only for GL textures.
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+ scoped_ptr<AllocationTrackingContext3D> context_owned(
+ new StrictMock<AllocationTrackingContext3D>);
+ AllocationTrackingContext3D* context = context_owned.get();
+
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(
+ FakeOutputSurface::Create3d(context_owned.Pass()));
+ CHECK(output_surface->BindToClient(&output_surface_client));
+
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager_.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+
+ gfx::Size size(2, 2);
+ gfx::Vector2d offset(0, 0);
+ gfx::Rect rect(0, 0, 2, 2);
+ ResourceFormat format = RGBA_8888;
+ ResourceProvider::ResourceId id = 0;
+ uint8_t pixels[16] = { 0 };
+ int texture_id = 123;
+
+ // Lazy allocation. Don't allocate when creating the resource.
+ id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+
+ EXPECT_CALL(*context, NextTextureId()).WillOnce(Return(texture_id));
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id)).Times(1);
+ resource_provider->CreateForTesting(id);
+
+ EXPECT_CALL(*context, RetireTextureId(texture_id)).Times(1);
+ resource_provider->DeleteResource(id);
+
+ Mock::VerifyAndClearExpectations(context);
+
+ // Do allocate when we set the pixels.
+ id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+
+ EXPECT_CALL(*context, NextTextureId()).WillOnce(Return(texture_id));
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id)).Times(3);
+ EXPECT_CALL(*context, texImage2D(_, _, _, 2, 2, _, _, _, _)).Times(1);
+ EXPECT_CALL(*context, texSubImage2D(_, _, _, _, 2, 2, _, _, _)).Times(1);
+ resource_provider->SetPixels(id, pixels, rect, rect, offset);
+
+ EXPECT_CALL(*context, RetireTextureId(texture_id)).Times(1);
+ resource_provider->DeleteResource(id);
+
+ Mock::VerifyAndClearExpectations(context);
+
+ // Same for async version.
+ id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ resource_provider->AcquirePixelBuffer(id);
+
+ EXPECT_CALL(*context, NextTextureId()).WillOnce(Return(texture_id));
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id)).Times(2);
+ EXPECT_CALL(*context, asyncTexImage2DCHROMIUM(_, _, _, 2, 2, _, _, _, _))
+ .Times(1);
+ resource_provider->BeginSetPixels(id);
+ ASSERT_TRUE(resource_provider->DidSetPixelsComplete(id));
+
+ resource_provider->ReleasePixelBuffer(id);
+
+ EXPECT_CALL(*context, RetireTextureId(texture_id)).Times(1);
+ resource_provider->DeleteResource(id);
+
+ Mock::VerifyAndClearExpectations(context);
+}
+
+// Exercises every TextureHint x {RGBA_8888, BGRA_8888} combination and
+// checks the allocation path chosen: texStorage2DEXT (immutable storage)
+// only for an immutable hint with RGBA_8888, texImage2D otherwise. BGRA
+// never uses texStorage2DEXT here because the context does not advertise
+// BGRA support (contrast with TextureAllocationHint_BGRA below).
+// Note: texture_id doubles as the 1-based index into |hints|.
+TEST_P(ResourceProviderTest, TextureAllocationHint) {
+ // Only for GL textures.
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+ scoped_ptr<AllocationTrackingContext3D> context_owned(
+ new StrictMock<AllocationTrackingContext3D>);
+ AllocationTrackingContext3D* context = context_owned.get();
+ context->set_support_texture_storage(true);
+ context->set_support_texture_usage(true);
+
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(
+ FakeOutputSurface::Create3d(context_owned.Pass()));
+ CHECK(output_surface->BindToClient(&output_surface_client));
+
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager_.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+
+ gfx::Size size(2, 2);
+
+ const ResourceFormat formats[2] = {RGBA_8888, BGRA_8888};
+ const ResourceProvider::TextureHint hints[4] = {
+ ResourceProvider::TextureHintDefault,
+ ResourceProvider::TextureHintImmutable,
+ ResourceProvider::TextureHintFramebuffer,
+ ResourceProvider::TextureHintImmutableFramebuffer,
+ };
+ for (size_t i = 0; i < arraysize(formats); ++i) {
+ for (GLuint texture_id = 1; texture_id <= arraysize(hints); ++texture_id) {
+ // Lazy allocation. Don't allocate when creating the resource.
+ ResourceProvider::ResourceId id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, hints[texture_id - 1], formats[i]);
+
+ EXPECT_CALL(*context, NextTextureId()).WillOnce(Return(texture_id));
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id)).Times(2);
+ bool is_immutable_hint =
+ hints[texture_id - 1] & ResourceProvider::TextureHintImmutable;
+ bool support_immutable_texture =
+ is_immutable_hint && formats[i] == RGBA_8888;
+ EXPECT_CALL(*context, texStorage2DEXT(_, _, _, 2, 2))
+ .Times(support_immutable_texture ? 1 : 0);
+ EXPECT_CALL(*context, texImage2D(_, _, _, 2, 2, _, _, _, _))
+ .Times(support_immutable_texture ? 0 : 1);
+ resource_provider->AllocateForTesting(id);
+
+ EXPECT_CALL(*context, RetireTextureId(texture_id)).Times(1);
+ resource_provider->DeleteResource(id);
+
+ Mock::VerifyAndClearExpectations(context);
+ }
+ }
+}
+
+// Same matrix as TextureAllocationHint, but with BGRA8888 texture-format
+// support enabled on the context: with BGRA supported, any immutable hint
+// uses texStorage2DEXT regardless of format.
+TEST_P(ResourceProviderTest, TextureAllocationHint_BGRA) {
+ // Only for GL textures.
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+ scoped_ptr<AllocationTrackingContext3D> context_owned(
+ new StrictMock<AllocationTrackingContext3D>);
+ AllocationTrackingContext3D* context = context_owned.get();
+ context->set_support_texture_format_bgra8888(true);
+ context->set_support_texture_storage(true);
+ context->set_support_texture_usage(true);
+
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(
+ FakeOutputSurface::Create3d(context_owned.Pass()));
+ CHECK(output_surface->BindToClient(&output_surface_client));
+
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager_.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+
+ gfx::Size size(2, 2);
+ const ResourceFormat formats[2] = {RGBA_8888, BGRA_8888};
+
+ const ResourceProvider::TextureHint hints[4] = {
+ ResourceProvider::TextureHintDefault,
+ ResourceProvider::TextureHintImmutable,
+ ResourceProvider::TextureHintFramebuffer,
+ ResourceProvider::TextureHintImmutableFramebuffer,
+ };
+ for (size_t i = 0; i < arraysize(formats); ++i) {
+ for (GLuint texture_id = 1; texture_id <= arraysize(hints); ++texture_id) {
+ // Lazy allocation. Don't allocate when creating the resource.
+ ResourceProvider::ResourceId id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, hints[texture_id - 1], formats[i]);
+
+ EXPECT_CALL(*context, NextTextureId()).WillOnce(Return(texture_id));
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id)).Times(2);
+ bool is_immutable_hint =
+ hints[texture_id - 1] & ResourceProvider::TextureHintImmutable;
+ EXPECT_CALL(*context, texStorage2DEXT(_, _, _, 2, 2))
+ .Times(is_immutable_hint ? 1 : 0);
+ EXPECT_CALL(*context, texImage2D(_, _, _, 2, 2, _, _, _, _))
+ .Times(is_immutable_hint ? 0 : 1);
+ resource_provider->AllocateForTesting(id);
+
+ EXPECT_CALL(*context, RetireTextureId(texture_id)).Times(1);
+ resource_provider->DeleteResource(id);
+
+ Mock::VerifyAndClearExpectations(context);
+ }
+ }
+}
+
+// Verifies the pixel-buffer upload path: BeginSetPixels allocates the
+// texture lazily and issues exactly one asyncTexImage2DCHROMIUM, and the
+// upload reports completion via DidSetPixelsComplete.
+TEST_P(ResourceProviderTest, PixelBuffer_GLTexture) {
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+ scoped_ptr<AllocationTrackingContext3D> context_owned(
+ new StrictMock<AllocationTrackingContext3D>);
+ AllocationTrackingContext3D* context = context_owned.get();
+
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(
+ FakeOutputSurface::Create3d(context_owned.Pass()));
+ CHECK(output_surface->BindToClient(&output_surface_client));
+
+ gfx::Size size(2, 2);
+ ResourceFormat format = RGBA_8888;
+ ResourceProvider::ResourceId id = 0;
+ int texture_id = 123;
+
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager_.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+
+ id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ resource_provider->AcquirePixelBuffer(id);
+
+ EXPECT_CALL(*context, NextTextureId()).WillOnce(Return(texture_id));
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id)).Times(2);
+ EXPECT_CALL(*context, asyncTexImage2DCHROMIUM(_, _, _, 2, 2, _, _, _, _))
+ .Times(1);
+ resource_provider->BeginSetPixels(id);
+
+ EXPECT_TRUE(resource_provider->DidSetPixelsComplete(id));
+
+ resource_provider->ReleasePixelBuffer(id);
+
+ EXPECT_CALL(*context, RetireTextureId(texture_id)).Times(1);
+ resource_provider->DeleteResource(id);
+
+ Mock::VerifyAndClearExpectations(context);
+}
+
+// Verifies that ForceSetPixelsToComplete blocks on an in-flight async
+// upload: it binds the texture, calls waitAsyncTexImage2DCHROMIUM, and
+// restores the binding to 0.
+TEST_P(ResourceProviderTest, ForcingAsyncUploadToComplete) {
+ // Only for GL textures.
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+ scoped_ptr<AllocationTrackingContext3D> context_owned(
+ new StrictMock<AllocationTrackingContext3D>);
+ AllocationTrackingContext3D* context = context_owned.get();
+
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(
+ FakeOutputSurface::Create3d(context_owned.Pass()));
+ CHECK(output_surface->BindToClient(&output_surface_client));
+
+ gfx::Size size(2, 2);
+ ResourceFormat format = RGBA_8888;
+ ResourceProvider::ResourceId id = 0;
+ int texture_id = 123;
+
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager_.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+
+ id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ resource_provider->AcquirePixelBuffer(id);
+
+ EXPECT_CALL(*context, NextTextureId()).WillOnce(Return(texture_id));
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id)).Times(2);
+ EXPECT_CALL(*context, asyncTexImage2DCHROMIUM(_, _, _, 2, 2, _, _, _, _))
+ .Times(1);
+ resource_provider->BeginSetPixels(id);
+
+ // Forcing completion waits for the async upload, then unbinds.
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id)).Times(1);
+ EXPECT_CALL(*context, waitAsyncTexImage2DCHROMIUM(GL_TEXTURE_2D)).Times(1);
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, 0)).Times(1);
+ resource_provider->ForceSetPixelsToComplete(id);
+
+ resource_provider->ReleasePixelBuffer(id);
+
+ EXPECT_CALL(*context, RetireTextureId(texture_id)).Times(1);
+ resource_provider->DeleteResource(id);
+
+ Mock::VerifyAndClearExpectations(context);
+}
+
+// Verifies that mapping a pixel buffer after the GL context is lost
+// returns NULL rather than crashing.
+// NOTE(review): unlike the other GL-oriented tests this one has no
+// GetParam() guard; presumably intentional since the provider is created
+// from a GL output surface either way — confirm.
+TEST_P(ResourceProviderTest, PixelBufferLostContext) {
+ scoped_ptr<AllocationTrackingContext3D> context_owned(
+ new NiceMock<AllocationTrackingContext3D>);
+ AllocationTrackingContext3D* context = context_owned.get();
+
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(
+ FakeOutputSurface::Create3d(context_owned.Pass()));
+ CHECK(output_surface->BindToClient(&output_surface_client));
+
+ gfx::Size size(2, 2);
+ ResourceFormat format = RGBA_8888;
+ ResourceProvider::ResourceId id = 0;
+ int texture_id = 123;
+
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager_.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+
+ EXPECT_CALL(*context, NextTextureId()).WillRepeatedly(Return(texture_id));
+
+ id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ context->loseContextCHROMIUM(GL_GUILTY_CONTEXT_RESET_ARB,
+ GL_INNOCENT_CONTEXT_RESET_ARB);
+
+ resource_provider->AcquirePixelBuffer(id);
+ int stride;
+ void* buffer = resource_provider->MapPixelBuffer(id, &stride);
+ EXPECT_FALSE(buffer);
+ resource_provider->UnmapPixelBuffer(id);
+ Mock::VerifyAndClearExpectations(context);
+}
+
+// Exercises the GPU-memory-buffer image path: AcquireImage / MapImage /
+// UnmapImage, lazy texture creation on first sampling, and rebinding of
+// the image (release + bind) after the contents were remapped.
+TEST_P(ResourceProviderTest, Image_GLTexture) {
+ // Only for GL textures.
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+ scoped_ptr<AllocationTrackingContext3D> context_owned(
+ new StrictMock<AllocationTrackingContext3D>);
+ AllocationTrackingContext3D* context = context_owned.get();
+
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(
+ FakeOutputSurface::Create3d(context_owned.Pass()));
+ CHECK(output_surface->BindToClient(&output_surface_client));
+
+ const int kWidth = 2;
+ const int kHeight = 2;
+ gfx::Size size(kWidth, kHeight);
+ ResourceFormat format = RGBA_8888;
+ ResourceProvider::ResourceId id = 0;
+ const unsigned kTextureId = 123u;
+ const unsigned kImageId = 234u;
+
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager_.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+
+ id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+
+ const int kStride = 4;
+ void* dummy_mapped_buffer_address = NULL;
+ EXPECT_CALL(
+ *context,
+ createImageCHROMIUM(kWidth, kHeight, GL_RGBA8_OES, GL_IMAGE_MAP_CHROMIUM))
+ .WillOnce(Return(kImageId))
+ .RetiresOnSaturation();
+ resource_provider->AcquireImage(id);
+
+ // Mapping queries the row stride, then maps the image buffer.
+ EXPECT_CALL(*context, getImageParameterivCHROMIUM(kImageId,
+ GL_IMAGE_ROWBYTES_CHROMIUM,
+ _))
+ .WillOnce(SetArgPointee<2>(kStride))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*context, mapImageCHROMIUM(kImageId))
+ .WillOnce(Return(dummy_mapped_buffer_address))
+ .RetiresOnSaturation();
+ int stride;
+ resource_provider->MapImage(id, &stride);
+
+ EXPECT_CALL(*context, unmapImageCHROMIUM(kImageId))
+ .Times(1)
+ .RetiresOnSaturation();
+ resource_provider->UnmapImage(id);
+
+ // First sampling lazily creates the texture and binds the image to it.
+ EXPECT_CALL(*context, NextTextureId())
+ .WillOnce(Return(kTextureId))
+ .RetiresOnSaturation();
+ // Once in CreateTextureId and once in BindForSampling
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, kTextureId)).Times(2)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*context, bindTexImage2DCHROMIUM(GL_TEXTURE_2D, kImageId))
+ .Times(1)
+ .RetiresOnSaturation();
+ {
+ ResourceProvider::ScopedSamplerGL lock_gl(
+ resource_provider.get(), id, GL_TEXTURE_2D, GL_LINEAR);
+ EXPECT_EQ(kTextureId, lock_gl.texture_id());
+ }
+
+ EXPECT_CALL(
+ *context,
+ getImageParameterivCHROMIUM(kImageId, GL_IMAGE_ROWBYTES_CHROMIUM, _))
+ .WillOnce(SetArgPointee<2>(kStride))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*context, mapImageCHROMIUM(kImageId))
+ .WillOnce(Return(dummy_mapped_buffer_address))
+ .RetiresOnSaturation();
+ resource_provider->MapImage(id, &stride);
+
+ EXPECT_CALL(*context, unmapImageCHROMIUM(kImageId))
+ .Times(1)
+ .RetiresOnSaturation();
+ resource_provider->UnmapImage(id);
+
+ // Sampling after a remap releases and rebinds the image on the texture.
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, kTextureId)).Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*context, releaseTexImage2DCHROMIUM(GL_TEXTURE_2D, kImageId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*context, bindTexImage2DCHROMIUM(GL_TEXTURE_2D, kImageId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*context, RetireTextureId(kTextureId))
+ .Times(1)
+ .RetiresOnSaturation();
+ {
+ ResourceProvider::ScopedSamplerGL lock_gl(
+ resource_provider.get(), id, GL_TEXTURE_2D, GL_LINEAR);
+ EXPECT_EQ(kTextureId, lock_gl.texture_id());
+ }
+
+ // Destroying the provider at end of scope destroys the image.
+ EXPECT_CALL(*context, destroyImageCHROMIUM(kImageId))
+ .Times(1)
+ .RetiresOnSaturation();
+}
+
+// Verifies CopyResource from an image-backed source into a texture-backed
+// destination: both textures are lazily created and the source image is
+// bound before the copy.
+TEST_P(ResourceProviderTest, CopyResource_GLTexture) {
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+ scoped_ptr<AllocationTrackingContext3D> context_owned(
+ new StrictMock<AllocationTrackingContext3D>);
+ AllocationTrackingContext3D* context = context_owned.get();
+ context_owned->set_support_sync_query(true);
+
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(
+ FakeOutputSurface::Create3d(context_owned.Pass()));
+ ASSERT_TRUE(output_surface->BindToClient(&output_surface_client));
+
+ const int kWidth = 2;
+ const int kHeight = 2;
+ gfx::Size size(kWidth, kHeight);
+ ResourceFormat format = RGBA_8888;
+ ResourceProvider::ResourceId source_id = 0;
+ ResourceProvider::ResourceId dest_id = 0;
+ const unsigned kSourceTextureId = 123u;
+ const unsigned kDestTextureId = 321u;
+ const unsigned kImageId = 234u;
+
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager_.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+
+ source_id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+
+ // Back the source with a mapped CHROMIUM image.
+ const int kStride = 4;
+ void* dummy_mapped_buffer_address = NULL;
+ EXPECT_CALL(
+ *context,
+ createImageCHROMIUM(kWidth, kHeight, GL_RGBA8_OES, GL_IMAGE_MAP_CHROMIUM))
+ .WillOnce(Return(kImageId))
+ .RetiresOnSaturation();
+ EXPECT_CALL(
+ *context,
+ getImageParameterivCHROMIUM(kImageId, GL_IMAGE_ROWBYTES_CHROMIUM, _))
+ .WillOnce(SetArgPointee<2>(kStride))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*context, mapImageCHROMIUM(kImageId))
+ .WillOnce(Return(dummy_mapped_buffer_address))
+ .RetiresOnSaturation();
+ resource_provider->AcquireImage(source_id);
+ int stride;
+ resource_provider->MapImage(source_id, &stride);
+ EXPECT_CALL(*context, unmapImageCHROMIUM(kImageId))
+ .Times(1)
+ .RetiresOnSaturation();
+ resource_provider->UnmapImage(source_id);
+ Mock::VerifyAndClearExpectations(context);
+
+ dest_id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+
+ // CopyResource lazily creates both textures and binds the source image.
+ EXPECT_CALL(*context, NextTextureId())
+ .WillOnce(Return(kDestTextureId))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, kDestTextureId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*context, NextTextureId())
+ .WillOnce(Return(kSourceTextureId))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, kSourceTextureId))
+ .Times(2)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*context, bindTexImage2DCHROMIUM(GL_TEXTURE_2D, kImageId))
+ .Times(1)
+ .RetiresOnSaturation();
+ resource_provider->CopyResource(source_id, dest_id);
+ Mock::VerifyAndClearExpectations(context);
+
+ EXPECT_CALL(*context, destroyImageCHROMIUM(kImageId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*context, RetireTextureId(kSourceTextureId))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*context, RetireTextureId(kDestTextureId))
+ .Times(1)
+ .RetiresOnSaturation();
+ resource_provider->DeleteResource(source_id);
+ resource_provider->DeleteResource(dest_id);
+}
+
+// Helper: switches |output_surface| / |resource_provider| to a fresh GL
+// context created from |shared_data|, then verifies resource creation
+// works in GL mode via CheckCreateResource.
+void InitializeGLAndCheck(ContextSharedData* shared_data,
+ ResourceProvider* resource_provider,
+ FakeOutputSurface* output_surface) {
+ scoped_ptr<ResourceProviderContext> context_owned =
+ ResourceProviderContext::Create(shared_data);
+ ResourceProviderContext* context = context_owned.get();
+
+ scoped_refptr<TestContextProvider> context_provider =
+ TestContextProvider::Create(context_owned.Pass());
+ output_surface->InitializeAndSetContext3d(context_provider);
+ resource_provider->InitializeGL();
+
+ CheckCreateResource(ResourceProvider::GLTexture, resource_provider, context);
+}
+
+// Verifies a ResourceProvider can round-trip between software and GL
+// modes (software -> GL -> software -> GL) and create resources in each.
+TEST(ResourceProviderTest, BasicInitializeGLSoftware) {
+ scoped_ptr<ContextSharedData> shared_data = ContextSharedData::Create();
+ bool delegated_rendering = false;
+ scoped_ptr<FakeOutputSurface> output_surface(
+ FakeOutputSurface::CreateDeferredGL(
+ scoped_ptr<SoftwareOutputDevice>(new SoftwareOutputDevice),
+ delegated_rendering));
+ FakeOutputSurfaceClient client(output_surface.get());
+ EXPECT_TRUE(output_surface->BindToClient(&client));
+ scoped_ptr<SharedBitmapManager> shared_bitmap_manager(
+ new TestSharedBitmapManager());
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+
+ CheckCreateResource(ResourceProvider::Bitmap, resource_provider.get(), NULL);
+
+ InitializeGLAndCheck(shared_data.get(),
+ resource_provider.get(),
+ output_surface.get());
+
+ resource_provider->InitializeSoftware();
+ output_surface->ReleaseGL();
+ CheckCreateResource(ResourceProvider::Bitmap, resource_provider.get(), NULL);
+
+ InitializeGLAndCheck(shared_data.get(),
+ resource_provider.get(),
+ output_surface.get());
+}
+
+// Verifies an ETC1 compressed resource can be created and allocated when
+// the context advertises ETC1 support; allocation only creates/binds the
+// texture (no upload).
+TEST_P(ResourceProviderTest, CompressedTextureETC1Allocate) {
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+
+ scoped_ptr<AllocationTrackingContext3D> context_owned(
+ new AllocationTrackingContext3D);
+ AllocationTrackingContext3D* context = context_owned.get();
+ context_owned->set_support_compressed_texture_etc1(true);
+
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(
+ FakeOutputSurface::Create3d(context_owned.Pass()));
+ CHECK(output_surface->BindToClient(&output_surface_client));
+
+ gfx::Size size(4, 4);
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager_.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+ int texture_id = 123;
+
+ ResourceProvider::ResourceId id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, ETC1);
+ EXPECT_NE(0u, id);
+ EXPECT_CALL(*context, NextTextureId()).WillOnce(Return(texture_id));
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id)).Times(2);
+ resource_provider->AllocateForTesting(id);
+
+ EXPECT_CALL(*context, RetireTextureId(texture_id)).Times(1);
+ resource_provider->DeleteResource(id);
+}
+
+// Verifies SetPixels on an ETC1 resource uploads via compressedTexImage2D
+// (not texImage2D/texSubImage2D).
+TEST_P(ResourceProviderTest, CompressedTextureETC1SetPixels) {
+ if (GetParam() != ResourceProvider::GLTexture)
+ return;
+
+ scoped_ptr<AllocationTrackingContext3D> context_owned(
+ new AllocationTrackingContext3D);
+ AllocationTrackingContext3D* context = context_owned.get();
+ context_owned->set_support_compressed_texture_etc1(true);
+
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(
+ FakeOutputSurface::Create3d(context_owned.Pass()));
+ CHECK(output_surface->BindToClient(&output_surface_client));
+
+ gfx::Size size(4, 4);
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager_.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+ int texture_id = 123;
+ uint8_t pixels[8];
+
+ ResourceProvider::ResourceId id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, ETC1);
+ EXPECT_NE(0u, id);
+ EXPECT_CALL(*context, NextTextureId()).WillOnce(Return(texture_id));
+ EXPECT_CALL(*context, bindTexture(GL_TEXTURE_2D, texture_id)).Times(3);
+ EXPECT_CALL(*context,
+ compressedTexImage2D(
+ _, 0, _, size.width(), size.height(), _, _, _)).Times(1);
+ resource_provider->SetPixels(
+ id, pixels, gfx::Rect(size), gfx::Rect(size), gfx::Vector2d(0, 0));
+
+ EXPECT_CALL(*context, RetireTextureId(texture_id)).Times(1);
+ resource_provider->DeleteResource(id);
+}
+
+// Run every TEST_P above once per resource type (GL and software bitmap).
+INSTANTIATE_TEST_CASE_P(
+ ResourceProviderTests,
+ ResourceProviderTest,
+ ::testing::Values(ResourceProvider::GLTexture, ResourceProvider::Bitmap));
+
+// Context that hands out monotonically increasing texture ids from the
+// shared namespace and lets tests peek at the next id to be issued, so
+// they can count how many ids a ResourceProvider pre-allocated.
+class TextureIdAllocationTrackingContext : public TestWebGraphicsContext3D {
+ public:
+ virtual GLuint NextTextureId() OVERRIDE {
+ base::AutoLock lock(namespace_->lock);
+ return namespace_->next_texture_id++;
+ }
+ virtual void RetireTextureId(GLuint) OVERRIDE {}
+ // Returns the id the next NextTextureId() call would hand out, without
+ // consuming it.
+ GLuint PeekTextureId() {
+ base::AutoLock lock(namespace_->lock);
+ return namespace_->next_texture_id;
+ }
+};
+
+// Verifies that texture ids are allocated in chunks of the size passed to
+// ResourceProvider::Create: a chunk size of 1 consumes one id per
+// allocation, a chunk size of 8 consumes eight ids up front.
+TEST(ResourceProviderTest, TextureAllocationChunkSize) {
+ scoped_ptr<TextureIdAllocationTrackingContext> context_owned(
+ new TextureIdAllocationTrackingContext);
+ TextureIdAllocationTrackingContext* context = context_owned.get();
+
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(
+ FakeOutputSurface::Create3d(context_owned.Pass()));
+ CHECK(output_surface->BindToClient(&output_surface_client));
+ scoped_ptr<SharedBitmapManager> shared_bitmap_manager(
+ new TestSharedBitmapManager());
+
+ gfx::Size size(1, 1);
+ ResourceFormat format = RGBA_8888;
+
+ {
+ size_t kTextureAllocationChunkSize = 1;
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager.get(),
+ NULL,
+ 0,
+ false,
+ kTextureAllocationChunkSize,
+ false));
+
+ ResourceProvider::ResourceId id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ resource_provider->AllocateForTesting(id);
+ Mock::VerifyAndClearExpectations(context);
+
+ // Ids start at 1; one allocation with chunk size 1 consumes exactly one.
+ // Use a gtest assertion rather than DCHECK_EQ so the check also runs
+ // (and reports instead of crashing) in release builds.
+ EXPECT_EQ(2u, context->PeekTextureId());
+ resource_provider->DeleteResource(id);
+ }
+
+ {
+ size_t kTextureAllocationChunkSize = 8;
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager.get(),
+ NULL,
+ 0,
+ false,
+ kTextureAllocationChunkSize,
+ false));
+
+ ResourceProvider::ResourceId id = resource_provider->CreateResource(
+ size, GL_CLAMP_TO_EDGE, ResourceProvider::TextureHintImmutable, format);
+ resource_provider->AllocateForTesting(id);
+ Mock::VerifyAndClearExpectations(context);
+
+ // A full chunk of 8 ids (2..9) is reserved even though one was needed.
+ EXPECT_EQ(10u, context->PeekTextureId());
+ resource_provider->DeleteResource(id);
+ }
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/resource_update.cc b/cc/resources/resource_update.cc
new file mode 100644
index 0000000..2fe8fe5
--- /dev/null
+++ b/cc/resources/resource_update.cc
@@ -0,0 +1,32 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/resource_update.h"
+
+#include "base/logging.h"
+
+namespace cc {
+
+// Builds a ResourceUpdate describing an upload of |source_rect| from
+// |bitmap| into |resource| at |dest_offset|. |content_rect| is the full
+// area the bitmap covers and must contain |source_rect| (CHECKed).
+ResourceUpdate ResourceUpdate::Create(PrioritizedResource* resource,
+ const SkBitmap* bitmap,
+ const gfx::Rect& content_rect,
+ const gfx::Rect& source_rect,
+ const gfx::Vector2d& dest_offset) {
+ CHECK(content_rect.Contains(source_rect));
+ ResourceUpdate update;
+ update.texture = resource;
+ update.bitmap = bitmap;
+ update.content_rect = content_rect;
+ update.source_rect = source_rect;
+ update.dest_offset = dest_offset;
+ return update;
+}
+
+// Default-constructs an empty update; texture/bitmap are non-owning and
+// start out null.
+ResourceUpdate::ResourceUpdate()
+ : texture(NULL),
+ bitmap(NULL) {}
+
+ResourceUpdate::~ResourceUpdate() {}
+
+} // namespace cc
diff --git a/cc/resources/resource_update.h b/cc/resources/resource_update.h
new file mode 100644
index 0000000..1eddf83
--- /dev/null
+++ b/cc/resources/resource_update.h
@@ -0,0 +1,37 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RESOURCE_UPDATE_H_
+#define CC_RESOURCES_RESOURCE_UPDATE_H_
+
+#include "cc/base/cc_export.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/vector2d.h"
+
+class SkBitmap;
+
+namespace cc {
+
+class PrioritizedResource;
+
+// A single pending texture upload: which resource to update, the source
+// bitmap, and the rectangles describing what to copy where. Does not own
+// |texture| or |bitmap|; callers must keep them alive until the update is
+// applied.
+// NOTE(review): the destructor is virtual although the struct is copied
+// by value from Create() and has no other virtuals — confirm whether
+// subclassing is actually intended.
+struct CC_EXPORT ResourceUpdate {
+ static ResourceUpdate Create(PrioritizedResource* resource,
+ const SkBitmap* bitmap,
+ const gfx::Rect& content_rect,
+ const gfx::Rect& source_rect,
+ const gfx::Vector2d& dest_offset);
+
+ ResourceUpdate();
+ virtual ~ResourceUpdate();
+
+ // Destination resource; not owned.
+ PrioritizedResource* texture;
+ // Source pixels; not owned.
+ const SkBitmap* bitmap;
+ // Area of the layer the bitmap covers.
+ gfx::Rect content_rect;
+ // Sub-rect of |content_rect| to upload.
+ gfx::Rect source_rect;
+ // Offset within the destination texture.
+ gfx::Vector2d dest_offset;
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RESOURCE_UPDATE_H_
diff --git a/cc/resources/resource_update_controller.cc b/cc/resources/resource_update_controller.cc
new file mode 100644
index 0000000..bd6d21d
--- /dev/null
+++ b/cc/resources/resource_update_controller.cc
@@ -0,0 +1,164 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/resource_update_controller.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
+#include "cc/resources/prioritized_resource.h"
+#include "cc/resources/resource_provider.h"
+#include "ui/gfx/frame_time.h"
+
+namespace {
+
+// Number of partial updates we allow.
+const size_t kPartialTextureUpdatesMax = 12;
+
+// Delay, in seconds, between retry polls while the uploader has too many
+// blocking uploads in flight (converted to a task delay below).
+const double kUploaderBusyTickRate = 0.001;
+
+// Number of blocking update intervals to allow.
+const size_t kMaxBlockingUpdateIntervals = 4;
+
+}  // namespace
+
+namespace cc {
+
+// static
+size_t ResourceUpdateController::MaxPartialTextureUpdates() {
+  return kPartialTextureUpdatesMax;
+}
+
+// static
+// Asks the resource provider how many uploads fit in one tick; used to size
+// each upload batch.
+size_t ResourceUpdateController::MaxFullUpdatesPerTick(
+    ResourceProvider* resource_provider) {
+  return resource_provider->EstimatedUploadsPerTick();
+}
+
+// |client|, |task_runner| and |resource_provider| are not owned; ownership
+// of |queue| is taken.
+ResourceUpdateController::ResourceUpdateController(
+    ResourceUpdateControllerClient* client,
+    base::SingleThreadTaskRunner* task_runner,
+    scoped_ptr<ResourceUpdateQueue> queue,
+    ResourceProvider* resource_provider)
+    : client_(client),
+      queue_(queue.Pass()),
+      resource_provider_(resource_provider),
+      texture_updates_per_tick_(MaxFullUpdatesPerTick(resource_provider)),
+      first_update_attempt_(true),
+      task_runner_(task_runner),
+      task_posted_(false),
+      ready_to_finalize_(false),
+      weak_factory_(this) {}
+
+ResourceUpdateController::~ResourceUpdateController() {}
+
+// Performs as many full uploads as fit before |time_limit|.  When the queue
+// drains (or on the very first call), a task is posted whose handler either
+// continues uploading or notifies the client via
+// ReadyToFinalizeTextureUpdates().
+void ResourceUpdateController::PerformMoreUpdates(
+    base::TimeTicks time_limit) {
+  time_limit_ = time_limit;
+
+  // Update already in progress or we are already done.
+  if (task_posted_ || ready_to_finalize_)
+    return;
+
+  // Call UpdateMoreTexturesNow() directly unless it's the first update
+  // attempt. This ensures that we empty the update queue in a finite
+  // amount of time.
+  if (!first_update_attempt_)
+    UpdateMoreTexturesNow();
+
+  // Post a 0-delay task when no updates were left. When it runs,
+  // ReadyToFinalizeTextureUpdates() will be called.
+  if (!UpdateMoreTexturesIfEnoughTimeRemaining()) {
+    task_posted_ = true;
+    task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&ResourceUpdateController::OnTimerFired,
+                   weak_factory_.GetWeakPtr()));
+  }
+
+  first_update_attempt_ = false;
+}
+
+// Drops queued uploads whose backing textures were evicted on the impl
+// thread; delegates the filtering to the queue.
+void ResourceUpdateController::DiscardUploadsToEvictedResources() {
+  queue_->ClearUploadsToEvictedResources();
+}
+
+// Copies |update|'s pixels into its target texture.  The bitmap's pixels
+// are locked only for the duration of the copy.
+void ResourceUpdateController::UpdateTexture(ResourceUpdate update) {
+  update.bitmap->lockPixels();
+  update.texture->SetPixels(
+      resource_provider_,
+      static_cast<const uint8_t*>(update.bitmap->getPixels()),
+      update.content_rect,
+      update.source_rect,
+      update.dest_offset);
+  update.bitmap->unlockPixels();
+}
+
+// Synchronously performs every remaining upload (full uploads first, then
+// partial) and flushes them to the resource provider.
+void ResourceUpdateController::Finalize() {
+  while (queue_->FullUploadSize())
+    UpdateTexture(queue_->TakeFirstFullUpload());
+
+  while (queue_->PartialUploadSize())
+    UpdateTexture(queue_->TakeFirstPartialUpload());
+
+  resource_provider_->FlushUploads();
+}
+
+// Posted-task handler: either continues uploading, or — once no full
+// uploads remain — marks the controller done and notifies the client.
+// |ready_to_finalize_| ensures the notification happens at most once.
+void ResourceUpdateController::OnTimerFired() {
+  task_posted_ = false;
+  if (!UpdateMoreTexturesIfEnoughTimeRemaining()) {
+    ready_to_finalize_ = true;
+    client_->ReadyToFinalizeTextureUpdates();
+  }
+}
+
+// Estimated time at which one more batch of |texture_updates_per_tick_|
+// uploads would complete, as reported by the resource provider.
+base::TimeTicks ResourceUpdateController::UpdateMoreTexturesCompletionTime() {
+  return resource_provider_->EstimatedUploadCompletionTime(
+      texture_updates_per_tick_);
+}
+
+// Batch size for one round of uploads.  Virtual so tests can override it.
+size_t ResourceUpdateController::UpdateMoreTexturesSize() const {
+  return texture_updates_per_tick_;
+}
+
+// Upper bound on in-flight blocking uploads: a few batches' worth.
+size_t ResourceUpdateController::MaxBlockingUpdates() const {
+  return UpdateMoreTexturesSize() * kMaxBlockingUpdateIntervals;
+}
+
+// Uploads batches while (a) the uploader is not saturated with blocking
+// uploads and (b) the next batch is estimated to finish before
+// |time_limit_|.  When the uploader is saturated, a delayed retry task is
+// posted instead.  Returns true when full uploads remain queued.
+bool ResourceUpdateController::UpdateMoreTexturesIfEnoughTimeRemaining() {
+  while (resource_provider_->NumBlockingUploads() < MaxBlockingUpdates()) {
+    if (!queue_->FullUploadSize())
+      return false;
+
+    if (!time_limit_.is_null()) {
+      base::TimeTicks completion_time = UpdateMoreTexturesCompletionTime();
+      if (completion_time > time_limit_)
+        return true;
+    }
+
+    UpdateMoreTexturesNow();
+  }
+
+  // Uploader busy: poll again after a short delay (kUploaderBusyTickRate
+  // seconds, converted to milliseconds here).
+  task_posted_ = true;
+  task_runner_->PostDelayedTask(
+      FROM_HERE,
+      base::Bind(&ResourceUpdateController::OnTimerFired,
+                 weak_factory_.GetWeakPtr()),
+      base::TimeDelta::FromMilliseconds(kUploaderBusyTickRate * 1000));
+  return true;
+}
+
+// Uploads one batch of full updates and flushes them as a group.
+void ResourceUpdateController::UpdateMoreTexturesNow() {
+  const size_t batch_size =
+      std::min(queue_->FullUploadSize(), UpdateMoreTexturesSize());
+
+  if (batch_size == 0)
+    return;
+
+  // Drain at most |batch_size| full uploads from the queue.
+  for (size_t remaining = batch_size;
+       remaining > 0 && queue_->FullUploadSize();
+       --remaining) {
+    UpdateTexture(queue_->TakeFirstFullUpload());
+  }
+
+  resource_provider_->FlushUploads();
+}
+
+} // namespace cc
diff --git a/cc/resources/resource_update_controller.h b/cc/resources/resource_update_controller.h
new file mode 100644
index 0000000..994ae2f
--- /dev/null
+++ b/cc/resources/resource_update_controller.h
@@ -0,0 +1,89 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RESOURCE_UPDATE_CONTROLLER_H_
+#define CC_RESOURCES_RESOURCE_UPDATE_CONTROLLER_H_
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time/time.h"
+#include "cc/base/cc_export.h"
+#include "cc/resources/resource_update_queue.h"
+
+namespace base { class SingleThreadTaskRunner; }
+
+namespace cc {
+
+class ResourceProvider;
+
+// Interface through which ResourceUpdateController notifies its owner that
+// the queued uploads have been issued and it is ready to finalize.
+class ResourceUpdateControllerClient {
+ public:
+  virtual void ReadyToFinalizeTextureUpdates() = 0;
+
+ protected:
+  virtual ~ResourceUpdateControllerClient() {}
+};
+
+// Drains a ResourceUpdateQueue, pushing texture uploads to the
+// ResourceProvider in time-bounded batches.  PerformMoreUpdates() may be
+// called repeatedly; when the queue has drained, the client is notified via
+// ReadyToFinalizeTextureUpdates(), after which Finalize() performs any
+// remaining uploads synchronously.
+class CC_EXPORT ResourceUpdateController {
+ public:
+  static scoped_ptr<ResourceUpdateController> Create(
+      ResourceUpdateControllerClient* client,
+      base::SingleThreadTaskRunner* task_runner,
+      scoped_ptr<ResourceUpdateQueue> queue,
+      ResourceProvider* resource_provider) {
+    return make_scoped_ptr(new ResourceUpdateController(
+        client, task_runner, queue.Pass(), resource_provider));
+  }
+  static size_t MaxPartialTextureUpdates();
+
+  virtual ~ResourceUpdateController();
+
+  // Discard uploads to textures that were evicted on the impl thread.
+  void DiscardUploadsToEvictedResources();
+
+  // Uploads as much as fits before |time_limit|; posts a task to continue
+  // (or to signal completion) otherwise.
+  void PerformMoreUpdates(base::TimeTicks time_limit);
+
+  // Synchronously performs all remaining uploads and flushes them.
+  void Finalize();
+
+  // Virtual for testing.
+  virtual size_t UpdateMoreTexturesSize() const;
+  virtual base::TimeTicks UpdateMoreTexturesCompletionTime();
+
+ protected:
+  ResourceUpdateController(ResourceUpdateControllerClient* client,
+                           base::SingleThreadTaskRunner* task_runner,
+                           scoped_ptr<ResourceUpdateQueue> queue,
+                           ResourceProvider* resource_provider);
+
+ private:
+  static size_t MaxFullUpdatesPerTick(ResourceProvider* resource_provider);
+
+  size_t MaxBlockingUpdates() const;
+
+  void UpdateTexture(ResourceUpdate update);
+
+  // This returns true when there were textures left to update.
+  bool UpdateMoreTexturesIfEnoughTimeRemaining();
+  void UpdateMoreTexturesNow();
+  void OnTimerFired();
+
+  ResourceUpdateControllerClient* client_;  // Not owned.
+  scoped_ptr<ResourceUpdateQueue> queue_;
+  ResourceProvider* resource_provider_;  // Not owned.
+  base::TimeTicks time_limit_;
+  size_t texture_updates_per_tick_;
+  bool first_update_attempt_;
+  base::SingleThreadTaskRunner* task_runner_;  // Not owned.
+  bool task_posted_;
+  bool ready_to_finalize_;
+  base::WeakPtrFactory<ResourceUpdateController> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(ResourceUpdateController);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RESOURCE_UPDATE_CONTROLLER_H_
diff --git a/cc/resources/resource_update_controller_unittest.cc b/cc/resources/resource_update_controller_unittest.cc
new file mode 100644
index 0000000..58df016
--- /dev/null
+++ b/cc/resources/resource_update_controller_unittest.cc
@@ -0,0 +1,517 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/resource_update_controller.h"
+
+#include "base/test/test_simple_task_runner.h"
+#include "cc/resources/prioritized_resource_manager.h"
+#include "cc/test/fake_output_surface.h"
+#include "cc/test/fake_output_surface_client.h"
+#include "cc/test/fake_proxy.h"
+#include "cc/test/scheduler_test_common.h"
+#include "cc/test/test_shared_bitmap_manager.h"
+#include "cc/test/test_web_graphics_context_3d.h"
+#include "cc/test/tiled_layer_test_common.h"
+#include "cc/trees/single_thread_proxy.h" // For DebugScopedSetImplThread
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/khronos/GLES2/gl2ext.h"
+
+using testing::Test;
+
+namespace cc {
+namespace {
+
+// Expected flush cadence: the tests below assert that no more than this
+// many uploads occur between consecutive flushes (see OnFlush()/OnUpload()).
+const int kFlushPeriodFull = 4;
+const int kFlushPeriodPartial = kFlushPeriodFull;
+
+class ResourceUpdateControllerTest;
+
+// GL context stub that reports flushes, texSubImage2D uploads, and query
+// polls back to the owning test (see the out-of-line definitions below).
+class WebGraphicsContext3DForUploadTest : public TestWebGraphicsContext3D {
+ public:
+  explicit WebGraphicsContext3DForUploadTest(ResourceUpdateControllerTest* test)
+      : test_(test) {}
+
+  virtual void flush() OVERRIDE;
+  virtual void shallowFlushCHROMIUM() OVERRIDE;
+  virtual void texSubImage2D(GLenum target,
+                             GLint level,
+                             GLint xoffset,
+                             GLint yoffset,
+                             GLsizei width,
+                             GLsizei height,
+                             GLenum format,
+                             GLenum type,
+                             const void* pixels) OVERRIDE;
+
+  virtual void getQueryObjectuivEXT(GLuint id, GLenum pname, GLuint* value)
+      OVERRIDE;
+
+ private:
+  ResourceUpdateControllerTest* test_;  // Not owned.
+};
+
+// Fixture that wires a ResourceUpdateController to a stub GL context and
+// counts uploads/flushes, verifying batching behavior (no back-to-back
+// flushes, bounded runs of uploads between flushes).
+class ResourceUpdateControllerTest : public Test {
+ public:
+  ResourceUpdateControllerTest()
+      : proxy_(),
+        queue_(make_scoped_ptr(new ResourceUpdateQueue)),
+        resource_manager_(PrioritizedResourceManager::Create(&proxy_)),
+        query_results_available_(0),
+        full_upload_count_expected_(0),
+        partial_count_expected_(0),
+        total_upload_count_expected_(0),
+        max_upload_count_per_update_(0),
+        num_consecutive_flushes_(0),
+        num_dangling_uploads_(0),
+        num_total_uploads_(0),
+        num_total_flushes_(0) {}
+
+  virtual ~ResourceUpdateControllerTest() {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    resource_manager_->ClearAllMemory(resource_provider_.get());
+  }
+
+ public:
+  // Called by the stub context whenever the GL stream is flushed.
+  void OnFlush() {
+    // Check for back-to-back flushes.
+    EXPECT_EQ(0, num_consecutive_flushes_) << "Back-to-back flushes detected.";
+
+    num_dangling_uploads_ = 0;
+    num_consecutive_flushes_++;
+    num_total_flushes_++;
+  }
+
+  // Called by the stub context for each texSubImage2D upload.
+  void OnUpload() {
+    // Check for too many consecutive uploads
+    if (num_total_uploads_ < full_upload_count_expected_) {
+      EXPECT_LT(num_dangling_uploads_, kFlushPeriodFull)
+          << "Too many consecutive full uploads detected.";
+    } else {
+      EXPECT_LT(num_dangling_uploads_, kFlushPeriodPartial)
+          << "Too many consecutive partial uploads detected.";
+    }
+
+    num_consecutive_flushes_ = 0;
+    num_dangling_uploads_++;
+    num_total_uploads_++;
+  }
+
+  // Consumes one "query result available" token per call; the test controls
+  // completion of blocking uploads via MakeQueryResultAvailable().
+  bool IsQueryResultAvailable() {
+    if (!query_results_available_)
+      return false;
+
+    query_results_available_--;
+    return true;
+  }
+
+ protected:
+  virtual void SetUp() {
+    bitmap_.allocN32Pixels(300, 150);
+
+    for (int i = 0; i < 4; i++) {
+      textures_[i] = PrioritizedResource::Create(resource_manager_.get(),
+                                                 gfx::Size(300, 150),
+                                                 RGBA_8888);
+      textures_[i]->
+          set_request_priority(PriorityCalculator::VisiblePriority(true));
+    }
+    resource_manager_->PrioritizeTextures();
+
+    output_surface_ = FakeOutputSurface::Create3d(
+        scoped_ptr<TestWebGraphicsContext3D>(
+            new WebGraphicsContext3DForUploadTest(this)));
+    CHECK(output_surface_->BindToClient(&output_surface_client_));
+
+    shared_bitmap_manager_.reset(new TestSharedBitmapManager());
+    resource_provider_ = ResourceProvider::Create(output_surface_.get(),
+                                                  shared_bitmap_manager_.get(),
+                                                  NULL,
+                                                  0,
+                                                  false,
+                                                  1,
+                                                  false);
+  }
+
+  void AppendFullUploadsOfIndexedTextureToUpdateQueue(int count,
+                                                      int texture_index) {
+    full_upload_count_expected_ += count;
+    total_upload_count_expected_ += count;
+
+    const gfx::Rect rect(0, 0, 300, 150);
+    const ResourceUpdate upload = ResourceUpdate::Create(
+        textures_[texture_index].get(), &bitmap_, rect, rect, gfx::Vector2d());
+    for (int i = 0; i < count; i++)
+      queue_->AppendFullUpload(upload);
+  }
+
+  void AppendFullUploadsToUpdateQueue(int count) {
+    AppendFullUploadsOfIndexedTextureToUpdateQueue(count, 0);
+  }
+
+  void AppendPartialUploadsOfIndexedTextureToUpdateQueue(int count,
+                                                         int texture_index) {
+    partial_count_expected_ += count;
+    total_upload_count_expected_ += count;
+
+    const gfx::Rect rect(0, 0, 100, 100);
+    const ResourceUpdate upload = ResourceUpdate::Create(
+        textures_[texture_index].get(), &bitmap_, rect, rect, gfx::Vector2d());
+    for (int i = 0; i < count; i++)
+      queue_->AppendPartialUpload(upload);
+  }
+
+  void AppendPartialUploadsToUpdateQueue(int count) {
+    AppendPartialUploadsOfIndexedTextureToUpdateQueue(count, 0);
+  }
+
+  void SetMaxUploadCountPerUpdate(int count) {
+    max_upload_count_per_update_ = count;
+  }
+
+  // Hands the queue to a throwaway controller and finalizes all uploads
+  // synchronously.
+  void UpdateTextures() {
+    DebugScopedSetImplThreadAndMainThreadBlocked
+        impl_thread_and_main_thread_blocked(&proxy_);
+    scoped_ptr<ResourceUpdateController> update_controller =
+        ResourceUpdateController::Create(NULL,
+                                         proxy_.ImplThreadTaskRunner(),
+                                         queue_.Pass(),
+                                         resource_provider_.get());
+    update_controller->Finalize();
+  }
+
+  void MakeQueryResultAvailable() { query_results_available_++; }
+
+ protected:
+  // Classes required to interact and test the ResourceUpdateController
+  FakeProxy proxy_;
+  FakeOutputSurfaceClient output_surface_client_;
+  scoped_ptr<OutputSurface> output_surface_;
+  scoped_ptr<SharedBitmapManager> shared_bitmap_manager_;
+  scoped_ptr<ResourceProvider> resource_provider_;
+  scoped_ptr<ResourceUpdateQueue> queue_;
+  scoped_ptr<PrioritizedResource> textures_[4];
+  scoped_ptr<PrioritizedResourceManager> resource_manager_;
+  SkBitmap bitmap_;
+  int query_results_available_;
+
+  // Properties / expectations of this test
+  int full_upload_count_expected_;
+  int partial_count_expected_;
+  int total_upload_count_expected_;
+  int max_upload_count_per_update_;
+
+  // Dynamic properties of this test
+  int num_consecutive_flushes_;
+  int num_dangling_uploads_;
+  int num_total_uploads_;
+  int num_total_flushes_;
+};
+
+// Out-of-line overrides: forward GL events to the test's counters.
+void WebGraphicsContext3DForUploadTest::flush() { test_->OnFlush(); }
+
+void WebGraphicsContext3DForUploadTest::shallowFlushCHROMIUM() {
+  test_->OnFlush();
+}
+
+void WebGraphicsContext3DForUploadTest::texSubImage2D(GLenum target,
+                                                      GLint level,
+                                                      GLint xoffset,
+                                                      GLint yoffset,
+                                                      GLsizei width,
+                                                      GLsizei height,
+                                                      GLenum format,
+                                                      GLenum type,
+                                                      const void* pixels) {
+  test_->OnUpload();
+}
+
+// Only the availability query is emulated; the test decides when a blocking
+// upload "completes" via MakeQueryResultAvailable().
+void WebGraphicsContext3DForUploadTest::getQueryObjectuivEXT(GLuint id,
+                                                             GLenum pname,
+                                                             GLuint* params) {
+  if (pname == GL_QUERY_RESULT_AVAILABLE_EXT)
+    *params = test_->IsQueryResultAvailable();
+}
+
+// ZERO UPLOADS TESTS
+TEST_F(ResourceUpdateControllerTest, ZeroUploads) {
+  AppendFullUploadsToUpdateQueue(0);
+  AppendPartialUploadsToUpdateQueue(0);
+  UpdateTextures();
+
+  EXPECT_EQ(0, num_total_flushes_);
+  EXPECT_EQ(0, num_total_uploads_);
+}
+
+// ONE UPLOAD TESTS
+// Each case verifies that the final upload is followed by exactly one flush.
+TEST_F(ResourceUpdateControllerTest, OneFullUpload) {
+  AppendFullUploadsToUpdateQueue(1);
+  AppendPartialUploadsToUpdateQueue(0);
+  UpdateTextures();
+
+  EXPECT_EQ(1, num_total_flushes_);
+  EXPECT_EQ(1, num_total_uploads_);
+  EXPECT_EQ(0, num_dangling_uploads_)
+      << "Last upload wasn't followed by a flush.";
+}
+
+TEST_F(ResourceUpdateControllerTest, OnePartialUpload) {
+  AppendFullUploadsToUpdateQueue(0);
+  AppendPartialUploadsToUpdateQueue(1);
+  UpdateTextures();
+
+  EXPECT_EQ(1, num_total_flushes_);
+  EXPECT_EQ(1, num_total_uploads_);
+  EXPECT_EQ(0, num_dangling_uploads_)
+      << "Last upload wasn't followed by a flush.";
+}
+
+TEST_F(ResourceUpdateControllerTest, OneFullOnePartialUpload) {
+  AppendFullUploadsToUpdateQueue(1);
+  AppendPartialUploadsToUpdateQueue(1);
+  UpdateTextures();
+
+  EXPECT_EQ(1, num_total_flushes_);
+  EXPECT_EQ(2, num_total_uploads_);
+  EXPECT_EQ(0, num_dangling_uploads_)
+      << "Last upload wasn't followed by a flush.";
+}
+
+// This class of tests upload a number of textures that is a multiple
+// of the flush period.
+// NOTE(review): "multipler" is a typo for "multiplier"; renaming requires
+// touching every use in the tests below, so it is left as-is here.
+const int full_upload_flush_multipler = 7;
+const int full_count = full_upload_flush_multipler * kFlushPeriodFull;
+
+const int partial_upload_flush_multipler = 11;
+const int partial_count =
+    partial_upload_flush_multipler * kFlushPeriodPartial;
+
+// With counts that are exact multiples of the flush period, the number of
+// flushes is fully determined.
+TEST_F(ResourceUpdateControllerTest, ManyFullUploads) {
+  AppendFullUploadsToUpdateQueue(full_count);
+  AppendPartialUploadsToUpdateQueue(0);
+  UpdateTextures();
+
+  EXPECT_EQ(full_upload_flush_multipler, num_total_flushes_);
+  EXPECT_EQ(full_count, num_total_uploads_);
+  EXPECT_EQ(0, num_dangling_uploads_)
+      << "Last upload wasn't followed by a flush.";
+}
+
+TEST_F(ResourceUpdateControllerTest, ManyPartialUploads) {
+  AppendFullUploadsToUpdateQueue(0);
+  AppendPartialUploadsToUpdateQueue(partial_count);
+  UpdateTextures();
+
+  EXPECT_EQ(partial_upload_flush_multipler, num_total_flushes_);
+  EXPECT_EQ(partial_count, num_total_uploads_);
+  EXPECT_EQ(0, num_dangling_uploads_)
+      << "Last upload wasn't followed by a flush.";
+}
+
+TEST_F(ResourceUpdateControllerTest, ManyFullManyPartialUploads) {
+  AppendFullUploadsToUpdateQueue(full_count);
+  AppendPartialUploadsToUpdateQueue(partial_count);
+  UpdateTextures();
+
+  EXPECT_EQ(full_upload_flush_multipler + partial_upload_flush_multipler,
+            num_total_flushes_);
+  EXPECT_EQ(full_count + partial_count, num_total_uploads_);
+  EXPECT_EQ(0, num_dangling_uploads_)
+      << "Last upload wasn't followed by a flush.";
+}
+
+// Records whether ReadyToFinalizeTextureUpdates() was invoked.
+class FakeResourceUpdateControllerClient
+    : public ResourceUpdateControllerClient {
+ public:
+  FakeResourceUpdateControllerClient() { Reset(); }
+  void Reset() { ready_to_finalize_called_ = false; }
+  bool ReadyToFinalizeCalled() const { return ready_to_finalize_called_; }
+
+  virtual void ReadyToFinalizeTextureUpdates() OVERRIDE {
+    ready_to_finalize_called_ = true;
+  }
+
+ protected:
+  bool ready_to_finalize_called_;
+};
+
+// Controller subclass with a fake clock and a settable batch size, so tests
+// can control the time-budget decisions deterministically.
+class FakeResourceUpdateController : public ResourceUpdateController {
+ public:
+  static scoped_ptr<FakeResourceUpdateController> Create(
+      ResourceUpdateControllerClient* client,
+      base::TestSimpleTaskRunner* task_runner,
+      scoped_ptr<ResourceUpdateQueue> queue,
+      ResourceProvider* resource_provider) {
+    return make_scoped_ptr(new FakeResourceUpdateController(
+        client, task_runner, queue.Pass(), resource_provider));
+  }
+
+  void SetNow(base::TimeTicks time) { now_ = time; }
+  base::TimeTicks Now() const { return now_; }
+  void SetUpdateTextureTime(base::TimeDelta time) {
+    update_textures_time_ = time;
+  }
+  // Estimated completion time: pending blocking uploads plus one batch,
+  // each costing |update_textures_time_|, starting from the fake |now_|.
+  virtual base::TimeTicks UpdateMoreTexturesCompletionTime() OVERRIDE {
+    size_t total_updates =
+        resource_provider_->NumBlockingUploads() + update_more_textures_size_;
+    return now_ + total_updates * update_textures_time_;
+  }
+  void SetUpdateMoreTexturesSize(size_t size) {
+    update_more_textures_size_ = size;
+  }
+  virtual size_t UpdateMoreTexturesSize() const OVERRIDE {
+    return update_more_textures_size_;
+  }
+
+ protected:
+  FakeResourceUpdateController(ResourceUpdateControllerClient* client,
+                               base::TestSimpleTaskRunner* task_runner,
+                               scoped_ptr<ResourceUpdateQueue> queue,
+                               ResourceProvider* resource_provider)
+      : ResourceUpdateController(
+            client, task_runner, queue.Pass(), resource_provider),
+        resource_provider_(resource_provider),
+        update_more_textures_size_(0) {}
+
+  ResourceProvider* resource_provider_;  // Not owned.
+  base::TimeTicks now_;
+  base::TimeDelta update_textures_time_;
+  size_t update_more_textures_size_;
+};
+
+// Advances the controller's fake clock by the pending task's delay, then
+// runs the task.
+static void RunPendingTask(base::TestSimpleTaskRunner* task_runner,
+                           FakeResourceUpdateController* controller) {
+  EXPECT_TRUE(task_runner->HasPendingTask());
+  controller->SetNow(controller->Now() + task_runner->NextPendingTaskDelay());
+  task_runner->RunPendingTasks();
+}
+
+// Verifies that PerformMoreUpdates() honors the time limit: uploads happen
+// only when they are estimated to complete before the deadline.
+TEST_F(ResourceUpdateControllerTest, UpdateMoreTextures) {
+  FakeResourceUpdateControllerClient client;
+  scoped_refptr<base::TestSimpleTaskRunner> task_runner =
+      new base::TestSimpleTaskRunner;
+
+  SetMaxUploadCountPerUpdate(1);
+  AppendFullUploadsToUpdateQueue(3);
+  AppendPartialUploadsToUpdateQueue(0);
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+      impl_thread_and_main_thread_blocked(&proxy_);
+  scoped_ptr<FakeResourceUpdateController> controller(
+      FakeResourceUpdateController::Create(&client,
+                                           task_runner.get(),
+                                           queue_.Pass(),
+                                           resource_provider_.get()));
+
+  controller->SetNow(controller->Now() + base::TimeDelta::FromMilliseconds(1));
+  controller->SetUpdateTextureTime(base::TimeDelta::FromMilliseconds(100));
+  controller->SetUpdateMoreTexturesSize(1);
+  // Not enough time for any updates.
+  controller->PerformMoreUpdates(controller->Now() +
+                                 base::TimeDelta::FromMilliseconds(90));
+  EXPECT_FALSE(task_runner->HasPendingTask());
+
+  controller->SetUpdateTextureTime(base::TimeDelta::FromMilliseconds(100));
+  controller->SetUpdateMoreTexturesSize(1);
+  // Only enough time for 1 update.
+  controller->PerformMoreUpdates(controller->Now() +
+                                 base::TimeDelta::FromMilliseconds(120));
+  EXPECT_FALSE(task_runner->HasPendingTask());
+  EXPECT_EQ(1, num_total_uploads_);
+
+  // Complete one upload.
+  MakeQueryResultAvailable();
+
+  controller->SetUpdateTextureTime(base::TimeDelta::FromMilliseconds(100));
+  controller->SetUpdateMoreTexturesSize(1);
+  // Enough time for 2 updates.
+  controller->PerformMoreUpdates(controller->Now() +
+                                 base::TimeDelta::FromMilliseconds(220));
+  RunPendingTask(task_runner.get(), controller.get());
+  EXPECT_FALSE(task_runner->HasPendingTask());
+  EXPECT_TRUE(client.ReadyToFinalizeCalled());
+  EXPECT_EQ(3, num_total_uploads_);
+}
+
+// Verifies that the ready-to-finalize notification fires only once, and
+// that a subsequent PerformMoreUpdates() on a drained queue is a no-op.
+TEST_F(ResourceUpdateControllerTest, NoMoreUpdates) {
+  FakeResourceUpdateControllerClient client;
+  scoped_refptr<base::TestSimpleTaskRunner> task_runner =
+      new base::TestSimpleTaskRunner;
+
+  SetMaxUploadCountPerUpdate(1);
+  AppendFullUploadsToUpdateQueue(2);
+  AppendPartialUploadsToUpdateQueue(0);
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+      impl_thread_and_main_thread_blocked(&proxy_);
+  scoped_ptr<FakeResourceUpdateController> controller(
+      FakeResourceUpdateController::Create(&client,
+                                           task_runner.get(),
+                                           queue_.Pass(),
+                                           resource_provider_.get()));
+
+  controller->SetNow(controller->Now() + base::TimeDelta::FromMilliseconds(1));
+  controller->SetUpdateTextureTime(base::TimeDelta::FromMilliseconds(100));
+  controller->SetUpdateMoreTexturesSize(1);
+  // Enough time for 3 updates but only 2 necessary.
+  controller->PerformMoreUpdates(controller->Now() +
+                                 base::TimeDelta::FromMilliseconds(310));
+  RunPendingTask(task_runner.get(), controller.get());
+  EXPECT_FALSE(task_runner->HasPendingTask());
+  EXPECT_TRUE(client.ReadyToFinalizeCalled());
+  EXPECT_EQ(2, num_total_uploads_);
+
+  client.Reset();
+  controller->SetUpdateTextureTime(base::TimeDelta::FromMilliseconds(100));
+  controller->SetUpdateMoreTexturesSize(1);
+  // Enough time for updates but no more updates left.
+  controller->PerformMoreUpdates(controller->Now() +
+                                 base::TimeDelta::FromMilliseconds(310));
+
+  // ReadyToFinalizeTextureUpdates should only be called once.
+  EXPECT_FALSE(task_runner->HasPendingTask());
+  EXPECT_FALSE(client.ReadyToFinalizeCalled());
+  EXPECT_EQ(2, num_total_uploads_);
+}
+
+// Even when the per-call deadline never allows an upload up front, repeated
+// PerformMoreUpdates() calls must still drain the queue eventually (the
+// controller uploads directly on non-first attempts).
+TEST_F(ResourceUpdateControllerTest, UpdatesCompleteInFiniteTime) {
+  FakeResourceUpdateControllerClient client;
+  scoped_refptr<base::TestSimpleTaskRunner> task_runner =
+      new base::TestSimpleTaskRunner;
+
+  SetMaxUploadCountPerUpdate(1);
+  AppendFullUploadsToUpdateQueue(2);
+  AppendPartialUploadsToUpdateQueue(0);
+
+  DebugScopedSetImplThreadAndMainThreadBlocked
+      impl_thread_and_main_thread_blocked(&proxy_);
+  scoped_ptr<FakeResourceUpdateController> controller(
+      FakeResourceUpdateController::Create(&client,
+                                           task_runner.get(),
+                                           queue_.Pass(),
+                                           resource_provider_.get()));
+
+  controller->SetNow(controller->Now() + base::TimeDelta::FromMilliseconds(1));
+  controller->SetUpdateTextureTime(base::TimeDelta::FromMilliseconds(500));
+  controller->SetUpdateMoreTexturesSize(1);
+
+  for (int i = 0; i < 100; i++) {
+    if (client.ReadyToFinalizeCalled())
+      break;
+
+    // Not enough time for any updates.
+    controller->PerformMoreUpdates(controller->Now() +
+                                   base::TimeDelta::FromMilliseconds(400));
+
+    if (task_runner->HasPendingTask())
+      RunPendingTask(task_runner.get(), controller.get());
+  }
+
+  EXPECT_FALSE(task_runner->HasPendingTask());
+  EXPECT_TRUE(client.ReadyToFinalizeCalled());
+  EXPECT_EQ(2, num_total_uploads_);
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/resource_update_queue.cc b/cc/resources/resource_update_queue.cc
new file mode 100644
index 0000000..31727cd
--- /dev/null
+++ b/cc/resources/resource_update_queue.cc
@@ -0,0 +1,56 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/resource_update_queue.h"
+
+#include "cc/resources/prioritized_resource.h"
+
+namespace cc {
+
+ResourceUpdateQueue::ResourceUpdateQueue() {}
+
+ResourceUpdateQueue::~ResourceUpdateQueue() {}
+
+// Appends |upload| (copied by value) to the back of the respective queue;
+// uploads are consumed FIFO by TakeFirst*Upload().
+void ResourceUpdateQueue::AppendFullUpload(const ResourceUpdate& upload) {
+  full_entries_.push_back(upload);
+}
+
+void ResourceUpdateQueue::AppendPartialUpload(const ResourceUpdate& upload) {
+  partial_entries_.push_back(upload);
+}
+
+// Filters evicted-texture uploads out of both queues.
+void ResourceUpdateQueue::ClearUploadsToEvictedResources() {
+  ClearUploadsToEvictedResources(&full_entries_);
+  ClearUploadsToEvictedResources(&partial_entries_);
+}
+
+// Rebuilds |entry_queue| in place, dropping uploads whose backing texture
+// resource was evicted.  Relative order of the survivors is preserved.
+void ResourceUpdateQueue::ClearUploadsToEvictedResources(
+    std::deque<ResourceUpdate>* entry_queue) {
+  std::deque<ResourceUpdate> surviving;
+  while (!entry_queue->empty()) {
+    const ResourceUpdate& entry = entry_queue->front();
+    if (!entry.texture->BackingResourceWasEvicted())
+      surviving.push_back(entry);
+    entry_queue->pop_front();
+  }
+  entry_queue->swap(surviving);
+}
+
+// Pops and returns the oldest queued upload.  Callers must check
+// FullUploadSize()/PartialUploadSize() first; front() on an empty deque is
+// undefined behavior.
+ResourceUpdate ResourceUpdateQueue::TakeFirstFullUpload() {
+  ResourceUpdate first = full_entries_.front();
+  full_entries_.pop_front();
+  return first;
+}
+
+ResourceUpdate ResourceUpdateQueue::TakeFirstPartialUpload() {
+  ResourceUpdate first = partial_entries_.front();
+  partial_entries_.pop_front();
+  return first;
+}
+
+// True while any full or partial upload remains queued.
+bool ResourceUpdateQueue::HasMoreUpdates() const {
+  return !full_entries_.empty() || !partial_entries_.empty();
+}
+
+} // namespace cc
diff --git a/cc/resources/resource_update_queue.h b/cc/resources/resource_update_queue.h
new file mode 100644
index 0000000..de455ce
--- /dev/null
+++ b/cc/resources/resource_update_queue.h
@@ -0,0 +1,43 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RESOURCE_UPDATE_QUEUE_H_
+#define CC_RESOURCES_RESOURCE_UPDATE_QUEUE_H_
+
+#include <deque>
+#include "base/basictypes.h"
+#include "cc/base/cc_export.h"
+#include "cc/resources/resource_update.h"
+
+namespace cc {
+
+// FIFO queues of pending full and partial texture uploads, consumed by
+// ResourceUpdateController.
+class CC_EXPORT ResourceUpdateQueue {
+ public:
+  ResourceUpdateQueue();
+  virtual ~ResourceUpdateQueue();
+
+  void AppendFullUpload(const ResourceUpdate& upload);
+  void AppendPartialUpload(const ResourceUpdate& upload);
+
+  // Drops queued uploads whose backing texture resource was evicted.
+  void ClearUploadsToEvictedResources();
+
+  // Pop the oldest queued upload.  Precondition: the respective queue is
+  // non-empty (check the size accessors below first).
+  ResourceUpdate TakeFirstFullUpload();
+  ResourceUpdate TakeFirstPartialUpload();
+
+  size_t FullUploadSize() const { return full_entries_.size(); }
+  size_t PartialUploadSize() const { return partial_entries_.size(); }
+
+  bool HasMoreUpdates() const;
+
+ private:
+  void ClearUploadsToEvictedResources(std::deque<ResourceUpdate>* entry_queue);
+  std::deque<ResourceUpdate> full_entries_;
+  std::deque<ResourceUpdate> partial_entries_;
+
+  DISALLOW_COPY_AND_ASSIGN(ResourceUpdateQueue);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RESOURCE_UPDATE_QUEUE_H_
diff --git a/cc/resources/return_callback.h b/cc/resources/return_callback.h
new file mode 100644
index 0000000..abf5aec
--- /dev/null
+++ b/cc/resources/return_callback.h
@@ -0,0 +1,20 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RETURN_CALLBACK_H_
+#define CC_RESOURCES_RETURN_CALLBACK_H_
+
+#include "base/callback.h"
+#include "cc/resources/returned_resource.h"
+
+namespace cc {
+class BlockingTaskRunner;
+
+// Callback type invoked with an array of resources being handed back;
+// |main_thread_task_runner| lets the handler post follow-up work.
+// NOTE(review): the exact threading contract is not visible here — confirm
+// with callers.
+typedef base::Callback<void(const ReturnedResourceArray&,
+                            BlockingTaskRunner* main_thread_task_runner)>
+    ReturnCallback;
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RETURN_CALLBACK_H_
diff --git a/cc/resources/returned_resource.h b/cc/resources/returned_resource.h
new file mode 100644
index 0000000..e2008b4
--- /dev/null
+++ b/cc/resources/returned_resource.h
@@ -0,0 +1,27 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_RETURNED_RESOURCE_H_
+#define CC_RESOURCES_RETURNED_RESOURCE_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "cc/base/cc_export.h"
+
+namespace cc {
+
+// Plain value struct describing one resource being returned to its
+// producer.  All fields zero-initialize in the default constructor.
+struct CC_EXPORT ReturnedResource {
+  ReturnedResource() : id(0), sync_point(0), count(0), lost(false) {}
+  unsigned id;           // Identifier of the returned resource.
+  unsigned sync_point;   // Presumably a GL sync point to wait on — confirm.
+  int count;             // Reference count being returned — TODO confirm.
+  bool lost;             // Whether the resource was lost.
+};
+
+typedef std::vector<ReturnedResource> ReturnedResourceArray;
+
+} // namespace cc
+
+#endif // CC_RESOURCES_RETURNED_RESOURCE_H_
diff --git a/cc/resources/scoped_gpu_raster.cc b/cc/resources/scoped_gpu_raster.cc
new file mode 100644
index 0000000..517c7a7
--- /dev/null
+++ b/cc/resources/scoped_gpu_raster.cc
@@ -0,0 +1,52 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/scoped_gpu_raster.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "third_party/khronos/GLES2/gl2.h"
+#include "third_party/khronos/GLES2/gl2ext.h"
+#include "third_party/skia/include/gpu/GrContext.h"
+
+using gpu::gles2::GLES2Interface;
+
+namespace cc {
+
+// RAII: entering GPU-raster mode in the constructor, leaving it in the
+// destructor.  |context_provider| is not owned.
+ScopedGpuRaster::ScopedGpuRaster(ContextProvider* context_provider)
+    : context_provider_(context_provider) {
+  BeginGpuRaster();
+}
+
+ScopedGpuRaster::~ScopedGpuRaster() {
+  EndGpuRaster();
+}
+
+// Pushes a debug group marker and resets the GrContext's cached GL state
+// before rasterization begins.
+void ScopedGpuRaster::BeginGpuRaster() {
+  GLES2Interface* gl = context_provider_->ContextGL();
+
+  // TODO(alokp): Use a trace macro to push/pop markers.
+  // Using push/pop functions directly incurs cost to evaluate function
+  // arguments even when tracing is disabled.
+  gl->PushGroupMarkerEXT(0, "GpuRasterization");
+
+  class GrContext* gr_context = context_provider_->GrContext();
+  // TODO(sohanjg): Remove when TestContextProvider gives a GrContext.
+  if (gr_context)
+    gr_context->resetContext();
+}
+
+// Flushes pending GrContext work and pops the debug group marker.
+void ScopedGpuRaster::EndGpuRaster() {
+  GLES2Interface* gl = context_provider_->ContextGL();
+
+  class GrContext* gr_context = context_provider_->GrContext();
+  // TODO(sohanjg): Remove when TestContextProvider gives a GrContext.
+  if (gr_context)
+    gr_context->flush();
+
+  // TODO(alokp): Use a trace macro to push/pop markers.
+  // Using push/pop functions directly incurs cost to evaluate function
+  // arguments even when tracing is disabled.
+  gl->PopGroupMarkerEXT();
+}
+
+} // namespace cc
diff --git a/cc/resources/scoped_gpu_raster.h b/cc/resources/scoped_gpu_raster.h
new file mode 100644
index 0000000..a322ccb
--- /dev/null
+++ b/cc/resources/scoped_gpu_raster.h
@@ -0,0 +1,34 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_SCOPED_GPU_RASTER_H_
+#define CC_RESOURCES_SCOPED_GPU_RASTER_H_
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "cc/base/cc_export.h"
+#include "cc/output/context_provider.h"
+
+namespace cc {
+
+// The following class is needed to modify GL resources using GPU
+// raster. The user must ensure that they only use GPU raster on
+// GL resources while an instance of this class is alive.
+class CC_EXPORT ScopedGpuRaster {
+ public:
+ ScopedGpuRaster(ContextProvider* context_provider);  // NOTE(review): single-arg ctor; consider explicit.
+ ~ScopedGpuRaster();
+
+ private:
+ void BeginGpuRaster();  // Pushes a GL debug marker and resets Skia's GL state cache.
+ void EndGpuRaster();  // Flushes Skia's GL work and pops the debug marker.
+
+ ContextProvider* context_provider_;  // Not owned; must outlive this object.
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedGpuRaster);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_SCOPED_GPU_RASTER_H_
diff --git a/cc/resources/scoped_resource.cc b/cc/resources/scoped_resource.cc
new file mode 100644
index 0000000..407de51
--- /dev/null
+++ b/cc/resources/scoped_resource.cc
@@ -0,0 +1,66 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/scoped_resource.h"
+
+namespace cc {
+
+ScopedResource::ScopedResource(ResourceProvider* resource_provider)
+ : resource_provider_(resource_provider) {
+ DCHECK(resource_provider_);  // Provider is required to create/delete resource ids.
+}
+
+ScopedResource::~ScopedResource() {
+ Free();  // Deletes the held resource unless Leak() was called first.
+}
+
+void ScopedResource::Allocate(const gfx::Size& size,
+ ResourceProvider::TextureHint hint,
+ ResourceFormat format) {
+ DCHECK(!id());  // Must not already hold a resource.
+ DCHECK(!size.IsEmpty());
+
+ set_dimensions(size, format);
+ set_id(resource_provider_->CreateResource(
+ size, GL_CLAMP_TO_EDGE, hint, format));
+
+#if DCHECK_IS_ON
+ allocate_thread_id_ = base::PlatformThread::CurrentId();  // Free() must run on this thread.
+#endif
+}
+
+void ScopedResource::AllocateManaged(const gfx::Size& size,
+ GLenum target,
+ ResourceFormat format) {
+ DCHECK(!id());  // Must not already hold a resource.
+ DCHECK(!size.IsEmpty());
+
+ set_dimensions(size, format);
+ set_id(resource_provider_->CreateManagedResource(
+ size,
+ target,
+ GL_CLAMP_TO_EDGE,
+ ResourceProvider::TextureHintImmutable,
+ format));
+
+#if DCHECK_IS_ON
+ allocate_thread_id_ = base::PlatformThread::CurrentId();  // Free() must run on this thread.
+#endif
+}
+
+void ScopedResource::Free() {
+ if (id()) {
+#if DCHECK_IS_ON
+ DCHECK(allocate_thread_id_ == base::PlatformThread::CurrentId());  // Same-thread free only.
+#endif
+ resource_provider_->DeleteResource(id());
+ }
+ set_id(0);  // Always cleared, even if nothing was allocated.
+}
+
+void ScopedResource::Leak() {
+ set_id(0);  // Drop ownership without deleting; the provider keeps the resource.
+}
+
+} // namespace cc
diff --git a/cc/resources/scoped_resource.h b/cc/resources/scoped_resource.h
new file mode 100644
index 0000000..f2c1a0d
--- /dev/null
+++ b/cc/resources/scoped_resource.h
@@ -0,0 +1,52 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_SCOPED_RESOURCE_H_
+#define CC_RESOURCES_SCOPED_RESOURCE_H_
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "cc/base/cc_export.h"
+#include "cc/resources/resource.h"
+
+#if DCHECK_IS_ON
+#include "base/threading/platform_thread.h"
+#endif
+
+namespace cc {
+
+class CC_EXPORT ScopedResource : public Resource {
+ public:
+  static scoped_ptr<ScopedResource> Create(
+      ResourceProvider* resource_provider) {
+    return make_scoped_ptr(new ScopedResource(resource_provider));
+  }
+  virtual ~ScopedResource();  // Frees the resource via Free() in the .cc file.
+
+  void Allocate(const gfx::Size& size,
+                ResourceProvider::TextureHint hint,
+                ResourceFormat format);
+  void AllocateManaged(const gfx::Size& size,
+                       GLenum target,
+                       ResourceFormat format);
+  void Free();  // Deletes the held resource (if any) and clears the id.
+  void Leak();  // Clears the id without deleting; caller takes over ownership.
+
+ protected:
+  explicit ScopedResource(ResourceProvider* provider);
+
+ private:
+  ResourceProvider* resource_provider_;  // Not owned; must outlive this object.
+
+#if DCHECK_IS_ON
+  base::PlatformThreadId allocate_thread_id_;  // Thread that called Allocate*(); checked in Free().
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedResource);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_SCOPED_RESOURCE_H_
diff --git a/cc/resources/scoped_resource_unittest.cc b/cc/resources/scoped_resource_unittest.cc
new file mode 100644
index 0000000..f671479
--- /dev/null
+++ b/cc/resources/scoped_resource_unittest.cc
@@ -0,0 +1,150 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/scoped_resource.h"
+
+#include "cc/output/renderer.h"
+#include "cc/test/fake_output_surface.h"
+#include "cc/test/fake_output_surface_client.h"
+#include "cc/test/test_shared_bitmap_manager.h"
+#include "cc/test/tiled_layer_test_common.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cc {
+namespace {
+
+TEST(ScopedResourceTest, NewScopedResource) {  // A fresh ScopedResource holds no id, size, or bytes.
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(FakeOutputSurface::Create3d());
+ CHECK(output_surface->BindToClient(&output_surface_client));
+
+ scoped_ptr<SharedBitmapManager> shared_bitmap_manager(
+ new TestSharedBitmapManager());
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+ scoped_ptr<ScopedResource> texture =
+ ScopedResource::Create(resource_provider.get());
+
+ // New scoped textures do not hold a texture yet.
+ EXPECT_EQ(0u, texture->id());
+
+ // New scoped textures do not have a size yet.
+ EXPECT_EQ(gfx::Size(), texture->size());
+ EXPECT_EQ(0u, texture->bytes());
+}
+
+TEST(ScopedResourceTest, CreateScopedResource) {  // Allocate() sets id, size, format, and byte size.
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(FakeOutputSurface::Create3d());
+ CHECK(output_surface->BindToClient(&output_surface_client));
+
+ scoped_ptr<SharedBitmapManager> shared_bitmap_manager(
+ new TestSharedBitmapManager());
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+ scoped_ptr<ScopedResource> texture =
+ ScopedResource::Create(resource_provider.get());
+ texture->Allocate(
+ gfx::Size(30, 30), ResourceProvider::TextureHintImmutable, RGBA_8888);
+
+ // The texture has an allocated byte-size now.
+ size_t expected_bytes = 30 * 30 * 4;  // 4 bytes per pixel for RGBA_8888.
+ EXPECT_EQ(expected_bytes, texture->bytes());
+
+ EXPECT_LT(0u, texture->id());
+ EXPECT_EQ(static_cast<unsigned>(RGBA_8888), texture->format());
+ EXPECT_EQ(gfx::Size(30, 30), texture->size());
+}
+
+TEST(ScopedResourceTest, ScopedResourceIsDeleted) {  // Destruction and Free() both release the provider resource.
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(FakeOutputSurface::Create3d());
+ CHECK(output_surface->BindToClient(&output_surface_client));
+
+ scoped_ptr<SharedBitmapManager> shared_bitmap_manager(
+ new TestSharedBitmapManager());
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+ {
+ scoped_ptr<ScopedResource> texture =
+ ScopedResource::Create(resource_provider.get());
+
+ EXPECT_EQ(0u, resource_provider->num_resources());
+ texture->Allocate(
+ gfx::Size(30, 30), ResourceProvider::TextureHintImmutable, RGBA_8888);
+ EXPECT_LT(0u, texture->id());
+ EXPECT_EQ(1u, resource_provider->num_resources());
+ }
+
+ EXPECT_EQ(0u, resource_provider->num_resources());  // Destructor freed the resource.
+ {
+ scoped_ptr<ScopedResource> texture =
+ ScopedResource::Create(resource_provider.get());
+ EXPECT_EQ(0u, resource_provider->num_resources());
+ texture->Allocate(
+ gfx::Size(30, 30), ResourceProvider::TextureHintImmutable, RGBA_8888);
+ EXPECT_LT(0u, texture->id());
+ EXPECT_EQ(1u, resource_provider->num_resources());
+ texture->Free();  // Explicit Free() also releases it immediately.
+ EXPECT_EQ(0u, resource_provider->num_resources());
+ }
+}
+
+TEST(ScopedResourceTest, LeakScopedResource) {  // Leak() abandons ownership; the resource survives Free() and destruction.
+ FakeOutputSurfaceClient output_surface_client;
+ scoped_ptr<OutputSurface> output_surface(FakeOutputSurface::Create3d());
+ CHECK(output_surface->BindToClient(&output_surface_client));
+
+ scoped_ptr<SharedBitmapManager> shared_bitmap_manager(
+ new TestSharedBitmapManager());
+ scoped_ptr<ResourceProvider> resource_provider(
+ ResourceProvider::Create(output_surface.get(),
+ shared_bitmap_manager.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false));
+ {
+ scoped_ptr<ScopedResource> texture =
+ ScopedResource::Create(resource_provider.get());
+
+ EXPECT_EQ(0u, resource_provider->num_resources());
+ texture->Allocate(
+ gfx::Size(30, 30), ResourceProvider::TextureHintImmutable, RGBA_8888);
+ EXPECT_LT(0u, texture->id());
+ EXPECT_EQ(1u, resource_provider->num_resources());
+
+ texture->Leak();  // id is cleared but the provider still holds the resource.
+ EXPECT_EQ(0u, texture->id());
+ EXPECT_EQ(1u, resource_provider->num_resources());
+
+ texture->Free();  // No-op after Leak(): id is already 0.
+ EXPECT_EQ(0u, texture->id());
+ EXPECT_EQ(1u, resource_provider->num_resources());
+ }
+
+ EXPECT_EQ(1u, resource_provider->num_resources());  // Leaked resource outlives the ScopedResource.
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/scoped_ui_resource.cc b/cc/resources/scoped_ui_resource.cc
new file mode 100644
index 0000000..e69b4bc
--- /dev/null
+++ b/cc/resources/scoped_ui_resource.cc
@@ -0,0 +1,40 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/scoped_ui_resource.h"
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "cc/trees/layer_tree_host.h"
+
+namespace cc {
+
+scoped_ptr<ScopedUIResource> ScopedUIResource::Create(
+ LayerTreeHost* host,
+ const UIResourceBitmap& bitmap) {
+ return make_scoped_ptr(new ScopedUIResource(host, bitmap));
+}
+
+ScopedUIResource::ScopedUIResource(LayerTreeHost* host,
+ const UIResourceBitmap& bitmap)
+ : bitmap_(bitmap), host_(host) {
+ DCHECK(host_);  // Needed both to create and later delete the UI resource.
+ id_ = host_->CreateUIResource(this);  // Registers |this| as the bitmap supplier.
+}
+
+// User must make sure that host is still valid before this object goes out of
+// scope.
+ScopedUIResource::~ScopedUIResource() {
+ if (id_) {
+ DCHECK(host_);
+ host_->DeleteUIResource(id_);  // Unregister on destruction (RAII).
+ }
+}
+
+UIResourceBitmap ScopedUIResource::GetBitmap(UIResourceId uid,
+ bool resource_lost) {
+ return bitmap_;  // Always the original bitmap, even when the resource was lost.
+}
+
+} // namespace cc
diff --git a/cc/resources/scoped_ui_resource.h b/cc/resources/scoped_ui_resource.h
new file mode 100644
index 0000000..c257e1e
--- /dev/null
+++ b/cc/resources/scoped_ui_resource.h
@@ -0,0 +1,48 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_SCOPED_UI_RESOURCE_H_
+#define CC_RESOURCES_SCOPED_UI_RESOURCE_H_
+
+#include "base/memory/ref_counted.h"
+#include "cc/base/cc_export.h"
+#include "cc/resources/ui_resource_bitmap.h"
+#include "cc/resources/ui_resource_client.h"
+#include "ui/gfx/size.h"
+
+namespace cc {
+
+class LayerTreeHost;
+
+// ScopedUIResource creates an UIResource from a bitmap and a LayerTreeHost.
+// This class holds a pointer to the host so that when the instance goes out of
+// scope, the created resource is deleted. On a GetBitmap call from the
+// UIResource manager, ScopeUIResource always returns the reference to the
+// initially given bitmap regardless of whether the request was due to lost
+// resource or not.
+class CC_EXPORT ScopedUIResource : public UIResourceClient {
+ public:
+  static scoped_ptr<ScopedUIResource> Create(LayerTreeHost* host,
+                                             const UIResourceBitmap& bitmap);
+  virtual ~ScopedUIResource();  // Deletes the UI resource from the host.
+
+  // UIResourceClient implementation.
+  virtual UIResourceBitmap GetBitmap(UIResourceId uid,
+                                     bool resource_lost) OVERRIDE;
+  UIResourceId id() { return id_; }  // Id assigned by the host at construction.
+
+ protected:
+  ScopedUIResource(LayerTreeHost* host, const UIResourceBitmap& bitmap);
+
+  UIResourceBitmap bitmap_;  // Copy of the bitmap handed back by GetBitmap().
+  LayerTreeHost* host_;  // Not owned; must outlive this object (see dtor comment).
+  UIResourceId id_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ScopedUIResource);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_SCOPED_UI_RESOURCE_H_
diff --git a/cc/resources/shared_bitmap.cc b/cc/resources/shared_bitmap.cc
new file mode 100644
index 0000000..31cf245
--- /dev/null
+++ b/cc/resources/shared_bitmap.cc
@@ -0,0 +1,81 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/shared_bitmap.h"
+
+#include "base/logging.h"
+#include "base/numerics/safe_math.h"
+#include "base/rand_util.h"
+
+namespace cc {
+
+SharedBitmap::SharedBitmap(
+ base::SharedMemory* memory,
+ const SharedBitmapId& id,
+ const base::Callback<void(SharedBitmap* bitmap)>& free_callback)
+ : memory_(memory),
+ pixels_(static_cast<uint8*>(memory_->memory())),  // Requires |memory| non-null and mapped.
+ id_(id),
+ free_callback_(free_callback) {
+}
+
+SharedBitmap::SharedBitmap(
+ uint8* pixels,
+ const SharedBitmapId& id,
+ const base::Callback<void(SharedBitmap* bitmap)>& free_callback)
+ : memory_(NULL), pixels_(pixels), id_(id), free_callback_(free_callback) {
+}
+
+SharedBitmap::~SharedBitmap() { free_callback_.Run(this); }  // Owner reclaims storage here.
+
+// static
+bool SharedBitmap::SizeInBytes(const gfx::Size& size, size_t* size_in_bytes) {
+ if (size.IsEmpty())
+ return false;
+ base::CheckedNumeric<size_t> s = 4;  // 4 bytes per pixel.
+ s *= size.width();
+ s *= size.height();
+ if (!s.IsValid())
+ return false;  // Overflow: width * height * 4 does not fit in size_t.
+ *size_in_bytes = s.ValueOrDie();
+ return true;
+}
+
+// static
+size_t SharedBitmap::CheckedSizeInBytes(const gfx::Size& size) {
+ CHECK(!size.IsEmpty());
+ base::CheckedNumeric<size_t> s = 4;  // 4 bytes per pixel.
+ s *= size.width();
+ s *= size.height();
+ return s.ValueOrDie();  // CHECK-crashes on overflow.
+}
+
+// static
+size_t SharedBitmap::UncheckedSizeInBytes(const gfx::Size& size) {
+ DCHECK(VerifySizeInBytes(size));  // Caller must have validated already.
+ size_t s = 4;  // 4 bytes per pixel.
+ s *= size.width();
+ s *= size.height();
+ return s;
+}
+
+// static
+bool SharedBitmap::VerifySizeInBytes(const gfx::Size& size) {
+ if (size.IsEmpty())
+ return false;
+ base::CheckedNumeric<size_t> s = 4;  // 4 bytes per pixel.
+ s *= size.width();
+ s *= size.height();
+ return s.IsValid();
+}
+
+// static
+SharedBitmapId SharedBitmap::GenerateId() {
+ SharedBitmapId id;
+ // Needs cryptographically-secure random numbers.
+ base::RandBytes(id.name, sizeof(id.name));
+ return id;
+}
+
+} // namespace cc
diff --git a/cc/resources/shared_bitmap.h b/cc/resources/shared_bitmap.h
new file mode 100644
index 0000000..a90e47a
--- /dev/null
+++ b/cc/resources/shared_bitmap.h
@@ -0,0 +1,70 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_SHARED_BITMAP_H_
+#define CC_RESOURCES_SHARED_BITMAP_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/shared_memory.h"
+#include "cc/base/cc_export.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "ui/gfx/size.h"
+
+namespace base { class SharedMemory; }
+
+namespace cc {
+typedef gpu::Mailbox SharedBitmapId;
+
+class CC_EXPORT SharedBitmap {
+ public:
+  SharedBitmap(base::SharedMemory* memory,
+               const SharedBitmapId& id,
+               const base::Callback<void(SharedBitmap* bitmap)>& free_callback);
+
+  SharedBitmap(uint8* pixels,
+               const SharedBitmapId& id,
+               const base::Callback<void(SharedBitmap* bitmap)>& free_callback);
+
+  ~SharedBitmap();  // Runs |free_callback_| with |this|.
+
+  bool operator<(const SharedBitmap& right) const {  // Ordered by memory pointer, then id.
+    if (memory_ < right.memory_)
+      return true;
+    if (memory_ > right.memory_)
+      return false;
+    return id_ < right.id_;
+  }
+
+  uint8* pixels() { return pixels_; }
+
+  base::SharedMemory* memory() { return memory_; }  // NULL when constructed from raw pixels.
+
+  SharedBitmapId id() { return id_; }  // NOTE(review): could be const-qualified.
+
+  // Returns true if the size is valid and false otherwise.
+  static bool SizeInBytes(const gfx::Size& size, size_t* size_in_bytes);
+  // Dies with a CRASH() if the size can not be represented as a positive number
+  // of bytes.
+  static size_t CheckedSizeInBytes(const gfx::Size& size);
+  // Returns the size in bytes but may overflow or return 0. Only do this for
+  // sizes that have already been checked.
+  static size_t UncheckedSizeInBytes(const gfx::Size& size);
+  // Returns true if the size is valid and false otherwise.
+  static bool VerifySizeInBytes(const gfx::Size& size);
+
+  static SharedBitmapId GenerateId();
+
+ private:
+  base::SharedMemory* memory_;  // Not owned; may be NULL (raw-pixels ctor).
+  uint8* pixels_;  // Points into |memory_| or at externally-owned pixels.
+  SharedBitmapId id_;
+  base::Callback<void(SharedBitmap* bitmap)> free_callback_;  // Run in the dtor.
+
+  DISALLOW_COPY_AND_ASSIGN(SharedBitmap);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_SHARED_BITMAP_H_
diff --git a/cc/resources/shared_bitmap_manager.h b/cc/resources/shared_bitmap_manager.h
new file mode 100644
index 0000000..e6e49af
--- /dev/null
+++ b/cc/resources/shared_bitmap_manager.h
@@ -0,0 +1,33 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_SHARED_BITMAP_MANAGER_H_
+#define CC_RESOURCES_SHARED_BITMAP_MANAGER_H_
+
+#include "base/basictypes.h"
+#include "cc/base/cc_export.h"
+#include "cc/resources/shared_bitmap.h"
+#include "ui/gfx/size.h"
+
+namespace cc {
+
+class CC_EXPORT SharedBitmapManager {  // Abstract factory/registry for SharedBitmaps.
+ public:
+  SharedBitmapManager() {}
+  virtual ~SharedBitmapManager() {}
+
+  virtual scoped_ptr<SharedBitmap> AllocateSharedBitmap(const gfx::Size&) = 0;
+  virtual scoped_ptr<SharedBitmap> GetSharedBitmapFromId(
+      const gfx::Size&,
+      const SharedBitmapId&) = 0;  // Looks up an existing bitmap by id.
+  virtual scoped_ptr<SharedBitmap> GetBitmapForSharedMemory(
+      base::SharedMemory*) = 0;  // Wraps caller-owned shared memory.
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SharedBitmapManager);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_SHARED_BITMAP_MANAGER_H_
diff --git a/cc/resources/single_release_callback.cc b/cc/resources/single_release_callback.cc
new file mode 100644
index 0000000..4565963
--- /dev/null
+++ b/cc/resources/single_release_callback.cc
@@ -0,0 +1,29 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/single_release_callback.h"
+
+#include "base/callback_helpers.h"
+#include "base/logging.h"
+
+namespace cc {
+
+SingleReleaseCallback::SingleReleaseCallback(const ReleaseCallback& callback)
+ : has_been_run_(false), callback_(callback) {
+ DCHECK(!callback_.is_null())
+ << "Use a NULL SingleReleaseCallback for an empty callback.";
+}
+
+SingleReleaseCallback::~SingleReleaseCallback() {
+ DCHECK(callback_.is_null() || has_been_run_)
+ << "SingleReleaseCallback was never run.";  // Enforces exactly-once semantics.
+}
+
+void SingleReleaseCallback::Run(uint32 sync_point, bool is_lost) {
+ DCHECK(!has_been_run_) << "SingleReleaseCallback was run more than once.";
+ has_been_run_ = true;  // Set before running so re-entrancy is also caught.
+ callback_.Run(sync_point, is_lost);
+}
+
+} // namespace cc
diff --git a/cc/resources/single_release_callback.h b/cc/resources/single_release_callback.h
new file mode 100644
index 0000000..6f64df6
--- /dev/null
+++ b/cc/resources/single_release_callback.h
@@ -0,0 +1,33 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_SINGLE_RELEASE_CALLBACK_H_
+#define CC_RESOURCES_SINGLE_RELEASE_CALLBACK_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "cc/base/cc_export.h"
+#include "cc/resources/release_callback.h"
+
+namespace cc {
+
+class CC_EXPORT SingleReleaseCallback {  // Wraps a ReleaseCallback that must run exactly once.
+ public:
+  static scoped_ptr<SingleReleaseCallback> Create(const ReleaseCallback& cb) {
+    return make_scoped_ptr(new SingleReleaseCallback(cb));
+  }
+
+  ~SingleReleaseCallback();  // DCHECKs that Run() happened.
+
+  void Run(uint32 sync_point, bool is_lost);  // Must be called exactly once.
+
+ private:
+  explicit SingleReleaseCallback(const ReleaseCallback& callback);
+
+  bool has_been_run_;
+  ReleaseCallback callback_;  // NOTE(review): class is copyable; consider DISALLOW_COPY_AND_ASSIGN — confirm intent.
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_SINGLE_RELEASE_CALLBACK_H_
diff --git a/cc/resources/single_release_callback_impl.cc b/cc/resources/single_release_callback_impl.cc
new file mode 100644
index 0000000..6f3c535
--- /dev/null
+++ b/cc/resources/single_release_callback_impl.cc
@@ -0,0 +1,34 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/single_release_callback_impl.h"
+
+#include "base/callback_helpers.h"
+#include "base/logging.h"
+#include "cc/trees/blocking_task_runner.h"
+
+namespace cc {
+
+SingleReleaseCallbackImpl::SingleReleaseCallbackImpl(
+ const ReleaseCallbackImpl& callback)
+ : has_been_run_(false), callback_(callback) {
+ DCHECK(!callback_.is_null())
+ << "Use a NULL SingleReleaseCallbackImpl for an empty callback.";
+}
+
+SingleReleaseCallbackImpl::~SingleReleaseCallbackImpl() {
+ DCHECK(callback_.is_null() || has_been_run_)
+ << "SingleReleaseCallbackImpl was never run.";  // Enforces exactly-once semantics.
+}
+
+void SingleReleaseCallbackImpl::Run(
+ uint32 sync_point,
+ bool is_lost,
+ BlockingTaskRunner* main_thread_task_runner) {
+ DCHECK(!has_been_run_) << "SingleReleaseCallbackImpl was run more than once.";
+ has_been_run_ = true;  // Set before running so re-entrancy is also caught.
+ callback_.Run(sync_point, is_lost, main_thread_task_runner);
+}
+
+} // namespace cc
diff --git a/cc/resources/single_release_callback_impl.h b/cc/resources/single_release_callback_impl.h
new file mode 100644
index 0000000..41220dc
--- /dev/null
+++ b/cc/resources/single_release_callback_impl.h
@@ -0,0 +1,36 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_SINGLE_RELEASE_CALLBACK_IMPL_H_
+#define CC_RESOURCES_SINGLE_RELEASE_CALLBACK_IMPL_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "cc/base/cc_export.h"
+#include "cc/resources/release_callback_impl.h"
+
+namespace cc {
+
+class CC_EXPORT SingleReleaseCallbackImpl {  // Impl-side variant: Run() also takes a task runner.
+ public:
+  static scoped_ptr<SingleReleaseCallbackImpl> Create(
+      const ReleaseCallbackImpl& cb) {
+    return make_scoped_ptr(new SingleReleaseCallbackImpl(cb));
+  }
+
+  ~SingleReleaseCallbackImpl();  // DCHECKs that Run() happened.
+
+  void Run(uint32 sync_point,
+           bool is_lost,
+           BlockingTaskRunner* main_thread_task_runner);  // Must be called exactly once.
+
+  private:
+  explicit SingleReleaseCallbackImpl(const ReleaseCallbackImpl& callback);
+
+  bool has_been_run_;
+  ReleaseCallbackImpl callback_;  // NOTE(review): class is copyable; consider DISALLOW_COPY_AND_ASSIGN — confirm intent.
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_SINGLE_RELEASE_CALLBACK_IMPL_H_
diff --git a/cc/resources/skpicture_content_layer_updater.cc b/cc/resources/skpicture_content_layer_updater.cc
new file mode 100644
index 0000000..bd524f2
--- /dev/null
+++ b/cc/resources/skpicture_content_layer_updater.cc
@@ -0,0 +1,56 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/skpicture_content_layer_updater.h"
+
+#include "base/debug/trace_event.h"
+#include "cc/debug/rendering_stats_instrumentation.h"
+#include "cc/resources/layer_painter.h"
+#include "cc/resources/prioritized_resource.h"
+#include "cc/resources/resource_update_queue.h"
+#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkPictureRecorder.h"
+
+namespace cc {
+
+SkPictureContentLayerUpdater::SkPictureContentLayerUpdater(
+ scoped_ptr<LayerPainter> painter,
+ RenderingStatsInstrumentation* stats_instrumentation,
+ int layer_id)
+ : ContentLayerUpdater(painter.Pass(), stats_instrumentation, layer_id) {}
+
+SkPictureContentLayerUpdater::~SkPictureContentLayerUpdater() {}
+
+void SkPictureContentLayerUpdater::PrepareToUpdate(
+ const gfx::Size& content_size,
+ const gfx::Rect& paint_rect,
+ const gfx::Size& tile_size,
+ float contents_width_scale,
+ float contents_height_scale) {
+ SkPictureRecorder recorder;
+ SkCanvas* canvas =
+ recorder.beginRecording(paint_rect.width(), paint_rect.height(), NULL, 0);
+ DCHECK_EQ(paint_rect.width(), canvas->getBaseLayerSize().width());
+ DCHECK_EQ(paint_rect.height(), canvas->getBaseLayerSize().height());
+ base::TimeTicks start_time =
+ rendering_stats_instrumentation_->StartRecording();  // Time the paint for stats.
+ PaintContents(canvas,
+ content_size,
+ paint_rect,
+ contents_width_scale,
+ contents_height_scale);
+ base::TimeDelta duration =
+ rendering_stats_instrumentation_->EndRecording(start_time);
+ rendering_stats_instrumentation_->AddRecord(
+ duration, paint_rect.width() * paint_rect.height());  // Record time per painted area.
+ picture_ = skia::AdoptRef(recorder.endRecording());  // Keep the recording for DrawPicture().
+}
+
+void SkPictureContentLayerUpdater::DrawPicture(SkCanvas* canvas) {
+ TRACE_EVENT0("cc", "SkPictureContentLayerUpdater::DrawPicture");
+ if (picture_)
+ canvas->drawPicture(picture_.get());  // No-op until PrepareToUpdate() has recorded one.
+}
+
+} // namespace cc
diff --git a/cc/resources/skpicture_content_layer_updater.h b/cc/resources/skpicture_content_layer_updater.h
new file mode 100644
index 0000000..9c79c74
--- /dev/null
+++ b/cc/resources/skpicture_content_layer_updater.h
@@ -0,0 +1,43 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_SKPICTURE_CONTENT_LAYER_UPDATER_H_
+#define CC_RESOURCES_SKPICTURE_CONTENT_LAYER_UPDATER_H_
+
+#include "cc/resources/content_layer_updater.h"
+#include "skia/ext/refptr.h"
+#include "third_party/skia/include/core/SkPicture.h"
+
+class SkCanvas;
+
+namespace cc {
+
+class LayerPainter;
+
+// This class records the content_rect into an SkPicture. Subclass provides
+// SkCanvas to DrawPicture() for tile updating based on this recorded picture.
+class SkPictureContentLayerUpdater : public ContentLayerUpdater {
+ protected:
+  SkPictureContentLayerUpdater(
+      scoped_ptr<LayerPainter> painter,
+      RenderingStatsInstrumentation* stats_instrumentation,
+      int layer_id);
+  virtual ~SkPictureContentLayerUpdater();
+
+  virtual void PrepareToUpdate(const gfx::Size& content_size,
+                               const gfx::Rect& paint_rect,
+                               const gfx::Size& tile_size,
+                               float contents_width_scale,
+                               float contents_height_scale) OVERRIDE;  // Records paint_rect into |picture_|.
+  void DrawPicture(SkCanvas* canvas);  // Replays the recorded picture onto |canvas|.
+
+ private:
+  skia::RefPtr<SkPicture> picture_;  // Last recording; null until PrepareToUpdate() runs.
+
+  DISALLOW_COPY_AND_ASSIGN(SkPictureContentLayerUpdater);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_SKPICTURE_CONTENT_LAYER_UPDATER_H_
diff --git a/cc/resources/task_graph_runner.cc b/cc/resources/task_graph_runner.cc
new file mode 100644
index 0000000..bcbc0dd
--- /dev/null
+++ b/cc/resources/task_graph_runner.cc
@@ -0,0 +1,480 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/task_graph_runner.h"
+
+#include <algorithm>
+
+#include "base/debug/trace_event.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace cc {
+namespace {
+
+// Helper class for iterating over all dependents of a task.
+class DependentIterator {
+ public:
+  DependentIterator(TaskGraph* graph, const Task* task)
+      : graph_(graph),
+        task_(task),
+        current_index_(static_cast<size_t>(-1)),  // Pre-increment position; ++ below finds the first edge.
+        current_node_(NULL) {
+    ++(*this);
+  }
+
+  TaskGraph::Node& operator->() const {  // NOTE(review): returns a reference, so "it->x" would not compile; callers use *it — confirm.
+    DCHECK_LT(current_index_, graph_->edges.size());
+    DCHECK_EQ(graph_->edges[current_index_].task, task_);
+    DCHECK(current_node_);
+    return *current_node_;
+  }
+
+  TaskGraph::Node& operator*() const {
+    DCHECK_LT(current_index_, graph_->edges.size());
+    DCHECK_EQ(graph_->edges[current_index_].task, task_);
+    DCHECK(current_node_);
+    return *current_node_;
+  }
+
+  // Note: Performance can be improved by keeping edges sorted.
+  DependentIterator& operator++() {
+    // Find next dependency edge for |task_|.
+    do {
+      ++current_index_;
+      if (current_index_ == graph_->edges.size())
+        return *this;  // Exhausted: operator bool() becomes false.
+    } while (graph_->edges[current_index_].task != task_);
+
+    // Now find the node for the dependent of this edge.
+    TaskGraph::Node::Vector::iterator it =
+        std::find_if(graph_->nodes.begin(),
+                     graph_->nodes.end(),
+                     TaskGraph::Node::TaskComparator(
+                         graph_->edges[current_index_].dependent));
+    DCHECK(it != graph_->nodes.end());  // Every edge's dependent must have a node.
+    current_node_ = &(*it);
+
+    return *this;
+  }
+
+  operator bool() const { return current_index_ < graph_->edges.size(); }
+
+ private:
+  TaskGraph* graph_;  // Not owned.
+  const Task* task_;  // Not owned; the task whose dependents are iterated.
+  size_t current_index_;  // Index into graph_->edges.
+  TaskGraph::Node* current_node_;  // Node of the current edge's dependent.
+};
+
+class DependencyMismatchComparator {  // Matches nodes whose |dependencies| count disagrees with the edge list.
+ public:
+  explicit DependencyMismatchComparator(const TaskGraph* graph)
+      : graph_(graph) {}
+
+  bool operator()(const TaskGraph::Node& node) const {
+    return static_cast<size_t>(std::count_if(graph_->edges.begin(),
+                                             graph_->edges.end(),
+                                             DependentComparator(node.task))) !=
+           node.dependencies;  // Count edges pointing at this node's task.
+  }
+
+ private:
+  class DependentComparator {  // True for edges whose dependent is |dependent_|.
+   public:
+    explicit DependentComparator(const Task* dependent)
+        : dependent_(dependent) {}
+
+    bool operator()(const TaskGraph::Edge& edge) const {
+      return edge.dependent == dependent_;
+    }
+
+   private:
+    const Task* dependent_;
+  };
+
+  const TaskGraph* graph_;  // Not owned.
+};
+
+} // namespace
+
+Task::Task() : will_run_(false), did_run_(false) {
+}
+
+Task::~Task() {
+ DCHECK(!will_run_);  // Must not be destroyed between WillRun() and DidRun().
+}
+
+void Task::WillRun() {
+ DCHECK(!will_run_);
+ DCHECK(!did_run_);  // A task runs at most once.
+ will_run_ = true;
+}
+
+void Task::DidRun() {
+ DCHECK(will_run_);  // WillRun() must precede DidRun().
+ will_run_ = false;
+ did_run_ = true;
+}
+
+bool Task::HasFinishedRunning() const { return did_run_; }
+
+TaskGraph::TaskGraph() {}
+
+TaskGraph::~TaskGraph() {}
+
+void TaskGraph::Swap(TaskGraph* other) {
+ nodes.swap(other->nodes);
+ edges.swap(other->edges);
+}
+
+void TaskGraph::Reset() {
+ nodes.clear();
+ edges.clear();
+}
+
+TaskGraphRunner::TaskNamespace::TaskNamespace() {}
+
+TaskGraphRunner::TaskNamespace::~TaskNamespace() {}
+
+TaskGraphRunner::TaskGraphRunner()
+ : lock_(),
+ has_ready_to_run_tasks_cv_(&lock_),  // Condition variables share |lock_|.
+ has_namespaces_with_finished_running_tasks_cv_(&lock_),
+ next_namespace_id_(1),
+ shutdown_(false) {}
+
+TaskGraphRunner::~TaskGraphRunner() {
+ {
+ base::AutoLock lock(lock_);
+
+ DCHECK_EQ(0u, ready_to_run_namespaces_.size());  // All work must be drained first.
+ DCHECK_EQ(0u, namespaces_.size());
+ }
+}
+
+NamespaceToken TaskGraphRunner::GetNamespaceToken() {
+ base::AutoLock lock(lock_);
+
+ NamespaceToken token(next_namespace_id_++);  // Monotonic ids; allocated under the lock.
+ DCHECK(namespaces_.find(token.id_) == namespaces_.end());
+ return token;
+}
+
+// Replaces the namespace's current task graph with |graph|. Tasks present
+// in the old graph but not in |graph| are canceled: they are moved straight
+// to |completed_tasks| without being run (unless already running or already
+// finished). On return the caller's |graph| holds the previous graph's
+// contents (see the Swap below).
+void TaskGraphRunner::ScheduleTasks(NamespaceToken token, TaskGraph* graph) {
+  TRACE_EVENT2("cc",
+               "TaskGraphRunner::ScheduleTasks",
+               "num_nodes",
+               graph->nodes.size(),
+               "num_edges",
+               graph->edges.size());
+
+  DCHECK(token.IsValid());
+  // Each node's dependency count must match the number of edges pointing
+  // at it; DependencyMismatchComparator (defined earlier in this file)
+  // flags any node where they disagree.
+  DCHECK(std::find_if(graph->nodes.begin(),
+                      graph->nodes.end(),
+                      DependencyMismatchComparator(graph)) ==
+         graph->nodes.end());
+
+  {
+    base::AutoLock lock(lock_);
+
+    DCHECK(!shutdown_);
+
+    // operator[] creates the namespace on first use for this token.
+    TaskNamespace& task_namespace = namespaces_[token.id_];
+
+    // First adjust number of dependencies to reflect completed tasks.
+    for (Task::Vector::iterator it = task_namespace.completed_tasks.begin();
+         it != task_namespace.completed_tasks.end();
+         ++it) {
+      for (DependentIterator node_it(graph, it->get()); node_it; ++node_it) {
+        TaskGraph::Node& node = *node_it;
+        DCHECK_LT(0u, node.dependencies);
+        node.dependencies--;
+      }
+    }
+
+    // Build new "ready to run" queue and remove nodes from old graph.
+    task_namespace.ready_to_run_tasks.clear();
+    for (TaskGraph::Node::Vector::iterator it = graph->nodes.begin();
+         it != graph->nodes.end();
+         ++it) {
+      TaskGraph::Node& node = *it;
+
+      // Remove any old nodes that are associated with this task. The result is
+      // that the old graph is left with all nodes not present in this graph,
+      // which we use below to determine what tasks need to be canceled.
+      TaskGraph::Node::Vector::iterator old_it =
+          std::find_if(task_namespace.graph.nodes.begin(),
+                       task_namespace.graph.nodes.end(),
+                       TaskGraph::Node::TaskComparator(node.task));
+      if (old_it != task_namespace.graph.nodes.end()) {
+        // Swap-with-back + pop_back gives O(1) removal; node order in the
+        // old graph does not matter at this point.
+        std::swap(*old_it, task_namespace.graph.nodes.back());
+        task_namespace.graph.nodes.pop_back();
+      }
+
+      // Task is not ready to run if dependencies are not yet satisfied.
+      if (node.dependencies)
+        continue;
+
+      // Skip if already finished running task.
+      if (node.task->HasFinishedRunning())
+        continue;
+
+      // Skip if already running.
+      if (std::find(task_namespace.running_tasks.begin(),
+                    task_namespace.running_tasks.end(),
+                    node.task) != task_namespace.running_tasks.end())
+        continue;
+
+      task_namespace.ready_to_run_tasks.push_back(
+          PrioritizedTask(node.task, node.priority));
+    }
+
+    // Rearrange the elements in |ready_to_run_tasks| in such a way that they
+    // form a heap.
+    std::make_heap(task_namespace.ready_to_run_tasks.begin(),
+                   task_namespace.ready_to_run_tasks.end(),
+                   CompareTaskPriority);
+
+    // Swap task graph. The caller's |graph| now holds only the nodes that
+    // were scheduled previously but are absent from the new graph.
+    task_namespace.graph.Swap(graph);
+
+    // Determine what tasks in old graph need to be canceled.
+    for (TaskGraph::Node::Vector::iterator it = graph->nodes.begin();
+         it != graph->nodes.end();
+         ++it) {
+      TaskGraph::Node& node = *it;
+
+      // Skip if already finished running task.
+      if (node.task->HasFinishedRunning())
+        continue;
+
+      // Skip if already running.
+      if (std::find(task_namespace.running_tasks.begin(),
+                    task_namespace.running_tasks.end(),
+                    node.task) != task_namespace.running_tasks.end())
+        continue;
+
+      // Canceled tasks are never run; move them directly to
+      // |completed_tasks| so the origin thread still collects them.
+      DCHECK(std::find(task_namespace.completed_tasks.begin(),
+                       task_namespace.completed_tasks.end(),
+                       node.task) == task_namespace.completed_tasks.end());
+      task_namespace.completed_tasks.push_back(node.task);
+    }
+
+    // Build new "ready to run" task namespaces queue.
+    ready_to_run_namespaces_.clear();
+    for (TaskNamespaceMap::iterator it = namespaces_.begin();
+         it != namespaces_.end();
+         ++it) {
+      if (!it->second.ready_to_run_tasks.empty())
+        ready_to_run_namespaces_.push_back(&it->second);
+    }
+
+    // Rearrange the task namespaces in |ready_to_run_namespaces_| in such a way
+    // that they form a heap.
+    std::make_heap(ready_to_run_namespaces_.begin(),
+                   ready_to_run_namespaces_.end(),
+                   CompareTaskNamespacePriority);
+
+    // If there is more work available, wake up worker thread.
+    if (!ready_to_run_namespaces_.empty())
+      has_ready_to_run_tasks_cv_.Signal();
+  }
+}
+
+// Blocks the calling (origin) thread until the namespace identified by
+// |token| has no running and no ready-to-run tasks. Returns immediately if
+// the namespace does not exist (never scheduled, or already collected).
+void TaskGraphRunner::WaitForTasksToFinishRunning(NamespaceToken token) {
+  TRACE_EVENT0("cc", "TaskGraphRunner::WaitForTasksToFinishRunning");
+
+  DCHECK(token.IsValid());
+
+  {
+    base::AutoLock lock(lock_);
+
+    TaskNamespaceMap::const_iterator it = namespaces_.find(token.id_);
+    if (it == namespaces_.end())
+      return;
+
+    const TaskNamespace& task_namespace = it->second;
+
+    // NOTE(review): |task_namespace| is a reference into |namespaces_|. If
+    // another thread could CollectCompletedTasks() for this same token and
+    // erase the namespace while Wait() has |lock_| released, this reference
+    // would dangle -- confirm only one thread waits/collects per token.
+    while (!HasFinishedRunningTasksInNamespace(&task_namespace))
+      has_namespaces_with_finished_running_tasks_cv_.Wait();
+
+    // There may be other namespaces that have finished running tasks, so wake
+    // up another origin thread.
+    has_namespaces_with_finished_running_tasks_cv_.Signal();
+  }
+}
+
+// Moves all tasks that have completed (ran or were canceled) for |token|
+// into |completed_tasks|, which must be empty on entry. If the namespace
+// has no remaining work afterwards, its entry is removed entirely; a later
+// ScheduleTasks() with the same token will recreate it.
+void TaskGraphRunner::CollectCompletedTasks(NamespaceToken token,
+                                            Task::Vector* completed_tasks) {
+  TRACE_EVENT0("cc", "TaskGraphRunner::CollectCompletedTasks");
+
+  DCHECK(token.IsValid());
+
+  {
+    base::AutoLock lock(lock_);
+
+    TaskNamespaceMap::iterator it = namespaces_.find(token.id_);
+    if (it == namespaces_.end())
+      return;
+
+    TaskNamespace& task_namespace = it->second;
+
+    DCHECK_EQ(0u, completed_tasks->size());
+    // swap() transfers ownership of the completed tasks without copying
+    // refcounted pointers one by one.
+    completed_tasks->swap(task_namespace.completed_tasks);
+    if (!HasFinishedRunningTasksInNamespace(&task_namespace))
+      return;
+
+    // Remove namespace if finished running tasks.
+    DCHECK_EQ(0u, task_namespace.completed_tasks.size());
+    DCHECK_EQ(0u, task_namespace.ready_to_run_tasks.size());
+    DCHECK_EQ(0u, task_namespace.running_tasks.size());
+    namespaces_.erase(it);
+  }
+}
+
+// Tells worker threads blocked in Run() to exit once idle. Must only be
+// called after all namespaces have drained (the DCHECKs enforce this).
+void TaskGraphRunner::Shutdown() {
+  base::AutoLock lock(lock_);
+
+  DCHECK_EQ(0u, ready_to_run_namespaces_.size());
+  DCHECK_EQ(0u, namespaces_.size());
+
+  DCHECK(!shutdown_);
+  shutdown_ = true;
+
+  // Wake up a worker so it knows it should exit. This will cause all workers
+  // to exit as each will wake up another worker before exiting.
+  has_ready_to_run_tasks_cv_.Signal();
+}
+
+// Worker-thread loop: runs tasks until Shutdown() is observed with no work
+// pending. Intended to be the body of a dedicated worker thread.
+void TaskGraphRunner::Run() {
+  base::AutoLock lock(lock_);
+
+  while (true) {
+    if (ready_to_run_namespaces_.empty()) {
+      // Exit when shutdown is set and no more tasks are pending.
+      if (shutdown_)
+        break;
+
+      // Wait for more tasks. Wait() releases |lock_| while blocked and
+      // reacquires it before returning.
+      has_ready_to_run_tasks_cv_.Wait();
+      continue;
+    }
+
+    RunTaskWithLockAcquired();
+  }
+
+  // We noticed we should exit. Wake up the next worker so it knows it should
+  // exit as well (because the Shutdown() code only signals once).
+  has_ready_to_run_tasks_cv_.Signal();
+}
+
+// Synchronously runs ready tasks on the calling thread until none remain.
+// Never blocks waiting for new work, unlike Run().
+void TaskGraphRunner::RunUntilIdle() {
+  base::AutoLock lock(lock_);
+
+  while (!ready_to_run_namespaces_.empty())
+    RunTaskWithLockAcquired();
+}
+
+// Pops and runs the single highest-priority task from the highest-priority
+// namespace. |lock_| must be held on entry and is held on exit; it is
+// released only around the task's RunOnWorkerThread() call.
+void TaskGraphRunner::RunTaskWithLockAcquired() {
+  TRACE_EVENT0("toplevel", "TaskGraphRunner::RunTask");
+
+  lock_.AssertAcquired();
+  DCHECK(!ready_to_run_namespaces_.empty());
+
+  // Take top priority TaskNamespace from |ready_to_run_namespaces_|.
+  std::pop_heap(ready_to_run_namespaces_.begin(),
+                ready_to_run_namespaces_.end(),
+                CompareTaskNamespacePriority);
+  TaskNamespace* task_namespace = ready_to_run_namespaces_.back();
+  ready_to_run_namespaces_.pop_back();
+  DCHECK(!task_namespace->ready_to_run_tasks.empty());
+
+  // Take top priority task from |ready_to_run_tasks|.
+  std::pop_heap(task_namespace->ready_to_run_tasks.begin(),
+                task_namespace->ready_to_run_tasks.end(),
+                CompareTaskPriority);
+  // Hold a reference so the task stays alive while |lock_| is released.
+  scoped_refptr<Task> task(task_namespace->ready_to_run_tasks.back().task);
+  task_namespace->ready_to_run_tasks.pop_back();
+
+  // Add task namespace back to |ready_to_run_namespaces_| if not empty after
+  // taking top priority task.
+  if (!task_namespace->ready_to_run_tasks.empty()) {
+    ready_to_run_namespaces_.push_back(task_namespace);
+    std::push_heap(ready_to_run_namespaces_.begin(),
+                   ready_to_run_namespaces_.end(),
+                   CompareTaskNamespacePriority);
+  }
+
+  // Add task to |running_tasks|. Doing this before dropping |lock_| keeps
+  // HasFinishedRunningTasksInNamespace() false, which prevents
+  // CollectCompletedTasks() from erasing this namespace while we run.
+  task_namespace->running_tasks.push_back(task.get());
+
+  // There may be more work available, so wake up another worker thread.
+  has_ready_to_run_tasks_cv_.Signal();
+
+  // Call WillRun() before releasing |lock_| and running task.
+  task->WillRun();
+
+  {
+    base::AutoUnlock unlock(lock_);
+
+    task->RunOnWorkerThread();
+  }
+
+  // This will mark task as finished running.
+  task->DidRun();
+
+  // Remove task from |running_tasks|.
+  TaskVector::iterator it = std::find(task_namespace->running_tasks.begin(),
+                                      task_namespace->running_tasks.end(),
+                                      task.get());
+  DCHECK(it != task_namespace->running_tasks.end());
+  std::swap(*it, task_namespace->running_tasks.back());
+  task_namespace->running_tasks.pop_back();
+
+  // Now iterate over all dependents to decrement dependencies and check if they
+  // are ready to run. Heap restoration of |ready_to_run_namespaces_| is
+  // deferred until after the loop so it is done at most once.
+  bool ready_to_run_namespaces_has_heap_properties = true;
+  for (DependentIterator it(&task_namespace->graph, task.get()); it; ++it) {
+    TaskGraph::Node& dependent_node = *it;
+
+    DCHECK_LT(0u, dependent_node.dependencies);
+    dependent_node.dependencies--;
+    // Task is ready if it has no dependencies. Add it to |ready_to_run_tasks_|.
+    if (!dependent_node.dependencies) {
+      bool was_empty = task_namespace->ready_to_run_tasks.empty();
+      task_namespace->ready_to_run_tasks.push_back(
+          PrioritizedTask(dependent_node.task, dependent_node.priority));
+      std::push_heap(task_namespace->ready_to_run_tasks.begin(),
+                     task_namespace->ready_to_run_tasks.end(),
+                     CompareTaskPriority);
+      // Task namespace is ready if it has at least one ready to run task. Add
+      // it to |ready_to_run_namespaces_| if it just become ready.
+      if (was_empty) {
+        DCHECK(std::find(ready_to_run_namespaces_.begin(),
+                         ready_to_run_namespaces_.end(),
+                         task_namespace) == ready_to_run_namespaces_.end());
+        ready_to_run_namespaces_.push_back(task_namespace);
+      }
+      ready_to_run_namespaces_has_heap_properties = false;
+    }
+  }
+
+  // Rearrange the task namespaces in |ready_to_run_namespaces_| in such a way
+  // that they yet again form a heap.
+  if (!ready_to_run_namespaces_has_heap_properties) {
+    std::make_heap(ready_to_run_namespaces_.begin(),
+                   ready_to_run_namespaces_.end(),
+                   CompareTaskNamespacePriority);
+  }
+
+  // Finally add task to |completed_tasks_|.
+  task_namespace->completed_tasks.push_back(task);
+
+  // If namespace has finished running all tasks, wake up origin thread.
+  if (HasFinishedRunningTasksInNamespace(task_namespace))
+    has_namespaces_with_finished_running_tasks_cv_.Signal();
+}
+
+} // namespace cc
diff --git a/cc/resources/task_graph_runner.h b/cc/resources/task_graph_runner.h
new file mode 100644
index 0000000..0ed7140
--- /dev/null
+++ b/cc/resources/task_graph_runner.h
@@ -0,0 +1,232 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_TASK_GRAPH_RUNNER_H_
+#define CC_RESOURCES_TASK_GRAPH_RUNNER_H_
+
+#include <map>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/condition_variable.h"
+#include "cc/base/cc_export.h"
+
+namespace cc {
+
+// A unit of work executed by TaskGraphRunner on a worker thread.
+// Refcounted so the runner can keep a task alive while it runs outside the
+// runner's lock.
+class CC_EXPORT Task : public base::RefCountedThreadSafe<Task> {
+ public:
+  typedef std::vector<scoped_refptr<Task> > Vector;
+
+  // Does the task's work. Called on a worker thread with no runner lock
+  // held.
+  virtual void RunOnWorkerThread() = 0;
+
+  // Bookkeeping hooks called by TaskGraphRunner immediately before and
+  // after RunOnWorkerThread().
+  void WillRun();
+  void DidRun();
+  bool HasFinishedRunning() const;
+
+ protected:
+  friend class base::RefCountedThreadSafe<Task>;
+
+  Task();
+  virtual ~Task();
+
+  // State flags maintained by WillRun()/DidRun(); definitions are outside
+  // this file.
+  bool will_run_;
+  bool did_run_;
+};
+
+// Dependencies are represented as edges in a task graph. Each graph node is
+// assigned a priority and a run count that matches the number of dependencies.
+// Priority range from 0 (most favorable scheduling) to UINT_MAX (least
+// favorable).
+// Dependencies are represented as edges in a task graph. Each graph node is
+// assigned a priority and a run count that matches the number of dependencies.
+// Priority range from 0 (most favorable scheduling) to UINT_MAX (least
+// favorable).
+struct CC_EXPORT TaskGraph {
+  struct Node {
+    // Predicate matching nodes by their task pointer; used with
+    // std::find_if.
+    class TaskComparator {
+     public:
+      explicit TaskComparator(const Task* task) : task_(task) {}
+
+      bool operator()(const Node& node) const { return node.task == task_; }
+
+     private:
+      const Task* task_;
+    };
+
+    typedef std::vector<Node> Vector;
+
+    Node(Task* task, unsigned priority, size_t dependencies)
+        : task(task), priority(priority), dependencies(dependencies) {}
+
+    Task* task;
+    unsigned priority;
+    // Number of tasks that must finish before this one may run. Must equal
+    // the number of edges whose |dependent| is this node's task.
+    size_t dependencies;
+  };
+
+  // An edge means |dependent| may not run until |task| has finished.
+  struct Edge {
+    typedef std::vector<Edge> Vector;
+
+    Edge(const Task* task, Task* dependent)
+        : task(task), dependent(dependent) {}
+
+    const Task* task;
+    Task* dependent;
+  };
+
+  TaskGraph();
+  ~TaskGraph();
+
+  void Swap(TaskGraph* other);
+  void Reset();
+
+  Node::Vector nodes;
+  Edge::Vector edges;
+};
+
+class TaskGraphRunner;
+
+// Opaque identifier that defines a namespace of tasks.
+class CC_EXPORT NamespaceToken {
+ public:
+ NamespaceToken() : id_(0) {}
+ ~NamespaceToken() {}
+
+ bool IsValid() const { return id_ != 0; }
+
+ private:
+ friend class TaskGraphRunner;
+
+ explicit NamespaceToken(int id) : id_(id) {}
+
+ int id_;
+};
+
+// A TaskGraphRunner is used to process tasks with dependencies. There can
+// be any number of TaskGraphRunner instances per thread. Tasks can be scheduled
+// from any thread and they can be run on any thread.
+// A TaskGraphRunner is used to process tasks with dependencies. There can
+// be any number of TaskGraphRunner instances per thread. Tasks can be scheduled
+// from any thread and they can be run on any thread.
+class CC_EXPORT TaskGraphRunner {
+ public:
+  TaskGraphRunner();
+  virtual ~TaskGraphRunner();
+
+  // Returns a unique token that can be used to pass a task graph to
+  // ScheduleTasks(). Valid tokens are always nonzero.
+  NamespaceToken GetNamespaceToken();
+
+  // Schedule running of tasks in |graph|. Tasks previously scheduled but no
+  // longer needed will be canceled unless already running. Canceled tasks are
+  // moved to |completed_tasks| without being run. The result is that once
+  // scheduled, a task is guaranteed to end up in the |completed_tasks| queue
+  // even if it later gets canceled by another call to ScheduleTasks().
+  void ScheduleTasks(NamespaceToken token, TaskGraph* graph);
+
+  // Wait for all scheduled tasks to finish running.
+  void WaitForTasksToFinishRunning(NamespaceToken token);
+
+  // Collect all completed tasks in |completed_tasks|.
+  void CollectCompletedTasks(NamespaceToken token,
+                             Task::Vector* completed_tasks);
+
+  // Run tasks until Shutdown() is called.
+  void Run();
+
+  // Process all pending tasks, but don't wait/sleep. Return as soon as all
+  // tasks that can be run are taken care of.
+  void RunUntilIdle();
+
+  // Signals the Run method to return when it becomes idle. It will continue to
+  // process tasks and future tasks as long as they are scheduled.
+  // Warning: if the TaskGraphRunner remains busy, it may never quit.
+  void Shutdown();
+
+ private:
+  // A task paired with the priority it was scheduled at; element type of
+  // the per-namespace ready-to-run heap.
+  struct PrioritizedTask {
+    typedef std::vector<PrioritizedTask> Vector;
+
+    PrioritizedTask(Task* task, unsigned priority)
+        : task(task), priority(priority) {}
+
+    Task* task;
+    unsigned priority;
+  };
+
+  typedef std::vector<const Task*> TaskVector;
+
+  // All scheduling state for one namespace token.
+  struct TaskNamespace {
+    typedef std::vector<TaskNamespace*> Vector;
+
+    TaskNamespace();
+    ~TaskNamespace();
+
+    // Current task graph.
+    TaskGraph graph;
+
+    // Ordered set of tasks that are ready to run.
+    PrioritizedTask::Vector ready_to_run_tasks;
+
+    // Completed tasks not yet collected by origin thread.
+    Task::Vector completed_tasks;
+
+    // This set contains all currently running tasks.
+    TaskVector running_tasks;
+  };
+
+  typedef std::map<int, TaskNamespace> TaskNamespaceMap;
+
+  static bool CompareTaskPriority(const PrioritizedTask& a,
+                                  const PrioritizedTask& b) {
+    // In this system, numerically lower priority is run first.
+    return a.priority > b.priority;
+  }
+
+  static bool CompareTaskNamespacePriority(const TaskNamespace* a,
+                                           const TaskNamespace* b) {
+    DCHECK(!a->ready_to_run_tasks.empty());
+    DCHECK(!b->ready_to_run_tasks.empty());
+
+    // Compare based on the task priority of each ready_to_run_tasks heap:
+    // .front() holds the max element of a heap, except right after
+    // pop_heap, when the max element has been moved to .back().
+    return CompareTaskPriority(a->ready_to_run_tasks.front(),
+                               b->ready_to_run_tasks.front());
+  }
+
+  // True when a namespace has neither running nor ready-to-run tasks, i.e.
+  // all its scheduled work has completed or been canceled.
+  static bool HasFinishedRunningTasksInNamespace(
+      const TaskNamespace* task_namespace) {
+    return task_namespace->running_tasks.empty() &&
+           task_namespace->ready_to_run_tasks.empty();
+  }
+
+  // Run next task. Caller must acquire |lock_| prior to calling this function
+  // and make sure at least one task is ready to run.
+  void RunTaskWithLockAcquired();
+
+  // This lock protects all members of this class. Do not read or modify
+  // anything without holding this lock. Do not block while holding this lock.
+  mutable base::Lock lock_;
+
+  // Condition variable that is waited on by Run() until new tasks are ready to
+  // run or shutdown starts.
+  base::ConditionVariable has_ready_to_run_tasks_cv_;
+
+  // Condition variable that is waited on by origin threads until a namespace
+  // has finished running all associated tasks.
+  base::ConditionVariable has_namespaces_with_finished_running_tasks_cv_;
+
+  // Provides a unique id to each NamespaceToken.
+  int next_namespace_id_;
+
+  // This set contains all namespaces with pending, running or completed tasks
+  // not yet collected.
+  TaskNamespaceMap namespaces_;
+
+  // Ordered set of task namespaces that have ready to run tasks.
+  TaskNamespace::Vector ready_to_run_namespaces_;
+
+  // Set during shutdown. Tells Run() to return when no more tasks are pending.
+  bool shutdown_;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskGraphRunner);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_TASK_GRAPH_RUNNER_H_
diff --git a/cc/resources/task_graph_runner_perftest.cc b/cc/resources/task_graph_runner_perftest.cc
new file mode 100644
index 0000000..533ea4b
--- /dev/null
+++ b/cc/resources/task_graph_runner_perftest.cc
@@ -0,0 +1,318 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/task_graph_runner.h"
+
+#include <vector>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "cc/base/completion_event.h"
+#include "cc/debug/lap_timer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace cc {
+namespace {
+
+static const int kTimeLimitMillis = 2000;
+static const int kWarmupRuns = 5;
+static const int kTimeCheckInterval = 10;
+
+// Minimal no-op task used to measure pure scheduling overhead.
+class PerfTaskImpl : public Task {
+ public:
+  typedef std::vector<scoped_refptr<PerfTaskImpl> > Vector;
+
+  PerfTaskImpl() {}
+
+  // Overridden from Task:
+  virtual void RunOnWorkerThread() OVERRIDE {}
+
+  // Allows the same task instance to be scheduled again.
+  // NOTE(review): only |did_run_| is cleared; |will_run_| keeps its value --
+  // confirm Task's bookkeeping tolerates re-running.
+  void Reset() { did_run_ = false; }
+
+ private:
+  virtual ~PerfTaskImpl() {}
+
+  DISALLOW_COPY_AND_ASSIGN(PerfTaskImpl);
+};
+
+// Benchmarks graph construction, scheduling and execution overhead of
+// TaskGraphRunner. All work runs on the test thread via RunUntilIdle();
+// no worker threads are started.
+class TaskGraphRunnerPerfTest : public testing::Test {
+ public:
+  TaskGraphRunnerPerfTest()
+      : timer_(kWarmupRuns,
+               base::TimeDelta::FromMilliseconds(kTimeLimitMillis),
+               kTimeCheckInterval) {}
+
+  // Overridden from testing::Test:
+  virtual void SetUp() OVERRIDE {
+    task_graph_runner_ = make_scoped_ptr(new TaskGraphRunner);
+    namespace_token_ = task_graph_runner_->GetNamespaceToken();
+  }
+  virtual void TearDown() OVERRIDE { task_graph_runner_ = nullptr; }
+
+  void AfterTest(const std::string& test_name) {
+    // Format matches chrome/test/perf/perf_test.h:PrintResult
+    printf(
+        "*RESULT %s: %.2f runs/s\n", test_name.c_str(), timer_.LapsPerSecond());
+  }
+
+  // Measures the cost of building a three-layer task graph (top-level /
+  // middle / leaf tasks) without scheduling it.
+  void RunBuildTaskGraphTest(const std::string& test_name,
+                             int num_top_level_tasks,
+                             int num_tasks,
+                             int num_leaf_tasks) {
+    PerfTaskImpl::Vector top_level_tasks;
+    PerfTaskImpl::Vector tasks;
+    PerfTaskImpl::Vector leaf_tasks;
+    CreateTasks(num_top_level_tasks, &top_level_tasks);
+    CreateTasks(num_tasks, &tasks);
+    CreateTasks(num_leaf_tasks, &leaf_tasks);
+
+    // Avoid unnecessary heap allocations by reusing the same graph.
+    TaskGraph graph;
+
+    timer_.Reset();
+    do {
+      graph.Reset();
+      BuildTaskGraph(top_level_tasks, tasks, leaf_tasks, &graph);
+      timer_.NextLap();
+    } while (!timer_.HasTimeLimitExpired());
+
+    perf_test::PrintResult("build_task_graph",
+                           TestModifierString(),
+                           test_name,
+                           timer_.LapsPerSecond(),
+                           "runs/s",
+                           true);
+  }
+
+  // Measures re-scheduling the same graph repeatedly. No tasks are run, so
+  // nothing should ever complete during the loop.
+  void RunScheduleTasksTest(const std::string& test_name,
+                            int num_top_level_tasks,
+                            int num_tasks,
+                            int num_leaf_tasks) {
+    PerfTaskImpl::Vector top_level_tasks;
+    PerfTaskImpl::Vector tasks;
+    PerfTaskImpl::Vector leaf_tasks;
+    CreateTasks(num_top_level_tasks, &top_level_tasks);
+    CreateTasks(num_tasks, &tasks);
+    CreateTasks(num_leaf_tasks, &leaf_tasks);
+
+    // Avoid unnecessary heap allocations by reusing the same graph and
+    // completed tasks vector.
+    TaskGraph graph;
+    Task::Vector completed_tasks;
+
+    timer_.Reset();
+    do {
+      graph.Reset();
+      BuildTaskGraph(top_level_tasks, tasks, leaf_tasks, &graph);
+      task_graph_runner_->ScheduleTasks(namespace_token_, &graph);
+      // Shouldn't be any tasks to collect as we reschedule the same set
+      // of tasks.
+      DCHECK_EQ(0u, CollectCompletedTasks(&completed_tasks));
+      timer_.NextLap();
+    } while (!timer_.HasTimeLimitExpired());
+
+    // Scheduling an empty graph cancels everything outstanding so the
+    // namespace can drain before the next benchmark.
+    TaskGraph empty;
+    task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
+    CollectCompletedTasks(&completed_tasks);
+
+    perf_test::PrintResult("schedule_tasks",
+                           TestModifierString(),
+                           test_name,
+                           timer_.LapsPerSecond(),
+                           "runs/s",
+                           true);
+  }
+
+  // Measures alternating between two disjoint graphs, which exercises the
+  // cancellation path in ScheduleTasks() on every lap.
+  void RunScheduleAlternateTasksTest(const std::string& test_name,
+                                     int num_top_level_tasks,
+                                     int num_tasks,
+                                     int num_leaf_tasks) {
+    const size_t kNumVersions = 2;
+    PerfTaskImpl::Vector top_level_tasks[kNumVersions];
+    PerfTaskImpl::Vector tasks[kNumVersions];
+    PerfTaskImpl::Vector leaf_tasks[kNumVersions];
+    for (size_t i = 0; i < kNumVersions; ++i) {
+      CreateTasks(num_top_level_tasks, &top_level_tasks[i]);
+      CreateTasks(num_tasks, &tasks[i]);
+      CreateTasks(num_leaf_tasks, &leaf_tasks[i]);
+    }
+
+    // Avoid unnecessary heap allocations by reusing the same graph and
+    // completed tasks vector.
+    TaskGraph graph;
+    Task::Vector completed_tasks;
+
+    size_t count = 0;
+    timer_.Reset();
+    do {
+      graph.Reset();
+      BuildTaskGraph(top_level_tasks[count % kNumVersions],
+                     tasks[count % kNumVersions],
+                     leaf_tasks[count % kNumVersions],
+                     &graph);
+      task_graph_runner_->ScheduleTasks(namespace_token_, &graph);
+      CollectCompletedTasks(&completed_tasks);
+      completed_tasks.clear();
+      ++count;
+      timer_.NextLap();
+    } while (!timer_.HasTimeLimitExpired());
+
+    TaskGraph empty;
+    task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
+    CollectCompletedTasks(&completed_tasks);
+
+    perf_test::PrintResult("schedule_alternate_tasks",
+                           TestModifierString(),
+                           test_name,
+                           timer_.LapsPerSecond(),
+                           "runs/s",
+                           true);
+  }
+
+  // Measures schedule + synchronous execution (RunUntilIdle) + collection.
+  void RunScheduleAndExecuteTasksTest(const std::string& test_name,
+                                      int num_top_level_tasks,
+                                      int num_tasks,
+                                      int num_leaf_tasks) {
+    PerfTaskImpl::Vector top_level_tasks;
+    PerfTaskImpl::Vector tasks;
+    PerfTaskImpl::Vector leaf_tasks;
+    CreateTasks(num_top_level_tasks, &top_level_tasks);
+    CreateTasks(num_tasks, &tasks);
+    CreateTasks(num_leaf_tasks, &leaf_tasks);
+
+    // Avoid unnecessary heap allocations by reusing the same graph and
+    // completed tasks vector.
+    TaskGraph graph;
+    Task::Vector completed_tasks;
+
+    timer_.Reset();
+    do {
+      graph.Reset();
+      BuildTaskGraph(top_level_tasks, tasks, leaf_tasks, &graph);
+      task_graph_runner_->ScheduleTasks(namespace_token_, &graph);
+      task_graph_runner_->RunUntilIdle();
+      CollectCompletedTasks(&completed_tasks);
+      completed_tasks.clear();
+      // Tasks must be reset so they can be scheduled and run again.
+      ResetTasks(&top_level_tasks);
+      ResetTasks(&tasks);
+      ResetTasks(&leaf_tasks);
+      timer_.NextLap();
+    } while (!timer_.HasTimeLimitExpired());
+
+    perf_test::PrintResult("execute_tasks",
+                           TestModifierString(),
+                           test_name,
+                           timer_.LapsPerSecond(),
+                           "runs/s",
+                           true);
+  }
+
+ private:
+  static std::string TestModifierString() {
+    return std::string("_task_graph_runner");
+  }
+
+  void CreateTasks(int num_tasks, PerfTaskImpl::Vector* tasks) {
+    for (int i = 0; i < num_tasks; ++i)
+      tasks->push_back(make_scoped_refptr(new PerfTaskImpl));
+  }
+
+  void ResetTasks(PerfTaskImpl::Vector* tasks) {
+    for (PerfTaskImpl::Vector::iterator it = tasks->begin(); it != tasks->end();
+         ++it) {
+      PerfTaskImpl* task = it->get();
+      task->Reset();
+    }
+  }
+
+  // Builds a graph where every middle task depends on every leaf task and
+  // every top-level task depends on every middle task.
+  void BuildTaskGraph(const PerfTaskImpl::Vector& top_level_tasks,
+                      const PerfTaskImpl::Vector& tasks,
+                      const PerfTaskImpl::Vector& leaf_tasks,
+                      TaskGraph* graph) {
+    DCHECK(graph->nodes.empty());
+    DCHECK(graph->edges.empty());
+
+    for (PerfTaskImpl::Vector::const_iterator it = leaf_tasks.begin();
+         it != leaf_tasks.end();
+         ++it) {
+      graph->nodes.push_back(TaskGraph::Node(it->get(), 0u, 0u));
+    }
+
+    for (PerfTaskImpl::Vector::const_iterator it = tasks.begin();
+         it != tasks.end();
+         ++it) {
+      graph->nodes.push_back(TaskGraph::Node(it->get(), 0u, leaf_tasks.size()));
+
+      for (PerfTaskImpl::Vector::const_iterator leaf_it = leaf_tasks.begin();
+           leaf_it != leaf_tasks.end();
+           ++leaf_it) {
+        graph->edges.push_back(TaskGraph::Edge(leaf_it->get(), it->get()));
+      }
+
+      for (PerfTaskImpl::Vector::const_iterator top_level_it =
+               top_level_tasks.begin();
+           top_level_it != top_level_tasks.end();
+           ++top_level_it) {
+        graph->edges.push_back(TaskGraph::Edge(it->get(), top_level_it->get()));
+      }
+    }
+
+    for (PerfTaskImpl::Vector::const_iterator it = top_level_tasks.begin();
+         it != top_level_tasks.end();
+         ++it) {
+      graph->nodes.push_back(TaskGraph::Node(it->get(), 0u, tasks.size()));
+    }
+  }
+
+  // Returns the number of tasks collected.
+  size_t CollectCompletedTasks(Task::Vector* completed_tasks) {
+    DCHECK(completed_tasks->empty());
+    task_graph_runner_->CollectCompletedTasks(namespace_token_,
+                                              completed_tasks);
+    return completed_tasks->size();
+  }
+
+  scoped_ptr<TaskGraphRunner> task_graph_runner_;
+  NamespaceToken namespace_token_;
+  LapTimer timer_;
+};
+
+// Test names encode <top_level>_<middle>_<leaf> task counts.
+TEST_F(TaskGraphRunnerPerfTest, BuildTaskGraph) {
+  RunBuildTaskGraphTest("0_1_0", 0, 1, 0);
+  RunBuildTaskGraphTest("0_32_0", 0, 32, 0);
+  RunBuildTaskGraphTest("2_1_0", 2, 1, 0);
+  RunBuildTaskGraphTest("2_32_0", 2, 32, 0);
+  RunBuildTaskGraphTest("2_1_1", 2, 1, 1);
+  RunBuildTaskGraphTest("2_32_1", 2, 32, 1);
+}
+
+// Test names encode <top_level>_<middle>_<leaf> task counts.
+TEST_F(TaskGraphRunnerPerfTest, ScheduleTasks) {
+  RunScheduleTasksTest("0_1_0", 0, 1, 0);
+  RunScheduleTasksTest("0_32_0", 0, 32, 0);
+  RunScheduleTasksTest("2_1_0", 2, 1, 0);
+  RunScheduleTasksTest("2_32_0", 2, 32, 0);
+  RunScheduleTasksTest("2_1_1", 2, 1, 1);
+  RunScheduleTasksTest("2_32_1", 2, 32, 1);
+}
+
+// Test names encode <top_level>_<middle>_<leaf> task counts.
+TEST_F(TaskGraphRunnerPerfTest, ScheduleAlternateTasks) {
+  RunScheduleAlternateTasksTest("0_1_0", 0, 1, 0);
+  RunScheduleAlternateTasksTest("0_32_0", 0, 32, 0);
+  RunScheduleAlternateTasksTest("2_1_0", 2, 1, 0);
+  RunScheduleAlternateTasksTest("2_32_0", 2, 32, 0);
+  RunScheduleAlternateTasksTest("2_1_1", 2, 1, 1);
+  RunScheduleAlternateTasksTest("2_32_1", 2, 32, 1);
+}
+
+// Test names encode <top_level>_<middle>_<leaf> task counts.
+TEST_F(TaskGraphRunnerPerfTest, ScheduleAndExecuteTasks) {
+  RunScheduleAndExecuteTasksTest("0_1_0", 0, 1, 0);
+  RunScheduleAndExecuteTasksTest("0_32_0", 0, 32, 0);
+  RunScheduleAndExecuteTasksTest("2_1_0", 2, 1, 0);
+  RunScheduleAndExecuteTasksTest("2_32_0", 2, 32, 0);
+  RunScheduleAndExecuteTasksTest("2_1_1", 2, 1, 1);
+  RunScheduleAndExecuteTasksTest("2_32_1", 2, 32, 1);
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/task_graph_runner_unittest.cc b/cc/resources/task_graph_runner_unittest.cc
new file mode 100644
index 0000000..1a6256c
--- /dev/null
+++ b/cc/resources/task_graph_runner_unittest.cc
@@ -0,0 +1,331 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/task_graph_runner.h"
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/simple_thread.h"
+#include "cc/base/scoped_ptr_deque.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cc {
+namespace {
+
+const int kNamespaceCount = 3;
+
+// Shared fixture state: owns the runner under test, records which task ids
+// ran (worker side) and which completed (origin side), per namespace.
+class TaskGraphRunnerTestBase {
+ public:
+  // Describes one task to schedule plus |dependent_count| dependents that
+  // all share |dependent_id|.
+  struct TaskInfo {
+    TaskInfo(int namespace_index,
+             unsigned id,
+             unsigned dependent_id,
+             unsigned dependent_count,
+             unsigned priority)
+        : namespace_index(namespace_index),
+          id(id),
+          dependent_id(dependent_id),
+          dependent_count(dependent_count),
+          priority(priority) {}
+
+    int namespace_index;
+    unsigned id;
+    unsigned dependent_id;
+    unsigned dependent_count;
+    unsigned priority;
+  };
+
+  TaskGraphRunnerTestBase() : task_graph_runner_(new TaskGraphRunner) {}
+
+  void ResetIds(int namespace_index) {
+    run_task_ids_[namespace_index].clear();
+    on_task_completed_ids_[namespace_index].clear();
+  }
+
+  // Waits for the namespace to drain, then collects its completed tasks and
+  // invokes their completion callbacks on this (origin) thread.
+  void RunAllTasks(int namespace_index) {
+    task_graph_runner_->WaitForTasksToFinishRunning(
+        namespace_token_[namespace_index]);
+
+    Task::Vector completed_tasks;
+    task_graph_runner_->CollectCompletedTasks(namespace_token_[namespace_index],
+                                              &completed_tasks);
+    for (Task::Vector::const_iterator it = completed_tasks.begin();
+         it != completed_tasks.end();
+         ++it) {
+      FakeTaskImpl* task = static_cast<FakeTaskImpl*>(it->get());
+      task->CompleteOnOriginThread();
+    }
+  }
+
+  // Called from worker threads; the lock serializes concurrent appends.
+  void RunTaskOnWorkerThread(int namespace_index, unsigned id) {
+    base::AutoLock lock(run_task_ids_lock_);
+    run_task_ids_[namespace_index].push_back(id);
+  }
+
+  // Called on the origin thread only (from RunAllTasks); no lock needed.
+  void OnTaskCompleted(int namespace_index, unsigned id) {
+    on_task_completed_ids_[namespace_index].push_back(id);
+  }
+
+  const std::vector<unsigned>& run_task_ids(int namespace_index) {
+    return run_task_ids_[namespace_index];
+  }
+
+  const std::vector<unsigned>& on_task_completed_ids(int namespace_index) {
+    return on_task_completed_ids_[namespace_index];
+  }
+
+  // Builds a graph from |tasks| (each with its dependents) and schedules it
+  // in the given namespace, replacing the previously scheduled graph.
+  void ScheduleTasks(int namespace_index, const std::vector<TaskInfo>& tasks) {
+    Task::Vector new_tasks;
+    Task::Vector new_dependents;
+    TaskGraph new_graph;
+
+    for (std::vector<TaskInfo>::const_iterator it = tasks.begin();
+         it != tasks.end();
+         ++it) {
+      scoped_refptr<FakeTaskImpl> new_task(
+          new FakeTaskImpl(this, it->namespace_index, it->id));
+      new_graph.nodes.push_back(
+          TaskGraph::Node(new_task.get(), it->priority, 0u));
+      for (unsigned i = 0; i < it->dependent_count; ++i) {
+        // Each dependent has exactly one dependency: its parent task.
+        scoped_refptr<FakeDependentTaskImpl> new_dependent_task(
+            new FakeDependentTaskImpl(
+                this, it->namespace_index, it->dependent_id));
+        new_graph.nodes.push_back(
+            TaskGraph::Node(new_dependent_task.get(), it->priority, 1u));
+        new_graph.edges.push_back(
+            TaskGraph::Edge(new_task.get(), new_dependent_task.get()));
+
+        new_dependents.push_back(new_dependent_task.get());
+      }
+
+      new_tasks.push_back(new_task.get());
+    }
+
+    task_graph_runner_->ScheduleTasks(namespace_token_[namespace_index],
+                                      &new_graph);
+
+    // Keep references alive until the next ScheduleTasks() call.
+    dependents_[namespace_index].swap(new_dependents);
+    tasks_[namespace_index].swap(new_tasks);
+  }
+
+ protected:
+  // Records its id when run; completion callback records on origin thread.
+  class FakeTaskImpl : public Task {
+   public:
+    FakeTaskImpl(TaskGraphRunnerTestBase* test, int namespace_index, int id)
+        : test_(test), namespace_index_(namespace_index), id_(id) {}
+
+    // Overridden from Task:
+    virtual void RunOnWorkerThread() OVERRIDE {
+      test_->RunTaskOnWorkerThread(namespace_index_, id_);
+    }
+
+    virtual void CompleteOnOriginThread() {
+      test_->OnTaskCompleted(namespace_index_, id_);
+    }
+
+   protected:
+    virtual ~FakeTaskImpl() {}
+
+   private:
+    TaskGraphRunnerTestBase* test_;
+    int namespace_index_;
+    int id_;
+
+    DISALLOW_COPY_AND_ASSIGN(FakeTaskImpl);
+  };
+
+  // Like FakeTaskImpl but with a no-op completion callback, so only
+  // non-dependent tasks show up in |on_task_completed_ids_|.
+  class FakeDependentTaskImpl : public FakeTaskImpl {
+   public:
+    FakeDependentTaskImpl(TaskGraphRunnerTestBase* test,
+                          int namespace_index,
+                          int id)
+        : FakeTaskImpl(test, namespace_index, id) {}
+
+    // Overridden from FakeTaskImpl:
+    virtual void CompleteOnOriginThread() OVERRIDE {}
+
+   private:
+    virtual ~FakeDependentTaskImpl() {}
+
+    DISALLOW_COPY_AND_ASSIGN(FakeDependentTaskImpl);
+  };
+
+  scoped_ptr<TaskGraphRunner> task_graph_runner_;
+  NamespaceToken namespace_token_[kNamespaceCount];
+  Task::Vector tasks_[kNamespaceCount];
+  Task::Vector dependents_[kNamespaceCount];
+  std::vector<unsigned> run_task_ids_[kNamespaceCount];
+  base::Lock run_task_ids_lock_;
+  std::vector<unsigned> on_task_completed_ids_[kNamespaceCount];
+};
+
+// Parameterized fixture: GetParam() is the number of worker threads that
+// call TaskGraphRunner::Run() concurrently.
+class TaskGraphRunnerTest : public TaskGraphRunnerTestBase,
+                            public testing::TestWithParam<int>,
+                            public base::DelegateSimpleThread::Delegate {
+ public:
+  // Overridden from testing::Test:
+  virtual void SetUp() OVERRIDE {
+    const size_t num_threads = GetParam();
+    while (workers_.size() < num_threads) {
+      scoped_ptr<base::DelegateSimpleThread> worker =
+          make_scoped_ptr(new base::DelegateSimpleThread(this, "TestWorker"));
+      worker->Start();
+      workers_.push_back(worker.Pass());
+    }
+
+    for (int i = 0; i < kNamespaceCount; ++i)
+      namespace_token_[i] = task_graph_runner_->GetNamespaceToken();
+  }
+  virtual void TearDown() OVERRIDE {
+    // Shutdown() makes every worker's Run() return once idle; each worker
+    // is then joined before destruction.
+    task_graph_runner_->Shutdown();
+    while (workers_.size()) {
+      scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front();
+      worker->Join();
+    }
+  }
+
+ private:
+  // Overridden from base::DelegateSimpleThread::Delegate:
+  virtual void Run() OVERRIDE { task_graph_runner_->Run(); }
+
+  ScopedPtrDeque<base::DelegateSimpleThread> workers_;
+};
+
+// Schedules 1 task with 0, 1 and then 2 dependents per namespace and checks
+// the cumulative run/completion counts. Dependents run but have a no-op
+// completion callback, so only parent tasks add to on_task_completed_ids.
+TEST_P(TaskGraphRunnerTest, Basic) {
+  for (int i = 0; i < kNamespaceCount; ++i) {
+    EXPECT_EQ(0u, run_task_ids(i).size());
+    EXPECT_EQ(0u, on_task_completed_ids(i).size());
+
+    ScheduleTasks(i, std::vector<TaskInfo>(1, TaskInfo(i, 0u, 0u, 0u, 0u)));
+  }
+
+  for (int i = 0; i < kNamespaceCount; ++i) {
+    RunAllTasks(i);
+
+    EXPECT_EQ(1u, run_task_ids(i).size());
+    EXPECT_EQ(1u, on_task_completed_ids(i).size());
+  }
+
+  for (int i = 0; i < kNamespaceCount; ++i)
+    ScheduleTasks(i, std::vector<TaskInfo>(1, TaskInfo(i, 0u, 0u, 1u, 0u)));
+
+  for (int i = 0; i < kNamespaceCount; ++i) {
+    RunAllTasks(i);
+
+    // 1 (previous) + 1 task + 1 dependent = 3 runs total.
+    EXPECT_EQ(3u, run_task_ids(i).size());
+    EXPECT_EQ(2u, on_task_completed_ids(i).size());
+  }
+
+  for (int i = 0; i < kNamespaceCount; ++i)
+    ScheduleTasks(i, std::vector<TaskInfo>(1, TaskInfo(i, 0u, 0u, 2u, 0u)));
+
+  for (int i = 0; i < kNamespaceCount; ++i) {
+    RunAllTasks(i);
+
+    // 3 (previous) + 1 task + 2 dependents = 6 runs total.
+    EXPECT_EQ(6u, run_task_ids(i).size());
+    EXPECT_EQ(3u, on_task_completed_ids(i).size());
+  }
+}
+
+// Verifies that a task always runs before its dependents, and that a task
+// with multiple dependents still runs exactly once.
+TEST_P(TaskGraphRunnerTest, Dependencies) {
+  for (int i = 0; i < kNamespaceCount; ++i) {
+    ScheduleTasks(i,
+                  std::vector<TaskInfo>(1,
+                                        TaskInfo(i,
+                                                 0u,
+                                                 1u,
+                                                 1u,  // 1 dependent
+                                                 0u)));
+  }
+
+  for (int i = 0; i < kNamespaceCount; ++i) {
+    RunAllTasks(i);
+
+    // Check if task ran before dependent.
+    ASSERT_EQ(2u, run_task_ids(i).size());
+    EXPECT_EQ(0u, run_task_ids(i)[0]);
+    EXPECT_EQ(1u, run_task_ids(i)[1]);
+    ASSERT_EQ(1u, on_task_completed_ids(i).size());
+    EXPECT_EQ(0u, on_task_completed_ids(i)[0]);
+  }
+
+  for (int i = 0; i < kNamespaceCount; ++i) {
+    ScheduleTasks(i,
+                  std::vector<TaskInfo>(1,
+                                        TaskInfo(i,
+                                                 2u,
+                                                 3u,
+                                                 2u,  // 2 dependents
+                                                 0u)));
+  }
+
+  for (int i = 0; i < kNamespaceCount; ++i) {
+    RunAllTasks(i);
+
+    // Task should only run once.
+    ASSERT_EQ(5u, run_task_ids(i).size());
+    EXPECT_EQ(2u, run_task_ids(i)[2]);
+    EXPECT_EQ(3u, run_task_ids(i)[3]);
+    EXPECT_EQ(3u, run_task_ids(i)[4]);
+    ASSERT_EQ(2u, on_task_completed_ids(i).size());
+    EXPECT_EQ(2u, on_task_completed_ids(i)[1]);
+  }
+}
+
+// Run each TaskGraphRunnerTest with 1 through 4 worker threads
+// (testing::Range is half-open: [1, 5)).
+INSTANTIATE_TEST_CASE_P(TaskGraphRunnerTests,
+                        TaskGraphRunnerTest,
+                        ::testing::Range(1, 5));
+
+// Single-worker fixture: with exactly one worker thread, task execution
+// order is deterministic, which the Priority test below depends on.
+class TaskGraphRunnerSingleThreadTest
+    : public TaskGraphRunnerTestBase,
+      public testing::Test,
+      public base::DelegateSimpleThread::Delegate {
+ public:
+  // Overridden from testing::Test:
+  virtual void SetUp() OVERRIDE {
+    worker_.reset(new base::DelegateSimpleThread(this, "TestWorker"));
+    worker_->Start();
+
+    for (int i = 0; i < kNamespaceCount; ++i)
+      namespace_token_[i] = task_graph_runner_->GetNamespaceToken();
+  }
+  virtual void TearDown() OVERRIDE {
+    task_graph_runner_->Shutdown();
+    worker_->Join();
+  }
+
+ private:
+  // Overridden from base::DelegateSimpleThread::Delegate:
+  virtual void Run() OVERRIDE { task_graph_runner_->Run(); }
+
+  scoped_ptr<base::DelegateSimpleThread> worker_;
+};
+
+// Numerically lower priority runs first: the priority-0 chain (ids 1, 3)
+// must execute before the priority-1 chain (ids 0, 2).
+TEST_F(TaskGraphRunnerSingleThreadTest, Priority) {
+  for (int i = 0; i < kNamespaceCount; ++i) {
+    TaskInfo tasks[] = {TaskInfo(i, 0u, 2u, 1u, 1u),  // Priority 1
+                        TaskInfo(i, 1u, 3u, 1u, 0u)   // Priority 0
+    };
+    ScheduleTasks(i, std::vector<TaskInfo>(tasks, tasks + arraysize(tasks)));
+  }
+
+  for (int i = 0; i < kNamespaceCount; ++i) {
+    RunAllTasks(i);
+
+    // Check if tasks ran in order of priority.
+    ASSERT_EQ(4u, run_task_ids(i).size());
+    EXPECT_EQ(1u, run_task_ids(i)[0]);
+    EXPECT_EQ(3u, run_task_ids(i)[1]);
+    EXPECT_EQ(0u, run_task_ids(i)[2]);
+    EXPECT_EQ(2u, run_task_ids(i)[3]);
+    ASSERT_EQ(2u, on_task_completed_ids(i).size());
+    EXPECT_EQ(1u, on_task_completed_ids(i)[0]);
+    EXPECT_EQ(0u, on_task_completed_ids(i)[1]);
+  }
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/texture_mailbox.cc b/cc/resources/texture_mailbox.cc
new file mode 100644
index 0000000..90ce6be
--- /dev/null
+++ b/cc/resources/texture_mailbox.cc
@@ -0,0 +1,59 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/texture_mailbox.h"
+
+#include "base/logging.h"
+#include "cc/resources/shared_bitmap.h"
+#include "third_party/khronos/GLES2/gl2.h"
+
+namespace cc {
+
+TextureMailbox::TextureMailbox() : shared_memory_(NULL) {}
+
+TextureMailbox::TextureMailbox(const gpu::MailboxHolder& mailbox_holder)
+ : mailbox_holder_(mailbox_holder),
+ shared_memory_(NULL),
+ allow_overlay_(false) {}
+
+TextureMailbox::TextureMailbox(const gpu::Mailbox& mailbox,
+ uint32 target,
+ uint32 sync_point)
+ : mailbox_holder_(mailbox, target, sync_point),
+ shared_memory_(NULL),
+ allow_overlay_(false) {}
+
+TextureMailbox::TextureMailbox(base::SharedMemory* shared_memory,
+ const gfx::Size& size)
+ : shared_memory_(shared_memory),
+ shared_memory_size_(size),
+ allow_overlay_(false) {
+ // If an embedder of cc gives an invalid TextureMailbox, we should crash
+ // here to identify the offender.
+ CHECK(SharedBitmap::VerifySizeInBytes(shared_memory_size_));
+}
+
+TextureMailbox::~TextureMailbox() {}
+
+bool TextureMailbox::Equals(const TextureMailbox& other) const {
+ if (other.IsTexture()) {
+ return IsTexture() && !memcmp(mailbox_holder_.mailbox.name,
+ other.mailbox_holder_.mailbox.name,
+ sizeof(mailbox_holder_.mailbox.name));
+ } else if (other.IsSharedMemory()) {
+ return IsSharedMemory() &&
+ shared_memory_->handle() == other.shared_memory_->handle();
+ }
+
+ DCHECK(!other.IsValid());
+ return !IsValid();
+}
+
+size_t TextureMailbox::SharedMemorySizeInBytes() const {
+ // UncheckedSizeInBytes is okay because we VerifySizeInBytes in the
+ // constructor and the field is immutable.
+ return SharedBitmap::UncheckedSizeInBytes(shared_memory_size_);
+}
+
+} // namespace cc
diff --git a/cc/resources/texture_mailbox.h b/cc/resources/texture_mailbox.h
new file mode 100644
index 0000000..4626dd3
--- /dev/null
+++ b/cc/resources/texture_mailbox.h
@@ -0,0 +1,59 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_TEXTURE_MAILBOX_H_
+#define CC_RESOURCES_TEXTURE_MAILBOX_H_
+
+#include <string>
+
+#include "base/callback.h"
+#include "base/memory/shared_memory.h"
+#include "cc/base/cc_export.h"
+#include "gpu/command_buffer/common/mailbox_holder.h"
+#include "ui/gfx/size.h"
+
+namespace cc {
+
+// TODO(skaslev, danakj) Rename this class more appropriately since now it
+// can hold a shared memory resource as well as a texture mailbox.
+class CC_EXPORT TextureMailbox {
+ public:
+ TextureMailbox();
+ explicit TextureMailbox(const gpu::MailboxHolder& mailbox_holder);
+ TextureMailbox(const gpu::Mailbox& mailbox, uint32 target, uint32 sync_point);
+ TextureMailbox(base::SharedMemory* shared_memory, const gfx::Size& size);
+
+ ~TextureMailbox();
+
+ bool IsValid() const { return IsTexture() || IsSharedMemory(); }
+ bool IsTexture() const { return !mailbox_holder_.mailbox.IsZero(); }
+ bool IsSharedMemory() const { return shared_memory_ != NULL; }
+
+ bool Equals(const TextureMailbox&) const;
+
+ const gpu::Mailbox& mailbox() const { return mailbox_holder_.mailbox; }
+ const int8* name() const { return mailbox().name; }
+ uint32 target() const { return mailbox_holder_.texture_target; }
+ uint32 sync_point() const { return mailbox_holder_.sync_point; }
+ void set_sync_point(int32 sync_point) {
+ mailbox_holder_.sync_point = sync_point;
+ }
+
+ bool allow_overlay() const { return allow_overlay_; }
+ void set_allow_overlay(bool allow_overlay) { allow_overlay_ = allow_overlay; }
+
+ base::SharedMemory* shared_memory() const { return shared_memory_; }
+ gfx::Size shared_memory_size() const { return shared_memory_size_; }
+ size_t SharedMemorySizeInBytes() const;
+
+ private:
+ gpu::MailboxHolder mailbox_holder_;
+ base::SharedMemory* shared_memory_;
+ gfx::Size shared_memory_size_;
+ bool allow_overlay_;
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_TEXTURE_MAILBOX_H_
diff --git a/cc/resources/texture_mailbox_deleter.cc b/cc/resources/texture_mailbox_deleter.cc
new file mode 100644
index 0000000..ab86368
--- /dev/null
+++ b/cc/resources/texture_mailbox_deleter.cc
@@ -0,0 +1,92 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/texture_mailbox_deleter.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/memory/weak_ptr.h"
+#include "base/single_thread_task_runner.h"
+#include "cc/output/context_provider.h"
+#include "cc/resources/single_release_callback.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+
+namespace cc {
+
+static void DeleteTextureOnImplThread(
+ const scoped_refptr<ContextProvider>& context_provider,
+ unsigned texture_id,
+ uint32 sync_point,
+ bool is_lost) {
+ if (sync_point)
+ context_provider->ContextGL()->WaitSyncPointCHROMIUM(sync_point);
+ context_provider->ContextGL()->DeleteTextures(1, &texture_id);
+}
+
+static void PostTaskFromMainToImplThread(
+ scoped_refptr<base::SingleThreadTaskRunner> impl_task_runner,
+ ReleaseCallback run_impl_callback,
+ unsigned sync_point,
+ bool is_lost) {
+ // This posts the task to RunDeleteTextureOnImplThread().
+ impl_task_runner->PostTask(
+ FROM_HERE, base::Bind(run_impl_callback, sync_point, is_lost));
+}
+
+TextureMailboxDeleter::TextureMailboxDeleter(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
+ : impl_task_runner_(task_runner), weak_ptr_factory_(this) {}
+
+TextureMailboxDeleter::~TextureMailboxDeleter() {
+ for (size_t i = 0; i < impl_callbacks_.size(); ++i)
+ impl_callbacks_.at(i)->Run(0, true);
+}
+
+scoped_ptr<SingleReleaseCallback> TextureMailboxDeleter::GetReleaseCallback(
+ const scoped_refptr<ContextProvider>& context_provider,
+ unsigned texture_id) {
+ // This callback owns a reference on the |context_provider|. It must be
+ // destroyed on the impl thread. Upon destruction of this class, the
+ // callback must immediately be destroyed.
+ scoped_ptr<SingleReleaseCallback> impl_callback =
+ SingleReleaseCallback::Create(base::Bind(&DeleteTextureOnImplThread,
+ context_provider,
+ texture_id));
+
+ impl_callbacks_.push_back(impl_callback.Pass());
+
+ // The raw pointer to the impl-side callback is valid as long as this
+ // class is alive. So we guard it with a WeakPtr.
+ ReleaseCallback run_impl_callback(
+ base::Bind(&TextureMailboxDeleter::RunDeleteTextureOnImplThread,
+ weak_ptr_factory_.GetWeakPtr(),
+ impl_callbacks_.back()));
+
+ // Provide a callback for the main thread that posts back to the impl
+ // thread.
+ scoped_ptr<SingleReleaseCallback> main_callback =
+ SingleReleaseCallback::Create(base::Bind(
+ &PostTaskFromMainToImplThread, impl_task_runner_, run_impl_callback));
+
+ return main_callback.Pass();
+}
+
+void TextureMailboxDeleter::RunDeleteTextureOnImplThread(
+ SingleReleaseCallback* impl_callback,
+ unsigned sync_point,
+ bool is_lost) {
+ for (size_t i = 0; i < impl_callbacks_.size(); ++i) {
+ if (impl_callbacks_.at(i) == impl_callback) {
+ // Run the callback, then destroy it here on the impl thread.
+ impl_callbacks_.at(i)->Run(sync_point, is_lost);
+ impl_callbacks_.erase(impl_callbacks_.begin() + i);
+ return;
+ }
+ }
+
+  NOTREACHED() << "The Callback returned by GetReleaseCallback() was called "
+               << "more than once.";
+}
+
+} // namespace cc
diff --git a/cc/resources/texture_mailbox_deleter.h b/cc/resources/texture_mailbox_deleter.h
new file mode 100644
index 0000000..ca0bc00
--- /dev/null
+++ b/cc/resources/texture_mailbox_deleter.h
@@ -0,0 +1,51 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_TEXTURE_MAILBOX_DELETER_H_
+#define CC_RESOURCES_TEXTURE_MAILBOX_DELETER_H_
+
+#include "base/memory/weak_ptr.h"
+#include "cc/base/cc_export.h"
+#include "cc/base/scoped_ptr_vector.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+}
+
+namespace cc {
+class ContextProvider;
+class SingleReleaseCallback;
+
+class CC_EXPORT TextureMailboxDeleter {
+ public:
+ explicit TextureMailboxDeleter(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
+ ~TextureMailboxDeleter();
+
+ // Returns a Callback that can be used as the ReleaseCallback for a
+ // TextureMailbox attached to the |texture_id|. The ReleaseCallback can
+ // be passed to other threads and will destroy the texture, once it is
+ // run, on the impl thread. If the TextureMailboxDeleter is destroyed
+ // due to the compositor shutting down, then the ReleaseCallback will
+ // become a no-op and the texture will be deleted immediately on the
+ // impl thread, along with dropping the reference to the ContextProvider.
+ scoped_ptr<SingleReleaseCallback> GetReleaseCallback(
+ const scoped_refptr<ContextProvider>& context_provider,
+ unsigned texture_id);
+
+ private:
+ // Runs the |impl_callback| to delete the texture and removes the callback
+ // from the |impl_callbacks_| list.
+ void RunDeleteTextureOnImplThread(SingleReleaseCallback* impl_callback,
+ uint32 sync_point,
+ bool is_lost);
+
+ scoped_refptr<base::SingleThreadTaskRunner> impl_task_runner_;
+ ScopedPtrVector<SingleReleaseCallback> impl_callbacks_;
+ base::WeakPtrFactory<TextureMailboxDeleter> weak_ptr_factory_;
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_TEXTURE_MAILBOX_DELETER_H_
diff --git a/cc/resources/texture_mailbox_deleter_unittest.cc b/cc/resources/texture_mailbox_deleter_unittest.cc
new file mode 100644
index 0000000..05e33a3
--- /dev/null
+++ b/cc/resources/texture_mailbox_deleter_unittest.cc
@@ -0,0 +1,47 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/texture_mailbox_deleter.h"
+
+#include "base/message_loop/message_loop_proxy.h"
+#include "cc/resources/single_release_callback.h"
+#include "cc/test/test_context_provider.h"
+#include "cc/test/test_web_graphics_context_3d.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cc {
+namespace {
+
+TEST(TextureMailboxDeleterTest, Destroy) {
+ scoped_ptr<TextureMailboxDeleter> deleter(
+ new TextureMailboxDeleter(base::MessageLoopProxy::current()));
+
+ scoped_refptr<TestContextProvider> context_provider =
+ TestContextProvider::Create();
+ context_provider->BindToCurrentThread();
+
+ GLuint texture_id = 0u;
+ context_provider->ContextGL()->GenTextures(1, &texture_id);
+
+ EXPECT_TRUE(context_provider->HasOneRef());
+ EXPECT_EQ(1u, context_provider->TestContext3d()->NumTextures());
+
+ scoped_ptr<SingleReleaseCallback> cb =
+ deleter->GetReleaseCallback(context_provider, texture_id).Pass();
+ EXPECT_FALSE(context_provider->HasOneRef());
+ EXPECT_EQ(1u, context_provider->TestContext3d()->NumTextures());
+
+ // When the deleter is destroyed, it immediately drops its ref on the
+ // ContextProvider, and deletes the texture.
+ deleter = nullptr;
+ EXPECT_TRUE(context_provider->HasOneRef());
+ EXPECT_EQ(0u, context_provider->TestContext3d()->NumTextures());
+
+ // Run the scoped release callback before destroying it, but it won't do
+ // anything.
+ cb->Run(0, false);
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/texture_uploader.cc b/cc/resources/texture_uploader.cc
new file mode 100644
index 0000000..eb4e6c0
--- /dev/null
+++ b/cc/resources/texture_uploader.cc
@@ -0,0 +1,333 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/texture_uploader.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "base/debug/trace_event.h"
+#include "base/metrics/histogram.h"
+#include "cc/base/util.h"
+#include "cc/resources/prioritized_resource.h"
+#include "cc/resources/resource.h"
+#include "gpu/GLES2/gl2extchromium.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "third_party/khronos/GLES2/gl2.h"
+#include "third_party/khronos/GLES2/gl2ext.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/vector2d.h"
+
+using gpu::gles2::GLES2Interface;
+
+namespace {
+
+// How many previous uploads to use when predicting future throughput.
+static const size_t kUploadHistorySizeMax = 1000;
+static const size_t kUploadHistorySizeInitial = 100;
+
+// Global estimated number of textures per second to maintain estimates across
+// subsequent instances of TextureUploader.
+// More than one thread will not access this variable, so we do not need to
+// synchronize access.
+static const double kDefaultEstimatedTexturesPerSecond = 48.0 * 60.0;
+
+// Flush interval when performing texture uploads.
+static const size_t kTextureUploadFlushPeriod = 4;
+
+} // anonymous namespace
+
+namespace cc {
+
+TextureUploader::Query::Query(GLES2Interface* gl)
+ : gl_(gl),
+ query_id_(0),
+ value_(0),
+ has_value_(false),
+ is_non_blocking_(false) {
+ gl_->GenQueriesEXT(1, &query_id_);
+}
+
+TextureUploader::Query::~Query() { gl_->DeleteQueriesEXT(1, &query_id_); }
+
+void TextureUploader::Query::Begin() {
+ has_value_ = false;
+ is_non_blocking_ = false;
+ gl_->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, query_id_);
+}
+
+void TextureUploader::Query::End() {
+ gl_->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
+}
+
+bool TextureUploader::Query::IsPending() {
+ unsigned available = 1;
+ gl_->GetQueryObjectuivEXT(
+ query_id_, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ return !available;
+}
+
+unsigned TextureUploader::Query::Value() {
+ if (!has_value_) {
+ gl_->GetQueryObjectuivEXT(query_id_, GL_QUERY_RESULT_EXT, &value_);
+ has_value_ = true;
+ }
+ return value_;
+}
+
+TextureUploader::TextureUploader(GLES2Interface* gl)
+ : gl_(gl),
+ num_blocking_texture_uploads_(0),
+ sub_image_size_(0),
+ num_texture_uploads_since_last_flush_(0) {
+ for (size_t i = kUploadHistorySizeInitial; i > 0; i--)
+ textures_per_second_history_.insert(kDefaultEstimatedTexturesPerSecond);
+}
+
+TextureUploader::~TextureUploader() {}
+
+size_t TextureUploader::NumBlockingUploads() {
+ ProcessQueries();
+ return num_blocking_texture_uploads_;
+}
+
+void TextureUploader::MarkPendingUploadsAsNonBlocking() {
+ for (ScopedPtrDeque<Query>::iterator it = pending_queries_.begin();
+ it != pending_queries_.end();
+ ++it) {
+ if ((*it)->is_non_blocking())
+ continue;
+
+ num_blocking_texture_uploads_--;
+ (*it)->mark_as_non_blocking();
+ }
+
+ DCHECK(!num_blocking_texture_uploads_);
+}
+
+double TextureUploader::EstimatedTexturesPerSecond() {
+ ProcessQueries();
+
+ // Use the median as our estimate.
+ std::multiset<double>::iterator median = textures_per_second_history_.begin();
+ std::advance(median, textures_per_second_history_.size() / 2);
+ return *median;
+}
+
+void TextureUploader::BeginQuery() {
+ if (available_queries_.empty())
+ available_queries_.push_back(Query::Create(gl_));
+
+ available_queries_.front()->Begin();
+}
+
+void TextureUploader::EndQuery() {
+ available_queries_.front()->End();
+ pending_queries_.push_back(available_queries_.take_front());
+ num_blocking_texture_uploads_++;
+}
+
+void TextureUploader::Upload(const uint8* image,
+ const gfx::Rect& image_rect,
+ const gfx::Rect& source_rect,
+ gfx::Vector2d dest_offset,
+ ResourceFormat format,
+ const gfx::Size& size) {
+ CHECK(image_rect.Contains(source_rect));
+
+ bool is_full_upload = dest_offset.IsZero() && source_rect.size() == size;
+
+ if (is_full_upload)
+ BeginQuery();
+
+ if (format == ETC1) {
+ // ETC1 does not support subimage uploads.
+ DCHECK(is_full_upload);
+ UploadWithTexImageETC1(image, size);
+ } else {
+ UploadWithMapTexSubImage(
+ image, image_rect, source_rect, dest_offset, format);
+ }
+
+ if (is_full_upload)
+ EndQuery();
+
+ num_texture_uploads_since_last_flush_++;
+ if (num_texture_uploads_since_last_flush_ >= kTextureUploadFlushPeriod)
+ Flush();
+}
+
+void TextureUploader::Flush() {
+ if (!num_texture_uploads_since_last_flush_)
+ return;
+
+ gl_->ShallowFlushCHROMIUM();
+
+ num_texture_uploads_since_last_flush_ = 0;
+}
+
+void TextureUploader::ReleaseCachedQueries() {
+ ProcessQueries();
+ available_queries_.clear();
+}
+
+void TextureUploader::UploadWithTexSubImage(const uint8* image,
+ const gfx::Rect& image_rect,
+ const gfx::Rect& source_rect,
+ gfx::Vector2d dest_offset,
+ ResourceFormat format) {
+ TRACE_EVENT0("cc", "TextureUploader::UploadWithTexSubImage");
+
+ // Early-out if this is a no-op, and assert that |image| be valid if this is
+ // not a no-op.
+ if (source_rect.IsEmpty())
+ return;
+ DCHECK(image);
+
+ // Offset from image-rect to source-rect.
+ gfx::Vector2d offset(source_rect.origin() - image_rect.origin());
+
+ const uint8* pixel_source;
+ unsigned bytes_per_pixel = BitsPerPixel(format) / 8;
+ // Use 4-byte row alignment (OpenGL default) for upload performance.
+ // Assuming that GL_UNPACK_ALIGNMENT has not changed from default.
+ unsigned upload_image_stride =
+ RoundUp(bytes_per_pixel * source_rect.width(), 4u);
+
+ if (upload_image_stride == image_rect.width() * bytes_per_pixel &&
+ !offset.x()) {
+ pixel_source = &image[image_rect.width() * bytes_per_pixel * offset.y()];
+ } else {
+ size_t needed_size = upload_image_stride * source_rect.height();
+ if (sub_image_size_ < needed_size) {
+ sub_image_.reset(new uint8[needed_size]);
+ sub_image_size_ = needed_size;
+ }
+ // Strides not equal, so do a row-by-row memcpy from the
+ // paint results into a temp buffer for uploading.
+ for (int row = 0; row < source_rect.height(); ++row)
+ memcpy(&sub_image_[upload_image_stride * row],
+ &image[bytes_per_pixel *
+ (offset.x() + (offset.y() + row) * image_rect.width())],
+ source_rect.width() * bytes_per_pixel);
+
+ pixel_source = &sub_image_[0];
+ }
+
+ gl_->TexSubImage2D(GL_TEXTURE_2D,
+ 0,
+ dest_offset.x(),
+ dest_offset.y(),
+ source_rect.width(),
+ source_rect.height(),
+ GLDataFormat(format),
+ GLDataType(format),
+ pixel_source);
+}
+
+void TextureUploader::UploadWithMapTexSubImage(const uint8* image,
+ const gfx::Rect& image_rect,
+ const gfx::Rect& source_rect,
+ gfx::Vector2d dest_offset,
+ ResourceFormat format) {
+ TRACE_EVENT0("cc", "TextureUploader::UploadWithMapTexSubImage");
+
+ // Early-out if this is a no-op, and assert that |image| be valid if this is
+ // not a no-op.
+ if (source_rect.IsEmpty())
+ return;
+ DCHECK(image);
+ // Compressed textures have no implementation of mapTexSubImage.
+ DCHECK_NE(ETC1, format);
+
+ // Offset from image-rect to source-rect.
+ gfx::Vector2d offset(source_rect.origin() - image_rect.origin());
+
+ unsigned bytes_per_pixel = BitsPerPixel(format) / 8;
+ // Use 4-byte row alignment (OpenGL default) for upload performance.
+ // Assuming that GL_UNPACK_ALIGNMENT has not changed from default.
+ unsigned upload_image_stride =
+ RoundUp(bytes_per_pixel * source_rect.width(), 4u);
+
+ // Upload tile data via a mapped transfer buffer
+ uint8* pixel_dest =
+ static_cast<uint8*>(gl_->MapTexSubImage2DCHROMIUM(GL_TEXTURE_2D,
+ 0,
+ dest_offset.x(),
+ dest_offset.y(),
+ source_rect.width(),
+ source_rect.height(),
+ GLDataFormat(format),
+ GLDataType(format),
+ GL_WRITE_ONLY));
+
+ if (!pixel_dest) {
+ UploadWithTexSubImage(image, image_rect, source_rect, dest_offset, format);
+ return;
+ }
+
+ if (upload_image_stride == image_rect.width() * bytes_per_pixel &&
+ !offset.x()) {
+ memcpy(pixel_dest,
+ &image[image_rect.width() * bytes_per_pixel * offset.y()],
+ source_rect.height() * image_rect.width() * bytes_per_pixel);
+ } else {
+ // Strides not equal, so do a row-by-row memcpy from the
+ // paint results into the pixel_dest.
+ for (int row = 0; row < source_rect.height(); ++row) {
+ memcpy(&pixel_dest[upload_image_stride * row],
+ &image[bytes_per_pixel *
+ (offset.x() + (offset.y() + row) * image_rect.width())],
+ source_rect.width() * bytes_per_pixel);
+ }
+ }
+
+ gl_->UnmapTexSubImage2DCHROMIUM(pixel_dest);
+}
+
+void TextureUploader::UploadWithTexImageETC1(const uint8* image,
+ const gfx::Size& size) {
+ TRACE_EVENT0("cc", "TextureUploader::UploadWithTexImageETC1");
+ DCHECK_EQ(0, size.width() % 4);
+ DCHECK_EQ(0, size.height() % 4);
+
+ gl_->CompressedTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GLInternalFormat(ETC1),
+ size.width(),
+ size.height(),
+ 0,
+ Resource::MemorySizeBytes(size, ETC1),
+ image);
+}
+
+void TextureUploader::ProcessQueries() {
+ while (!pending_queries_.empty()) {
+ if (pending_queries_.front()->IsPending())
+ break;
+
+ unsigned us_elapsed = pending_queries_.front()->Value();
+ UMA_HISTOGRAM_CUSTOM_COUNTS(
+ "Renderer4.TextureGpuUploadTimeUS", us_elapsed, 0, 100000, 50);
+
+ // Clamp the queries to saner values in case the queries fail.
+ us_elapsed = std::max(1u, us_elapsed);
+ us_elapsed = std::min(15000u, us_elapsed);
+
+ if (!pending_queries_.front()->is_non_blocking())
+ num_blocking_texture_uploads_--;
+
+ // Remove the min and max value from our history and insert the new one.
+ double textures_per_second = 1.0 / (us_elapsed * 1e-6);
+ if (textures_per_second_history_.size() >= kUploadHistorySizeMax) {
+ textures_per_second_history_.erase(textures_per_second_history_.begin());
+ textures_per_second_history_.erase(--textures_per_second_history_.end());
+ }
+ textures_per_second_history_.insert(textures_per_second);
+
+ available_queries_.push_back(pending_queries_.take_front());
+ }
+}
+
+} // namespace cc
diff --git a/cc/resources/texture_uploader.h b/cc/resources/texture_uploader.h
new file mode 100644
index 0000000..72c46e7
--- /dev/null
+++ b/cc/resources/texture_uploader.h
@@ -0,0 +1,118 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_TEXTURE_UPLOADER_H_
+#define CC_RESOURCES_TEXTURE_UPLOADER_H_
+
+#include <set>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "cc/base/cc_export.h"
+#include "cc/base/scoped_ptr_deque.h"
+#include "cc/resources/resource_provider.h"
+
+namespace gfx {
+class Rect;
+class Size;
+class Vector2d;
+}
+
+namespace gpu {
+namespace gles2 {
+class GLES2Interface;
+}
+}
+
+namespace cc {
+
+class CC_EXPORT TextureUploader {
+ public:
+ static scoped_ptr<TextureUploader> Create(gpu::gles2::GLES2Interface* gl) {
+ return make_scoped_ptr(new TextureUploader(gl));
+ }
+ ~TextureUploader();
+
+ size_t NumBlockingUploads();
+ void MarkPendingUploadsAsNonBlocking();
+ double EstimatedTexturesPerSecond();
+
+  // Let content_rect be a rectangle, and let source_rect be a sub-rectangle of
+  // content_rect, expressed in the same coordinate system as content_rect. Let
+  // image be a buffer for content_rect. This function will copy the region
+  // corresponding to source_rect to dest_offset in this sub-image.
+ void Upload(const uint8* image,
+ const gfx::Rect& content_rect,
+ const gfx::Rect& source_rect,
+ gfx::Vector2d dest_offset,
+ ResourceFormat format,
+ const gfx::Size& size);
+
+ void Flush();
+ void ReleaseCachedQueries();
+
+ private:
+ class Query {
+ public:
+ static scoped_ptr<Query> Create(gpu::gles2::GLES2Interface* gl) {
+ return make_scoped_ptr(new Query(gl));
+ }
+
+ virtual ~Query();
+
+ void Begin();
+ void End();
+ bool IsPending();
+ unsigned Value();
+ size_t TexturesUploaded();
+ void mark_as_non_blocking() { is_non_blocking_ = true; }
+ bool is_non_blocking() const { return is_non_blocking_; }
+
+ private:
+ explicit Query(gpu::gles2::GLES2Interface* gl);
+
+ gpu::gles2::GLES2Interface* gl_;
+ unsigned query_id_;
+ unsigned value_;
+ bool has_value_;
+ bool is_non_blocking_;
+
+ DISALLOW_COPY_AND_ASSIGN(Query);
+ };
+
+ explicit TextureUploader(gpu::gles2::GLES2Interface* gl);
+
+ void BeginQuery();
+ void EndQuery();
+ void ProcessQueries();
+
+ void UploadWithTexSubImage(const uint8* image,
+ const gfx::Rect& image_rect,
+ const gfx::Rect& source_rect,
+ gfx::Vector2d dest_offset,
+ ResourceFormat format);
+ void UploadWithMapTexSubImage(const uint8* image,
+ const gfx::Rect& image_rect,
+ const gfx::Rect& source_rect,
+ gfx::Vector2d dest_offset,
+ ResourceFormat format);
+ void UploadWithTexImageETC1(const uint8* image, const gfx::Size& size);
+
+ gpu::gles2::GLES2Interface* gl_;
+ ScopedPtrDeque<Query> pending_queries_;
+ ScopedPtrDeque<Query> available_queries_;
+ std::multiset<double> textures_per_second_history_;
+ size_t num_blocking_texture_uploads_;
+
+ size_t sub_image_size_;
+ scoped_ptr<uint8[]> sub_image_;
+
+ size_t num_texture_uploads_since_last_flush_;
+
+ DISALLOW_COPY_AND_ASSIGN(TextureUploader);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_TEXTURE_UPLOADER_H_
diff --git a/cc/resources/texture_uploader_unittest.cc b/cc/resources/texture_uploader_unittest.cc
new file mode 100644
index 0000000..bf94065
--- /dev/null
+++ b/cc/resources/texture_uploader_unittest.cc
@@ -0,0 +1,240 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/texture_uploader.h"
+
+#include "cc/base/util.h"
+#include "cc/resources/prioritized_resource.h"
+#include "gpu/command_buffer/client/gles2_interface_stub.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/khronos/GLES2/gl2.h"
+#include "third_party/khronos/GLES2/gl2ext.h"
+
+namespace cc {
+namespace {
+
+class TextureUploadTestContext : public gpu::gles2::GLES2InterfaceStub {
+ public:
+ TextureUploadTestContext() : result_available_(0), unpack_alignment_(4) {}
+
+ virtual void PixelStorei(GLenum pname, GLint param) OVERRIDE {
+ switch (pname) {
+ case GL_UNPACK_ALIGNMENT:
+ // Param should be a power of two <= 8.
+ EXPECT_EQ(0, param & (param - 1));
+ EXPECT_GE(8, param);
+ switch (param) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ unpack_alignment_ = param;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ virtual void GetQueryObjectuivEXT(GLuint,
+ GLenum type,
+ GLuint* value) OVERRIDE {
+ switch (type) {
+ case GL_QUERY_RESULT_AVAILABLE_EXT:
+ *value = result_available_;
+ break;
+ default:
+ *value = 0;
+ break;
+ }
+ }
+
+ virtual void TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE {
+ EXPECT_EQ(static_cast<unsigned>(GL_TEXTURE_2D), target);
+ EXPECT_EQ(0, level);
+ EXPECT_LE(0, width);
+ EXPECT_LE(0, height);
+ EXPECT_LE(0, xoffset);
+ EXPECT_LE(0, yoffset);
+ EXPECT_LE(0, width);
+ EXPECT_LE(0, height);
+
+ // Check for allowed format/type combination.
+ unsigned int bytes_per_pixel = 0;
+ switch (format) {
+ case GL_ALPHA:
+ EXPECT_EQ(static_cast<unsigned>(GL_UNSIGNED_BYTE), type);
+ bytes_per_pixel = 1;
+ break;
+ case GL_RGB:
+ EXPECT_NE(static_cast<unsigned>(GL_UNSIGNED_SHORT_4_4_4_4), type);
+ EXPECT_NE(static_cast<unsigned>(GL_UNSIGNED_SHORT_5_5_5_1), type);
+ switch (type) {
+ case GL_UNSIGNED_BYTE:
+ bytes_per_pixel = 3;
+ break;
+ case GL_UNSIGNED_SHORT_5_6_5:
+ bytes_per_pixel = 2;
+ break;
+ }
+ break;
+ case GL_RGBA:
+ EXPECT_NE(static_cast<unsigned>(GL_UNSIGNED_SHORT_5_6_5), type);
+ switch (type) {
+ case GL_UNSIGNED_BYTE:
+ bytes_per_pixel = 4;
+ break;
+ case GL_UNSIGNED_SHORT_4_4_4_4:
+ bytes_per_pixel = 2;
+ break;
+ case GL_UNSIGNED_SHORT_5_5_5_1:
+ bytes_per_pixel = 2;
+ break;
+ }
+ break;
+ case GL_LUMINANCE:
+ EXPECT_EQ(static_cast<unsigned>(GL_UNSIGNED_BYTE), type);
+ bytes_per_pixel = 1;
+ break;
+ case GL_LUMINANCE_ALPHA:
+ EXPECT_EQ(static_cast<unsigned>(GL_UNSIGNED_BYTE), type);
+ bytes_per_pixel = 2;
+ break;
+ }
+
+ // If NULL, we aren't checking texture contents.
+ if (pixels == NULL)
+ return;
+
+ const uint8* bytes = static_cast<const uint8*>(pixels);
+ // We'll expect the first byte of every row to be 0x1, and the last byte to
+ // be 0x2.
+ const unsigned int stride =
+ RoundUp(bytes_per_pixel * width, unpack_alignment_);
+ for (GLsizei row = 0; row < height; ++row) {
+ const uint8* row_bytes =
+ bytes + (xoffset * bytes_per_pixel + (yoffset + row) * stride);
+ EXPECT_EQ(0x1, row_bytes[0]);
+ EXPECT_EQ(0x2, row_bytes[width * bytes_per_pixel - 1]);
+ }
+ }
+
+ void SetResultAvailable(unsigned result_available) {
+ result_available_ = result_available;
+ }
+
+ private:
+ unsigned result_available_;
+ unsigned unpack_alignment_;
+
+ DISALLOW_COPY_AND_ASSIGN(TextureUploadTestContext);
+};
+
+void UploadTexture(TextureUploader* uploader,
+ ResourceFormat format,
+ const gfx::Size& size,
+ const uint8* data) {
+ uploader->Upload(
+ data, gfx::Rect(size), gfx::Rect(size), gfx::Vector2d(), format, size);
+}
+
+TEST(TextureUploaderTest, NumBlockingUploads) {
+ TextureUploadTestContext context;
+ scoped_ptr<TextureUploader> uploader = TextureUploader::Create(&context);
+
+ context.SetResultAvailable(0);
+ EXPECT_EQ(0u, uploader->NumBlockingUploads());
+ UploadTexture(uploader.get(), RGBA_8888, gfx::Size(), NULL);
+ EXPECT_EQ(1u, uploader->NumBlockingUploads());
+ UploadTexture(uploader.get(), RGBA_8888, gfx::Size(), NULL);
+ EXPECT_EQ(2u, uploader->NumBlockingUploads());
+
+ context.SetResultAvailable(1);
+ EXPECT_EQ(0u, uploader->NumBlockingUploads());
+ UploadTexture(uploader.get(), RGBA_8888, gfx::Size(), NULL);
+ EXPECT_EQ(0u, uploader->NumBlockingUploads());
+ UploadTexture(uploader.get(), RGBA_8888, gfx::Size(), NULL);
+ UploadTexture(uploader.get(), RGBA_8888, gfx::Size(), NULL);
+ EXPECT_EQ(0u, uploader->NumBlockingUploads());
+}
+
+TEST(TextureUploaderTest, MarkPendingUploadsAsNonBlocking) {
+ TextureUploadTestContext context;
+ scoped_ptr<TextureUploader> uploader = TextureUploader::Create(&context);
+
+ context.SetResultAvailable(0);
+ EXPECT_EQ(0u, uploader->NumBlockingUploads());
+ UploadTexture(uploader.get(), RGBA_8888, gfx::Size(), NULL);
+ UploadTexture(uploader.get(), RGBA_8888, gfx::Size(), NULL);
+ EXPECT_EQ(2u, uploader->NumBlockingUploads());
+
+ uploader->MarkPendingUploadsAsNonBlocking();
+ EXPECT_EQ(0u, uploader->NumBlockingUploads());
+ UploadTexture(uploader.get(), RGBA_8888, gfx::Size(), NULL);
+ EXPECT_EQ(1u, uploader->NumBlockingUploads());
+
+ context.SetResultAvailable(1);
+ EXPECT_EQ(0u, uploader->NumBlockingUploads());
+ UploadTexture(uploader.get(), RGBA_8888, gfx::Size(), NULL);
+ uploader->MarkPendingUploadsAsNonBlocking();
+ EXPECT_EQ(0u, uploader->NumBlockingUploads());
+}
+
+TEST(TextureUploaderTest, UploadContentsTest) {
+ TextureUploadTestContext context;
+ scoped_ptr<TextureUploader> uploader = TextureUploader::Create(&context);
+
+ uint8 buffer[256 * 256 * 4];
+
+ // Upload a tightly packed 256x256 RGBA texture.
+ memset(buffer, 0, sizeof(buffer));
+ for (int i = 0; i < 256; ++i) {
+ // Mark the beginning and end of each row, for the test.
+ buffer[i * 4 * 256] = 0x1;
+ buffer[(i + 1) * 4 * 256 - 1] = 0x2;
+ }
+ UploadTexture(uploader.get(), RGBA_8888, gfx::Size(256, 256), buffer);
+
+ // Upload a tightly packed 41x43 RGBA texture.
+ memset(buffer, 0, sizeof(buffer));
+ for (int i = 0; i < 43; ++i) {
+ // Mark the beginning and end of each row, for the test.
+ buffer[i * 4 * 41] = 0x1;
+ buffer[(i + 1) * 4 * 41 - 1] = 0x2;
+ }
+ UploadTexture(uploader.get(), RGBA_8888, gfx::Size(41, 43), buffer);
+
+ // Upload a tightly packed 41x86 ALPHA texture.
+ memset(buffer, 0, sizeof(buffer));
+ for (int i = 0; i < 86; ++i) {
+ // Mark the beginning and end of each row, for the test.
+ buffer[i * 1 * 41] = 0x1;
+ buffer[(i + 1) * 41 - 1] = 0x2;
+ }
+ UploadTexture(uploader.get(), ALPHA_8, gfx::Size(41, 86), buffer);
+
+ // Upload a tightly packed 82x86 LUMINANCE texture.
+ memset(buffer, 0, sizeof(buffer));
+ for (int i = 0; i < 86; ++i) {
+ // Mark the beginning and end of each row, for the test.
+ buffer[i * 1 * 82] = 0x1;
+ buffer[(i + 1) * 82 - 1] = 0x2;
+ }
+ UploadTexture(uploader.get(), LUMINANCE_8, gfx::Size(82, 86), buffer);
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/tile.cc b/cc/resources/tile.cc
new file mode 100644
index 0000000..ff7a7a2
--- /dev/null
+++ b/cc/resources/tile.cc
@@ -0,0 +1,103 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/tile.h"
+
+#include <algorithm>
+
+#include "base/debug/trace_event_argument.h"
+#include "cc/base/math_util.h"
+#include "cc/debug/traced_value.h"
+#include "cc/resources/tile_manager.h"
+#include "third_party/khronos/GLES2/gl2.h"
+
+namespace cc {
+
+Tile::Id Tile::s_next_id_ = 0;
+
+Tile::Tile(TileManager* tile_manager,
+ PicturePileImpl* picture_pile,
+ const gfx::Size& tile_size,
+ const gfx::Rect& content_rect,
+ float contents_scale,
+ int layer_id,
+ int source_frame_number,
+ int flags)
+ : RefCountedManaged<Tile>(tile_manager),
+ tile_manager_(tile_manager),
+ size_(tile_size),
+ content_rect_(content_rect),
+ contents_scale_(contents_scale),
+ layer_id_(layer_id),
+ source_frame_number_(source_frame_number),
+ flags_(flags),
+ is_shared_(false),
+ id_(s_next_id_++) {
+ set_picture_pile(picture_pile);
+ for (int i = 0; i < NUM_TREES; i++)
+ is_occluded_[i] = false;
+}
+
+Tile::~Tile() {
+ TRACE_EVENT_OBJECT_DELETED_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("cc.debug"),
+ "cc::Tile", this);
+}
+
+void Tile::SetPriority(WhichTree tree, const TilePriority& priority) {
+ if (priority == priority_[tree])
+ return;
+
+ priority_[tree] = priority;
+ tile_manager_->DidChangeTilePriority(this);
+}
+
+void Tile::MarkRequiredForActivation() {
+ if (priority_[PENDING_TREE].required_for_activation)
+ return;
+
+ priority_[PENDING_TREE].required_for_activation = true;
+ tile_manager_->DidChangeTilePriority(this);
+}
+
+void Tile::AsValueInto(base::debug::TracedValue* res) const {
+ TracedValue::MakeDictIntoImplicitSnapshotWithCategory(
+ TRACE_DISABLED_BY_DEFAULT("cc.debug"), res, "cc::Tile", this);
+ TracedValue::SetIDRef(picture_pile_.get(), res, "picture_pile");
+ res->SetDouble("contents_scale", contents_scale_);
+
+ res->BeginArray("content_rect");
+ MathUtil::AddToTracedValue(content_rect_, res);
+ res->EndArray();
+
+ res->SetInteger("layer_id", layer_id_);
+
+ res->BeginDictionary("active_priority");
+ priority_[ACTIVE_TREE].AsValueInto(res);
+ res->EndDictionary();
+
+ res->BeginDictionary("pending_priority");
+ priority_[PENDING_TREE].AsValueInto(res);
+ res->EndDictionary();
+
+ res->BeginDictionary("managed_state");
+ managed_state_.AsValueInto(res);
+ res->EndDictionary();
+
+ res->SetBoolean("use_picture_analysis", use_picture_analysis());
+
+ res->SetInteger("gpu_memory_usage", GPUMemoryUsageInBytes());
+}
+
+size_t Tile::GPUMemoryUsageInBytes() const {
+ if (managed_state_.draw_info.resource_)
+ return managed_state_.draw_info.resource_->bytes();
+ return 0;
+}
+
+bool Tile::HasRasterTask() const {
+ return !!managed_state_.raster_task.get();
+}
+
+} // namespace cc
diff --git a/cc/resources/tile.h b/cc/resources/tile.h
new file mode 100644
index 0000000..ec0b9a2
--- /dev/null
+++ b/cc/resources/tile.h
@@ -0,0 +1,174 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_TILE_H_
+#define CC_RESOURCES_TILE_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_vector.h"
+#include "cc/base/ref_counted_managed.h"
+#include "cc/resources/managed_tile_state.h"
+#include "cc/resources/picture_pile_impl.h"
+#include "cc/resources/tile_priority.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/size.h"
+
+namespace cc {
+
+class CC_EXPORT Tile : public RefCountedManaged<Tile> {
+ public:
+ enum TileRasterFlags { USE_PICTURE_ANALYSIS = 1 << 0 };
+
+ typedef uint64 Id;
+
+ Id id() const {
+ return id_;
+ }
+
+ PicturePileImpl* picture_pile() {
+ return picture_pile_.get();
+ }
+
+ const PicturePileImpl* picture_pile() const {
+ return picture_pile_.get();
+ }
+
+ const TilePriority& priority(WhichTree tree) const {
+ return priority_[tree];
+ }
+
+ TilePriority priority_for_tree_priority(TreePriority tree_priority) const {
+ switch (tree_priority) {
+ case SMOOTHNESS_TAKES_PRIORITY:
+ return priority_[ACTIVE_TREE];
+ case NEW_CONTENT_TAKES_PRIORITY:
+ return priority_[PENDING_TREE];
+ case SAME_PRIORITY_FOR_BOTH_TREES:
+ return combined_priority();
+ default:
+ NOTREACHED();
+ return TilePriority();
+ }
+ }
+
+ TilePriority combined_priority() const {
+ return TilePriority(priority_[ACTIVE_TREE],
+ priority_[PENDING_TREE]);
+ }
+
+ void SetPriority(WhichTree tree, const TilePriority& priority);
+
+ void set_is_occluded(WhichTree tree, bool is_occluded) {
+ is_occluded_[tree] = is_occluded;
+ }
+
+ bool is_occluded(WhichTree tree) const { return is_occluded_[tree]; }
+
+ void set_shared(bool is_shared) { is_shared_ = is_shared; }
+ bool is_shared() const { return is_shared_; }
+
+ bool is_occluded_for_tree_priority(TreePriority tree_priority) const {
+ switch (tree_priority) {
+ case SMOOTHNESS_TAKES_PRIORITY:
+ return is_occluded_[ACTIVE_TREE];
+ case NEW_CONTENT_TAKES_PRIORITY:
+ return is_occluded_[PENDING_TREE];
+ case SAME_PRIORITY_FOR_BOTH_TREES:
+ return is_occluded_[ACTIVE_TREE] && is_occluded_[PENDING_TREE];
+ default:
+ NOTREACHED();
+ return false;
+ }
+ }
+
+ void MarkRequiredForActivation();
+
+ bool required_for_activation() const {
+ return priority_[PENDING_TREE].required_for_activation;
+ }
+
+ bool use_picture_analysis() const {
+ return !!(flags_ & USE_PICTURE_ANALYSIS);
+ }
+
+ bool HasResources() const { return managed_state_.draw_info.has_resource(); }
+
+ void AsValueInto(base::debug::TracedValue* dict) const;
+
+ inline bool IsReadyToDraw() const {
+ return managed_state_.draw_info.IsReadyToDraw();
+ }
+
+ const ManagedTileState::DrawInfo& draw_info() const {
+ return managed_state_.draw_info;
+ }
+
+ ManagedTileState::DrawInfo& draw_info() { return managed_state_.draw_info; }
+
+ float contents_scale() const { return contents_scale_; }
+ gfx::Rect content_rect() const { return content_rect_; }
+
+ int layer_id() const { return layer_id_; }
+
+ int source_frame_number() const { return source_frame_number_; }
+
+ void set_picture_pile(scoped_refptr<PicturePileImpl> pile) {
+ DCHECK(pile->CanRaster(contents_scale_, content_rect_))
+ << "Recording rect: "
+ << gfx::ScaleToEnclosingRect(content_rect_, 1.f / contents_scale_)
+ .ToString();
+ picture_pile_ = pile;
+ }
+
+ size_t GPUMemoryUsageInBytes() const;
+
+ gfx::Size size() const { return size_; }
+
+ private:
+ friend class TileManager;
+ friend class PrioritizedTileSet;
+ friend class FakeTileManager;
+ friend class BinComparator;
+ friend class FakePictureLayerImpl;
+
+ // Methods called by the tile manager.
+ Tile(TileManager* tile_manager,
+ PicturePileImpl* picture_pile,
+ const gfx::Size& tile_size,
+ const gfx::Rect& content_rect,
+ float contents_scale,
+ int layer_id,
+ int source_frame_number,
+ int flags);
+ ~Tile();
+
+ ManagedTileState& managed_state() { return managed_state_; }
+ const ManagedTileState& managed_state() const { return managed_state_; }
+
+ bool HasRasterTask() const;
+
+ TileManager* tile_manager_;
+ scoped_refptr<PicturePileImpl> picture_pile_;
+ gfx::Size size_;
+ gfx::Rect content_rect_;
+ float contents_scale_;
+ bool is_occluded_[NUM_TREES];
+
+ TilePriority priority_[NUM_TREES];
+ ManagedTileState managed_state_;
+ int layer_id_;
+ int source_frame_number_;
+ int flags_;
+ bool is_shared_;
+
+ Id id_;
+ static Id s_next_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(Tile);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_TILE_H_
diff --git a/cc/resources/tile_manager.cc b/cc/resources/tile_manager.cc
new file mode 100644
index 0000000..218faca
--- /dev/null
+++ b/cc/resources/tile_manager.cc
@@ -0,0 +1,1127 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/tile_manager.h"
+
+#include <algorithm>
+#include <limits>
+#include <string>
+
+#include "base/bind.h"
+#include "base/debug/trace_event_argument.h"
+#include "base/json/json_writer.h"
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "cc/debug/devtools_instrumentation.h"
+#include "cc/debug/frame_viewer_instrumentation.h"
+#include "cc/debug/traced_value.h"
+#include "cc/layers/picture_layer_impl.h"
+#include "cc/resources/raster_buffer.h"
+#include "cc/resources/rasterizer.h"
+#include "cc/resources/tile.h"
+#include "third_party/skia/include/core/SkBitmap.h"
+#include "third_party/skia/include/core/SkPixelRef.h"
+#include "ui/gfx/rect_conversions.h"
+
+namespace cc {
+namespace {
+
+// Flag to indicate whether we should try to detect that
+// a tile is of solid color.
+const bool kUseColorEstimator = true;
+
+class RasterTaskImpl : public RasterTask {
+ public:
+ RasterTaskImpl(
+ const Resource* resource,
+ PicturePileImpl* picture_pile,
+ const gfx::Rect& content_rect,
+ float contents_scale,
+ TileResolution tile_resolution,
+ int layer_id,
+ const void* tile_id,
+ int source_frame_number,
+ bool analyze_picture,
+ RenderingStatsInstrumentation* rendering_stats,
+ const base::Callback<void(const PicturePileImpl::Analysis&, bool)>& reply,
+ ImageDecodeTask::Vector* dependencies)
+ : RasterTask(resource, dependencies),
+ picture_pile_(picture_pile),
+ content_rect_(content_rect),
+ contents_scale_(contents_scale),
+ tile_resolution_(tile_resolution),
+ layer_id_(layer_id),
+ tile_id_(tile_id),
+ source_frame_number_(source_frame_number),
+ analyze_picture_(analyze_picture),
+ rendering_stats_(rendering_stats),
+ reply_(reply) {}
+
+ // Overridden from Task:
+ virtual void RunOnWorkerThread() OVERRIDE {
+ TRACE_EVENT0("cc", "RasterizerTaskImpl::RunOnWorkerThread");
+
+ DCHECK(picture_pile_.get());
+ DCHECK(raster_buffer_);
+
+ if (analyze_picture_) {
+ Analyze(picture_pile_.get());
+ if (analysis_.is_solid_color)
+ return;
+ }
+
+ Raster(picture_pile_.get());
+ }
+
+ // Overridden from RasterizerTask:
+ virtual void ScheduleOnOriginThread(RasterizerTaskClient* client) OVERRIDE {
+ DCHECK(!raster_buffer_);
+ raster_buffer_ = client->AcquireBufferForRaster(resource());
+ }
+ virtual void CompleteOnOriginThread(RasterizerTaskClient* client) OVERRIDE {
+ client->ReleaseBufferForRaster(raster_buffer_.Pass());
+ }
+ virtual void RunReplyOnOriginThread() OVERRIDE {
+ DCHECK(!raster_buffer_);
+ reply_.Run(analysis_, !HasFinishedRunning());
+ }
+
+ protected:
+ virtual ~RasterTaskImpl() { DCHECK(!raster_buffer_); }
+
+ private:
+ void Analyze(const PicturePileImpl* picture_pile) {
+ frame_viewer_instrumentation::ScopedAnalyzeTask analyze_task(
+ tile_id_, tile_resolution_, source_frame_number_, layer_id_);
+
+ DCHECK(picture_pile);
+
+ picture_pile->AnalyzeInRect(
+ content_rect_, contents_scale_, &analysis_, rendering_stats_);
+
+ // Record the solid color prediction.
+ UMA_HISTOGRAM_BOOLEAN("Renderer4.SolidColorTilesAnalyzed",
+ analysis_.is_solid_color);
+
+ // Clear the flag if we're not using the estimator.
+ analysis_.is_solid_color &= kUseColorEstimator;
+ }
+
+ void Raster(const PicturePileImpl* picture_pile) {
+ frame_viewer_instrumentation::ScopedRasterTask raster_task(
+ tile_id_, tile_resolution_, source_frame_number_, layer_id_);
+ devtools_instrumentation::ScopedLayerTask layer_task(
+ devtools_instrumentation::kRasterTask, layer_id_);
+
+ skia::RefPtr<SkCanvas> canvas = raster_buffer_->AcquireSkCanvas();
+ DCHECK(canvas);
+
+ base::TimeDelta prev_rasterize_time =
+ rendering_stats_->impl_thread_rendering_stats().rasterize_time;
+
+ // Only record rasterization time for high-res tiles, because
+ // low-res tiles are not required for activation and therefore
+ // introduce noise in the measurement (sometimes they get rasterized
+ // before we draw and sometimes they don't).
+ RenderingStatsInstrumentation* stats =
+ tile_resolution_ == HIGH_RESOLUTION ? rendering_stats_ : NULL;
+ DCHECK(picture_pile);
+ picture_pile->RasterToBitmap(
+ canvas.get(), content_rect_, contents_scale_, stats);
+
+ if (rendering_stats_->record_rendering_stats()) {
+ base::TimeDelta current_rasterize_time =
+ rendering_stats_->impl_thread_rendering_stats().rasterize_time;
+ LOCAL_HISTOGRAM_CUSTOM_COUNTS(
+ "Renderer4.PictureRasterTimeUS",
+ (current_rasterize_time - prev_rasterize_time).InMicroseconds(),
+ 0,
+ 100000,
+ 100);
+ }
+
+ raster_buffer_->ReleaseSkCanvas(canvas);
+ }
+
+ PicturePileImpl::Analysis analysis_;
+ scoped_refptr<PicturePileImpl> picture_pile_;
+ gfx::Rect content_rect_;
+ float contents_scale_;
+ TileResolution tile_resolution_;
+ int layer_id_;
+ const void* tile_id_;
+ int source_frame_number_;
+ bool analyze_picture_;
+ RenderingStatsInstrumentation* rendering_stats_;
+ const base::Callback<void(const PicturePileImpl::Analysis&, bool)> reply_;
+ scoped_ptr<RasterBuffer> raster_buffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(RasterTaskImpl);
+};
+
+class ImageDecodeTaskImpl : public ImageDecodeTask {
+ public:
+ ImageDecodeTaskImpl(SkPixelRef* pixel_ref,
+ int layer_id,
+ RenderingStatsInstrumentation* rendering_stats,
+ const base::Callback<void(bool was_canceled)>& reply)
+ : pixel_ref_(skia::SharePtr(pixel_ref)),
+ layer_id_(layer_id),
+ rendering_stats_(rendering_stats),
+ reply_(reply) {}
+
+ // Overridden from Task:
+ virtual void RunOnWorkerThread() OVERRIDE {
+ TRACE_EVENT0("cc", "ImageDecodeTaskImpl::RunOnWorkerThread");
+
+ devtools_instrumentation::ScopedImageDecodeTask image_decode_task(
+ pixel_ref_.get());
+ // This will cause the image referred to by pixel ref to be decoded.
+ pixel_ref_->lockPixels();
+ pixel_ref_->unlockPixels();
+ }
+
+ // Overridden from RasterizerTask:
+ virtual void ScheduleOnOriginThread(RasterizerTaskClient* client) OVERRIDE {}
+ virtual void CompleteOnOriginThread(RasterizerTaskClient* client) OVERRIDE {}
+ virtual void RunReplyOnOriginThread() OVERRIDE {
+ reply_.Run(!HasFinishedRunning());
+ }
+
+ protected:
+ virtual ~ImageDecodeTaskImpl() {}
+
+ private:
+ skia::RefPtr<SkPixelRef> pixel_ref_;
+ int layer_id_;
+ RenderingStatsInstrumentation* rendering_stats_;
+ const base::Callback<void(bool was_canceled)> reply_;
+
+ DISALLOW_COPY_AND_ASSIGN(ImageDecodeTaskImpl);
+};
+
+const size_t kScheduledRasterTasksLimit = 32u;
+
+// Memory limit policy works by mapping some bin states to the NEVER bin.
+const ManagedTileBin kBinPolicyMap[NUM_TILE_MEMORY_LIMIT_POLICIES][NUM_BINS] = {
+ // [ALLOW_NOTHING]
+ {NEVER_BIN, // [NOW_AND_READY_TO_DRAW_BIN]
+ NEVER_BIN, // [NOW_BIN]
+ NEVER_BIN, // [SOON_BIN]
+ NEVER_BIN, // [EVENTUALLY_AND_ACTIVE_BIN]
+ NEVER_BIN, // [EVENTUALLY_BIN]
+ NEVER_BIN, // [AT_LAST_AND_ACTIVE_BIN]
+ NEVER_BIN, // [AT_LAST_BIN]
+ NEVER_BIN // [NEVER_BIN]
+ },
+ // [ALLOW_ABSOLUTE_MINIMUM]
+ {NOW_AND_READY_TO_DRAW_BIN, // [NOW_AND_READY_TO_DRAW_BIN]
+ NOW_BIN, // [NOW_BIN]
+ NEVER_BIN, // [SOON_BIN]
+ NEVER_BIN, // [EVENTUALLY_AND_ACTIVE_BIN]
+ NEVER_BIN, // [EVENTUALLY_BIN]
+ NEVER_BIN, // [AT_LAST_AND_ACTIVE_BIN]
+ NEVER_BIN, // [AT_LAST_BIN]
+ NEVER_BIN // [NEVER_BIN]
+ },
+ // [ALLOW_PREPAINT_ONLY]
+ {NOW_AND_READY_TO_DRAW_BIN, // [NOW_AND_READY_TO_DRAW_BIN]
+ NOW_BIN, // [NOW_BIN]
+ SOON_BIN, // [SOON_BIN]
+ NEVER_BIN, // [EVENTUALLY_AND_ACTIVE_BIN]
+ NEVER_BIN, // [EVENTUALLY_BIN]
+ NEVER_BIN, // [AT_LAST_AND_ACTIVE_BIN]
+ NEVER_BIN, // [AT_LAST_BIN]
+ NEVER_BIN // [NEVER_BIN]
+ },
+ // [ALLOW_ANYTHING]
+ {NOW_AND_READY_TO_DRAW_BIN, // [NOW_AND_READY_TO_DRAW_BIN]
+ NOW_BIN, // [NOW_BIN]
+ SOON_BIN, // [SOON_BIN]
+ EVENTUALLY_AND_ACTIVE_BIN, // [EVENTUALLY_AND_ACTIVE_BIN]
+ EVENTUALLY_BIN, // [EVENTUALLY_BIN]
+ AT_LAST_AND_ACTIVE_BIN, // [AT_LAST_AND_ACTIVE_BIN]
+ AT_LAST_BIN, // [AT_LAST_BIN]
+ NEVER_BIN // [NEVER_BIN]
+ }};
+
+// Ready to draw works by mapping NOW_BIN to NOW_AND_READY_TO_DRAW_BIN.
+const ManagedTileBin kBinReadyToDrawMap[2][NUM_BINS] = {
+ // Not ready
+ {NOW_AND_READY_TO_DRAW_BIN, // [NOW_AND_READY_TO_DRAW_BIN]
+ NOW_BIN, // [NOW_BIN]
+ SOON_BIN, // [SOON_BIN]
+ EVENTUALLY_AND_ACTIVE_BIN, // [EVENTUALLY_AND_ACTIVE_BIN]
+ EVENTUALLY_BIN, // [EVENTUALLY_BIN]
+ AT_LAST_AND_ACTIVE_BIN, // [AT_LAST_AND_ACTIVE_BIN]
+ AT_LAST_BIN, // [AT_LAST_BIN]
+ NEVER_BIN // [NEVER_BIN]
+ },
+ // Ready
+ {NOW_AND_READY_TO_DRAW_BIN, // [NOW_AND_READY_TO_DRAW_BIN]
+ NOW_AND_READY_TO_DRAW_BIN, // [NOW_BIN]
+ SOON_BIN, // [SOON_BIN]
+ EVENTUALLY_AND_ACTIVE_BIN, // [EVENTUALLY_AND_ACTIVE_BIN]
+ EVENTUALLY_BIN, // [EVENTUALLY_BIN]
+ AT_LAST_AND_ACTIVE_BIN, // [AT_LAST_AND_ACTIVE_BIN]
+ AT_LAST_BIN, // [AT_LAST_BIN]
+ NEVER_BIN // [NEVER_BIN]
+ }};
+
+// Active works by mapping some bin states to equivalent _ACTIVE_BIN state.
+const ManagedTileBin kBinIsActiveMap[2][NUM_BINS] = {
+ // Inactive
+ {NOW_AND_READY_TO_DRAW_BIN, // [NOW_AND_READY_TO_DRAW_BIN]
+ NOW_BIN, // [NOW_BIN]
+ SOON_BIN, // [SOON_BIN]
+ EVENTUALLY_AND_ACTIVE_BIN, // [EVENTUALLY_AND_ACTIVE_BIN]
+ EVENTUALLY_BIN, // [EVENTUALLY_BIN]
+ AT_LAST_AND_ACTIVE_BIN, // [AT_LAST_AND_ACTIVE_BIN]
+ AT_LAST_BIN, // [AT_LAST_BIN]
+ NEVER_BIN // [NEVER_BIN]
+ },
+ // Active
+ {NOW_AND_READY_TO_DRAW_BIN, // [NOW_AND_READY_TO_DRAW_BIN]
+ NOW_BIN, // [NOW_BIN]
+ SOON_BIN, // [SOON_BIN]
+ EVENTUALLY_AND_ACTIVE_BIN, // [EVENTUALLY_AND_ACTIVE_BIN]
+ EVENTUALLY_AND_ACTIVE_BIN, // [EVENTUALLY_BIN]
+ AT_LAST_AND_ACTIVE_BIN, // [AT_LAST_AND_ACTIVE_BIN]
+ AT_LAST_AND_ACTIVE_BIN, // [AT_LAST_BIN]
+ NEVER_BIN // [NEVER_BIN]
+ }};
+
+// Determine bin based on three categories of tiles: things we need now,
+// things we need soon, and eventually.
+inline ManagedTileBin BinFromTilePriority(const TilePriority& prio) {
+ if (prio.priority_bin == TilePriority::NOW)
+ return NOW_BIN;
+
+ if (prio.priority_bin == TilePriority::SOON)
+ return SOON_BIN;
+
+ if (prio.distance_to_visible == std::numeric_limits<float>::infinity())
+ return NEVER_BIN;
+
+ return EVENTUALLY_BIN;
+}
+
+} // namespace
+
+RasterTaskCompletionStats::RasterTaskCompletionStats()
+ : completed_count(0u), canceled_count(0u) {}
+
+scoped_refptr<base::debug::ConvertableToTraceFormat>
+RasterTaskCompletionStatsAsValue(const RasterTaskCompletionStats& stats) {
+ scoped_refptr<base::debug::TracedValue> state =
+ new base::debug::TracedValue();
+ state->SetInteger("completed_count", stats.completed_count);
+ state->SetInteger("canceled_count", stats.canceled_count);
+ return state;
+}
+
+// static
+scoped_ptr<TileManager> TileManager::Create(
+ TileManagerClient* client,
+ base::SequencedTaskRunner* task_runner,
+ ResourcePool* resource_pool,
+ Rasterizer* rasterizer,
+ RenderingStatsInstrumentation* rendering_stats_instrumentation) {
+ return make_scoped_ptr(new TileManager(client,
+ task_runner,
+ resource_pool,
+ rasterizer,
+ rendering_stats_instrumentation));
+}
+
+TileManager::TileManager(
+ TileManagerClient* client,
+ const scoped_refptr<base::SequencedTaskRunner>& task_runner,
+ ResourcePool* resource_pool,
+ Rasterizer* rasterizer,
+ RenderingStatsInstrumentation* rendering_stats_instrumentation)
+ : client_(client),
+ task_runner_(task_runner),
+ resource_pool_(resource_pool),
+ rasterizer_(rasterizer),
+ prioritized_tiles_dirty_(false),
+ all_tiles_that_need_to_be_rasterized_have_memory_(true),
+ all_tiles_required_for_activation_have_memory_(true),
+ bytes_releasable_(0),
+ resources_releasable_(0),
+ ever_exceeded_memory_budget_(false),
+ rendering_stats_instrumentation_(rendering_stats_instrumentation),
+ did_initialize_visible_tile_(false),
+ did_check_for_completed_tasks_since_last_schedule_tasks_(true),
+ did_oom_on_last_assign_(false),
+ ready_to_activate_check_notifier_(
+ task_runner_.get(),
+ base::Bind(&TileManager::CheckIfReadyToActivate,
+ base::Unretained(this))) {
+ rasterizer_->SetClient(this);
+}
+
+TileManager::~TileManager() {
+ // Reset global state and manage. This should cause
+ // our memory usage to drop to zero.
+ global_state_ = GlobalStateThatImpactsTilePriority();
+
+ RasterTaskQueue empty;
+ rasterizer_->ScheduleTasks(&empty);
+ orphan_raster_tasks_.clear();
+
+ // This should finish all pending tasks and release any uninitialized
+ // resources.
+ rasterizer_->Shutdown();
+ rasterizer_->CheckForCompletedTasks();
+
+ prioritized_tiles_.Clear();
+
+ FreeResourcesForReleasedTiles();
+ CleanUpReleasedTiles();
+
+ DCHECK_EQ(0u, bytes_releasable_);
+ DCHECK_EQ(0u, resources_releasable_);
+}
+
+void TileManager::Release(Tile* tile) {
+ DCHECK(TilePriority() == tile->combined_priority());
+
+ prioritized_tiles_dirty_ = true;
+ released_tiles_.push_back(tile);
+}
+
+void TileManager::DidChangeTilePriority(Tile* tile) {
+ prioritized_tiles_dirty_ = true;
+}
+
+TaskSetCollection TileManager::TasksThatShouldBeForcedToComplete() const {
+ TaskSetCollection tasks_that_should_be_forced_to_complete;
+ if (global_state_.tree_priority != SMOOTHNESS_TAKES_PRIORITY)
+ tasks_that_should_be_forced_to_complete[REQUIRED_FOR_ACTIVATION] = true;
+ return tasks_that_should_be_forced_to_complete;
+}
+
+void TileManager::FreeResourcesForReleasedTiles() {
+ for (std::vector<Tile*>::iterator it = released_tiles_.begin();
+ it != released_tiles_.end();
+ ++it) {
+ Tile* tile = *it;
+ FreeResourcesForTile(tile);
+ }
+}
+
+void TileManager::CleanUpReleasedTiles() {
+ // Make sure |prioritized_tiles_| doesn't contain any of the tiles
+ // we're about to delete.
+ DCHECK(prioritized_tiles_.IsEmpty());
+
+ std::vector<Tile*>::iterator it = released_tiles_.begin();
+ while (it != released_tiles_.end()) {
+ Tile* tile = *it;
+
+ if (tile->HasRasterTask()) {
+ ++it;
+ continue;
+ }
+
+ DCHECK(!tile->HasResources());
+ DCHECK(tiles_.find(tile->id()) != tiles_.end());
+ tiles_.erase(tile->id());
+
+ LayerCountMap::iterator layer_it =
+ used_layer_counts_.find(tile->layer_id());
+ DCHECK_GT(layer_it->second, 0);
+ if (--layer_it->second == 0) {
+ used_layer_counts_.erase(layer_it);
+ image_decode_tasks_.erase(tile->layer_id());
+ }
+
+ delete tile;
+ it = released_tiles_.erase(it);
+ }
+}
+
+void TileManager::UpdatePrioritizedTileSetIfNeeded() {
+ if (!prioritized_tiles_dirty_)
+ return;
+
+ prioritized_tiles_.Clear();
+
+ FreeResourcesForReleasedTiles();
+ CleanUpReleasedTiles();
+
+ GetTilesWithAssignedBins(&prioritized_tiles_);
+ prioritized_tiles_dirty_ = false;
+}
+
+void TileManager::DidFinishRunningTasks(TaskSet task_set) {
+ if (task_set == ALL) {
+ TRACE_EVENT1("cc", "TileManager::DidFinishRunningTasks", "task_set", "ALL");
+
+ bool memory_usage_above_limit = resource_pool_->total_memory_usage_bytes() >
+ global_state_.soft_memory_limit_in_bytes;
+
+ // When OOM, keep re-assigning memory until we reach a steady state
+ // where top-priority tiles are initialized.
+ if (all_tiles_that_need_to_be_rasterized_have_memory_ &&
+ !memory_usage_above_limit)
+ return;
+
+ rasterizer_->CheckForCompletedTasks();
+ did_check_for_completed_tasks_since_last_schedule_tasks_ = true;
+
+ TileVector tiles_that_need_to_be_rasterized;
+ AssignGpuMemoryToTiles(&prioritized_tiles_,
+ &tiles_that_need_to_be_rasterized);
+
+ // |tiles_that_need_to_be_rasterized| will be empty when we reach a
+ // steady memory state. Keep scheduling tasks until we reach this state.
+ if (!tiles_that_need_to_be_rasterized.empty()) {
+ ScheduleTasks(tiles_that_need_to_be_rasterized);
+ return;
+ }
+
+ FreeResourcesForReleasedTiles();
+
+ resource_pool_->ReduceResourceUsage();
+
+ // We don't reserve memory for required-for-activation tiles during
+ // accelerated gestures, so we just postpone activation when we don't
+ // have these tiles, and activate after the accelerated gesture.
+ // Likewise if we don't allow any tiles (as is the case when we're
+ // invisible), if we have tiles that aren't ready, then we shouldn't
+ // activate as activation can cause checkerboards.
+ bool allow_rasterize_on_demand =
+ global_state_.tree_priority != SMOOTHNESS_TAKES_PRIORITY &&
+ global_state_.memory_limit_policy != ALLOW_NOTHING;
+
+ // Use on-demand raster for any required-for-activation tiles that have not
+ // been assigned memory after reaching a steady memory state. This
+ // ensures that we activate even when OOM.
+ for (TileMap::iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
+ Tile* tile = it->second;
+ ManagedTileState& mts = tile->managed_state();
+
+ if (tile->required_for_activation() && !mts.draw_info.IsReadyToDraw()) {
+ // If we can't raster on demand, give up early (and don't activate).
+ if (!allow_rasterize_on_demand)
+ return;
+
+ mts.draw_info.set_rasterize_on_demand();
+ client_->NotifyTileStateChanged(tile);
+ }
+ }
+
+ DCHECK(IsReadyToActivate());
+ ready_to_activate_check_notifier_.Schedule();
+ return;
+ }
+
+ if (task_set == REQUIRED_FOR_ACTIVATION) {
+ TRACE_EVENT2("cc",
+ "TileManager::DidFinishRunningTasks",
+ "task_set",
+ "REQUIRED_FOR_ACTIVATION",
+ "all_tiles_required_for_activation_have_memory",
+ all_tiles_required_for_activation_have_memory_);
+ // This is only a true indication that all tiles required for
+ // activation are initialized when no tiles are OOM. We need to
+ // wait for DidFinishRunningTasks() to be called, try to re-assign
+ // memory and in worst case use on-demand raster when tiles
+ // required for activation are OOM.
+ if (!all_tiles_required_for_activation_have_memory_)
+ return;
+
+ ready_to_activate_check_notifier_.Schedule();
+ }
+}
+
+void TileManager::GetTilesWithAssignedBins(PrioritizedTileSet* tiles) {
+ TRACE_EVENT0("cc", "TileManager::GetTilesWithAssignedBins");
+
+ const TileMemoryLimitPolicy memory_policy = global_state_.memory_limit_policy;
+ const TreePriority tree_priority = global_state_.tree_priority;
+
+ // For each tree, bin into different categories of tiles.
+ for (TileMap::const_iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
+ Tile* tile = it->second;
+ ManagedTileState& mts = tile->managed_state();
+
+ bool tile_is_ready_to_draw = mts.draw_info.IsReadyToDraw();
+ bool tile_is_active = tile_is_ready_to_draw || mts.raster_task.get();
+
+ // Get the active priority and bin.
+ TilePriority active_priority = tile->priority(ACTIVE_TREE);
+ ManagedTileBin active_bin = BinFromTilePriority(active_priority);
+
+ // Get the pending priority and bin.
+ TilePriority pending_priority = tile->priority(PENDING_TREE);
+ ManagedTileBin pending_bin = BinFromTilePriority(pending_priority);
+
+ bool pending_is_low_res = pending_priority.resolution == LOW_RESOLUTION;
+ bool pending_is_non_ideal =
+ pending_priority.resolution == NON_IDEAL_RESOLUTION;
+ bool active_is_non_ideal =
+ active_priority.resolution == NON_IDEAL_RESOLUTION;
+
+ // Adjust bin state based on if ready to draw.
+ active_bin = kBinReadyToDrawMap[tile_is_ready_to_draw][active_bin];
+ pending_bin = kBinReadyToDrawMap[tile_is_ready_to_draw][pending_bin];
+
+ // Adjust bin state based on if active.
+ active_bin = kBinIsActiveMap[tile_is_active][active_bin];
+ pending_bin = kBinIsActiveMap[tile_is_active][pending_bin];
+
+ // We never want to paint new non-ideal tiles, as we always have
+ // a high-res tile covering that content (paint that instead).
+ if (!tile_is_ready_to_draw && active_is_non_ideal)
+ active_bin = NEVER_BIN;
+ if (!tile_is_ready_to_draw && pending_is_non_ideal)
+ pending_bin = NEVER_BIN;
+
+ ManagedTileBin tree_bin[NUM_TREES];
+ tree_bin[ACTIVE_TREE] = kBinPolicyMap[memory_policy][active_bin];
+ tree_bin[PENDING_TREE] = kBinPolicyMap[memory_policy][pending_bin];
+
+ // Adjust pending bin state for low res tiles. This prevents pending tree
+ // low-res tiles from being initialized before high-res tiles.
+ if (pending_is_low_res)
+ tree_bin[PENDING_TREE] = std::max(tree_bin[PENDING_TREE], EVENTUALLY_BIN);
+
+ TilePriority tile_priority;
+ switch (tree_priority) {
+ case SAME_PRIORITY_FOR_BOTH_TREES:
+ mts.bin = std::min(tree_bin[ACTIVE_TREE], tree_bin[PENDING_TREE]);
+ tile_priority = tile->combined_priority();
+ break;
+ case SMOOTHNESS_TAKES_PRIORITY:
+ mts.bin = tree_bin[ACTIVE_TREE];
+ tile_priority = active_priority;
+ break;
+ case NEW_CONTENT_TAKES_PRIORITY:
+ mts.bin = tree_bin[PENDING_TREE];
+ tile_priority = pending_priority;
+ break;
+ default:
+ NOTREACHED();
+ }
+
+ // Bump up the priority if we determined it's NEVER_BIN on one tree,
+ // but is still required on the other tree.
+ bool is_in_never_bin_on_both_trees = tree_bin[ACTIVE_TREE] == NEVER_BIN &&
+ tree_bin[PENDING_TREE] == NEVER_BIN;
+
+ if (mts.bin == NEVER_BIN && !is_in_never_bin_on_both_trees)
+ mts.bin = tile_is_active ? AT_LAST_AND_ACTIVE_BIN : AT_LAST_BIN;
+
+ mts.resolution = tile_priority.resolution;
+ mts.priority_bin = tile_priority.priority_bin;
+ mts.distance_to_visible = tile_priority.distance_to_visible;
+ mts.required_for_activation = tile_priority.required_for_activation;
+
+ mts.visible_and_ready_to_draw =
+ tree_bin[ACTIVE_TREE] == NOW_AND_READY_TO_DRAW_BIN;
+
+ // Tiles that are required for activation shouldn't be in NEVER_BIN unless
+ // smoothness takes priority or memory policy allows nothing to be
+ // initialized.
+ DCHECK(!mts.required_for_activation || mts.bin != NEVER_BIN ||
+ tree_priority == SMOOTHNESS_TAKES_PRIORITY ||
+ memory_policy == ALLOW_NOTHING);
+
+ // If the tile is in NEVER_BIN and it does not have an active task, then we
+ // can release the resources early. If it does have the task however, we
+ // should keep it in the prioritized tile set to ensure that AssignGpuMemory
+ // can visit it.
+ if (mts.bin == NEVER_BIN && !mts.raster_task.get()) {
+ FreeResourcesForTileAndNotifyClientIfTileWasReadyToDraw(tile);
+ continue;
+ }
+
+ // Insert the tile into a priority set.
+ tiles->InsertTile(tile, mts.bin);
+ }
+}
+
+void TileManager::ManageTiles(const GlobalStateThatImpactsTilePriority& state) {
+ TRACE_EVENT0("cc", "TileManager::ManageTiles");
+
+ // Update internal state.
+ if (state != global_state_) {
+ global_state_ = state;
+ prioritized_tiles_dirty_ = true;
+ }
+
+ // We need to call CheckForCompletedTasks() once in-between each call
+ // to ScheduleTasks() to prevent canceled tasks from being scheduled.
+ if (!did_check_for_completed_tasks_since_last_schedule_tasks_) {
+ rasterizer_->CheckForCompletedTasks();
+ did_check_for_completed_tasks_since_last_schedule_tasks_ = true;
+ }
+
+ UpdatePrioritizedTileSetIfNeeded();
+
+ TileVector tiles_that_need_to_be_rasterized;
+ AssignGpuMemoryToTiles(&prioritized_tiles_,
+ &tiles_that_need_to_be_rasterized);
+
+ // Finally, schedule rasterizer tasks.
+ ScheduleTasks(tiles_that_need_to_be_rasterized);
+
+ TRACE_EVENT_INSTANT1("cc",
+ "DidManage",
+ TRACE_EVENT_SCOPE_THREAD,
+ "state",
+ BasicStateAsValue());
+
+ TRACE_COUNTER_ID1("cc",
+ "unused_memory_bytes",
+ this,
+ resource_pool_->total_memory_usage_bytes() -
+ resource_pool_->acquired_memory_usage_bytes());
+}
+
+bool TileManager::UpdateVisibleTiles() {
+ TRACE_EVENT0("cc", "TileManager::UpdateVisibleTiles");
+
+ rasterizer_->CheckForCompletedTasks();
+ did_check_for_completed_tasks_since_last_schedule_tasks_ = true;
+
+ TRACE_EVENT_INSTANT1(
+ "cc",
+ "DidUpdateVisibleTiles",
+ TRACE_EVENT_SCOPE_THREAD,
+ "stats",
+ RasterTaskCompletionStatsAsValue(update_visible_tiles_stats_));
+ update_visible_tiles_stats_ = RasterTaskCompletionStats();
+
+ bool did_initialize_visible_tile = did_initialize_visible_tile_;
+ did_initialize_visible_tile_ = false;
+ return did_initialize_visible_tile;
+}
+
+scoped_refptr<base::debug::ConvertableToTraceFormat>
+TileManager::BasicStateAsValue() const {
+ scoped_refptr<base::debug::TracedValue> value =
+ new base::debug::TracedValue();
+ BasicStateAsValueInto(value.get());
+ return value;
+}
+
+void TileManager::BasicStateAsValueInto(base::debug::TracedValue* state) const {
+ state->SetInteger("tile_count", tiles_.size());
+ state->SetBoolean("did_oom_on_last_assign", did_oom_on_last_assign_);
+ state->BeginDictionary("global_state");
+ global_state_.AsValueInto(state);
+ state->EndDictionary();
+}
+
+// Walks |tiles| in priority order and, within the soft/hard byte budgets and
+// the resource-count budget, decides which tiles keep or receive memory.
+// Tiles that fit the budget and still need rasterization are appended to
+// |tiles_that_need_to_be_rasterized|; tiles that do not fit are freed.
+// Also updates OOM bookkeeping and |memory_stats_from_last_assign_|.
+void TileManager::AssignGpuMemoryToTiles(
+    PrioritizedTileSet* tiles,
+    TileVector* tiles_that_need_to_be_rasterized) {
+  TRACE_EVENT0("cc", "TileManager::AssignGpuMemoryToTiles");
+
+  // Maintain the list of released resources that can potentially be re-used
+  // or deleted.
+  // If this operation becomes expensive too, only do this after some
+  // resource(s) was returned. Note that in that case, one also need to
+  // invalidate when releasing some resource from the pool.
+  resource_pool_->CheckBusyResources();
+
+  // Now give memory out to the tiles until we're out, and build
+  // the needs-to-be-rasterized queue.
+  all_tiles_that_need_to_be_rasterized_have_memory_ = true;
+  all_tiles_required_for_activation_have_memory_ = true;
+
+  // Cast to prevent overflow. |bytes_releasable_| is added back because
+  // already-assigned tiles can be freed to make room as we iterate.
+  int64 soft_bytes_available =
+      static_cast<int64>(bytes_releasable_) +
+      static_cast<int64>(global_state_.soft_memory_limit_in_bytes) -
+      static_cast<int64>(resource_pool_->acquired_memory_usage_bytes());
+  int64 hard_bytes_available =
+      static_cast<int64>(bytes_releasable_) +
+      static_cast<int64>(global_state_.hard_memory_limit_in_bytes) -
+      static_cast<int64>(resource_pool_->acquired_memory_usage_bytes());
+  int resources_available = resources_releasable_ +
+                            global_state_.num_resources_limit -
+                            resource_pool_->acquired_resource_count();
+  size_t soft_bytes_allocatable =
+      std::max(static_cast<int64>(0), soft_bytes_available);
+  size_t hard_bytes_allocatable =
+      std::max(static_cast<int64>(0), hard_bytes_available);
+  size_t resources_allocatable = std::max(0, resources_available);
+
+  size_t bytes_that_exceeded_memory_budget = 0;
+  size_t soft_bytes_left = soft_bytes_allocatable;
+  size_t hard_bytes_left = hard_bytes_allocatable;
+
+  size_t resources_left = resources_allocatable;
+  bool oomed_soft = false;
+  bool oomed_hard = false;
+  bool have_hit_soft_memory = false;  // Soft memory comes after hard.
+
+  unsigned schedule_priority = 1u;
+  for (PrioritizedTileSet::Iterator it(tiles, true); it; ++it) {
+    Tile* tile = *it;
+    ManagedTileState& mts = tile->managed_state();
+    mts.scheduled_priority = schedule_priority++;
+
+    // If this tile doesn't need a resource, then nothing to do.
+    if (!mts.draw_info.requires_resource())
+      continue;
+
+    // If the tile is not needed, free it up.
+    if (mts.bin == NEVER_BIN) {
+      FreeResourcesForTileAndNotifyClientIfTileWasReadyToDraw(tile);
+      continue;
+    }
+
+    const bool tile_uses_hard_limit = mts.bin <= NOW_BIN;
+    const size_t bytes_if_allocated = BytesConsumedIfAllocated(tile);
+    const size_t tile_bytes_left =
+        (tile_uses_hard_limit) ? hard_bytes_left : soft_bytes_left;
+
+    // Hard-limit is reserved for tiles that would cause a calamity
+    // if they were to go away, so by definition they are the highest
+    // priority memory, and must be at the front of the list.
+    DCHECK(!(have_hit_soft_memory && tile_uses_hard_limit));
+    have_hit_soft_memory |= !tile_uses_hard_limit;
+
+    // Bytes/resources this tile will charge against the budget.
+    size_t tile_bytes = 0;
+    size_t tile_resources = 0;
+
+    // It costs to maintain a resource.
+    if (mts.draw_info.resource_) {
+      tile_bytes += bytes_if_allocated;
+      tile_resources++;
+    }
+
+    // Allow lower priority tiles with initialized resources to keep
+    // their memory by only assigning memory to new raster tasks if
+    // they can be scheduled.
+    bool reached_scheduled_raster_tasks_limit =
+        tiles_that_need_to_be_rasterized->size() >= kScheduledRasterTasksLimit;
+    if (!reached_scheduled_raster_tasks_limit) {
+      // If we don't have the required version, and it's not in flight
+      // then we'll have to pay to create a new task.
+      if (!mts.draw_info.resource_ && !mts.raster_task.get()) {
+        tile_bytes += bytes_if_allocated;
+        tile_resources++;
+      }
+    }
+
+    // Tile is OOM.
+    if (tile_bytes > tile_bytes_left || tile_resources > resources_left) {
+      FreeResourcesForTileAndNotifyClientIfTileWasReadyToDraw(tile);
+
+      // This tile was already on screen and now its resources have been
+      // released. In order to prevent checkerboarding, set this tile as
+      // rasterize on demand immediately.
+      if (mts.visible_and_ready_to_draw)
+        mts.draw_info.set_rasterize_on_demand();
+
+      oomed_soft = true;
+      if (tile_uses_hard_limit) {
+        oomed_hard = true;
+        bytes_that_exceeded_memory_budget += tile_bytes;
+      }
+    } else {
+      resources_left -= tile_resources;
+      hard_bytes_left -= tile_bytes;
+      // Soft budget can underflow since hard-limit tiles also consume it;
+      // clamp at zero instead of wrapping the unsigned value.
+      soft_bytes_left =
+          (soft_bytes_left > tile_bytes) ? soft_bytes_left - tile_bytes : 0;
+      if (mts.draw_info.resource_)
+        continue;
+    }
+
+    DCHECK(!mts.draw_info.resource_);
+
+    // Tile shouldn't be rasterized if |tiles_that_need_to_be_rasterized|
+    // has reached its limit or we've failed to assign gpu memory to this
+    // or any higher priority tile. Preventing tiles that fit into memory
+    // budget to be rasterized when higher priority tile is oom is
+    // important for two reasons:
+    // 1. Tile size should not impact raster priority.
+    // 2. Tiles with existing raster task could otherwise incorrectly
+    //    be added as they are not affected by |bytes_allocatable|.
+    bool can_schedule_tile =
+        !oomed_soft && !reached_scheduled_raster_tasks_limit;
+
+    if (!can_schedule_tile) {
+      all_tiles_that_need_to_be_rasterized_have_memory_ = false;
+      if (tile->required_for_activation())
+        all_tiles_required_for_activation_have_memory_ = false;
+      it.DisablePriorityOrdering();
+      continue;
+    }
+
+    tiles_that_need_to_be_rasterized->push_back(tile);
+  }
+
+  // OOM reporting uses hard-limit, soft-OOM is normal depending on limit.
+  ever_exceeded_memory_budget_ |= oomed_hard;
+  if (ever_exceeded_memory_budget_) {
+    TRACE_COUNTER_ID2("cc",
+                      "over_memory_budget",
+                      this,
+                      "budget",
+                      global_state_.hard_memory_limit_in_bytes,
+                      "over",
+                      bytes_that_exceeded_memory_budget);
+  }
+  did_oom_on_last_assign_ = oomed_hard;
+  UMA_HISTOGRAM_BOOLEAN("TileManager.ExceededMemoryBudget", oomed_hard);
+  memory_stats_from_last_assign_.total_budget_in_bytes =
+      global_state_.hard_memory_limit_in_bytes;
+  memory_stats_from_last_assign_.bytes_allocated =
+      hard_bytes_allocatable - hard_bytes_left;
+  memory_stats_from_last_assign_.bytes_unreleasable =
+      resource_pool_->acquired_memory_usage_bytes() - bytes_releasable_;
+  memory_stats_from_last_assign_.bytes_over = bytes_that_exceeded_memory_budget;
+}
+
+// Returns the tile's resource (if any) to the pool and updates the
+// releasable byte/resource accounting. No-op for resourceless tiles.
+void TileManager::FreeResourcesForTile(Tile* tile) {
+  ManagedTileState& mts = tile->managed_state();
+  if (mts.draw_info.resource_) {
+    resource_pool_->ReleaseResource(mts.draw_info.resource_.Pass());
+
+    // Accounting must never go negative; these counters were incremented
+    // when the resource was assigned (OnRasterTaskCompleted / testing hooks).
+    DCHECK_GE(bytes_releasable_, BytesConsumedIfAllocated(tile));
+    DCHECK_GE(resources_releasable_, 1u);
+
+    bytes_releasable_ -= BytesConsumedIfAllocated(tile);
+    --resources_releasable_;
+  }
+}
+
+// Frees the tile's resources and, if that transition made a previously
+// ready-to-draw tile unready, notifies the client of the state change.
+void TileManager::FreeResourcesForTileAndNotifyClientIfTileWasReadyToDraw(
+    Tile* tile) {
+  bool was_ready_to_draw = tile->IsReadyToDraw();
+  FreeResourcesForTile(tile);
+  if (was_ready_to_draw)
+    client_->NotifyTileStateChanged(tile);
+}
+
+// Rebuilds |raster_queue_| from |tiles_that_need_to_be_rasterized| (highest
+// priority first) and hands it to the rasterizer; tasks absent from the new
+// queue are implicitly canceled.
+void TileManager::ScheduleTasks(
+    const TileVector& tiles_that_need_to_be_rasterized) {
+  TRACE_EVENT1("cc",
+               "TileManager::ScheduleTasks",
+               "count",
+               tiles_that_need_to_be_rasterized.size());
+
+  DCHECK(did_check_for_completed_tasks_since_last_schedule_tasks_);
+
+  raster_queue_.Reset();
+
+  // Build a new task queue containing all task currently needed. Tasks
+  // are added in order of priority, highest priority task first.
+  for (TileVector::const_iterator it = tiles_that_need_to_be_rasterized.begin();
+       it != tiles_that_need_to_be_rasterized.end();
+       ++it) {
+    Tile* tile = *it;
+    ManagedTileState& mts = tile->managed_state();
+
+    DCHECK(mts.draw_info.requires_resource());
+    DCHECK(!mts.draw_info.resource_);
+
+    // Reuse an in-flight raster task if the tile already has one.
+    if (!mts.raster_task.get())
+      mts.raster_task = CreateRasterTask(tile);
+
+    TaskSetCollection task_sets;
+    if (tile->required_for_activation())
+      task_sets.set(REQUIRED_FOR_ACTIVATION);
+    task_sets.set(ALL);
+    raster_queue_.items.push_back(
+        RasterTaskQueue::Item(mts.raster_task.get(), task_sets));
+  }
+
+  // We must reduce the amount of unused resources before calling
+  // ScheduleTasks to prevent usage from rising above limits.
+  resource_pool_->ReduceResourceUsage();
+
+  // Schedule running of |raster_queue_|. This replaces any previously
+  // scheduled tasks and effectively cancels all tasks not present
+  // in |raster_queue_|.
+  rasterizer_->ScheduleTasks(&raster_queue_);
+
+  // It's now safe to clean up orphan tasks as raster worker pool is not
+  // allowed to keep around unreferenced raster tasks after ScheduleTasks() has
+  // been called.
+  orphan_raster_tasks_.clear();
+
+  did_check_for_completed_tasks_since_last_schedule_tasks_ = false;
+}
+
+// Creates a decode task for |pixel_ref| belonging to |tile|'s layer. The
+// completion callback cleans up |image_decode_tasks_| bookkeeping if the
+// task was canceled (see OnImageDecodeTaskCompleted).
+scoped_refptr<ImageDecodeTask> TileManager::CreateImageDecodeTask(
+    Tile* tile,
+    SkPixelRef* pixel_ref) {
+  // base::Unretained is used for |this| and |pixel_ref|; presumably the task
+  // cannot outlive the manager / pixel ref here -- TODO confirm lifetimes.
+  return make_scoped_refptr(new ImageDecodeTaskImpl(
+      pixel_ref,
+      tile->layer_id(),
+      rendering_stats_instrumentation_,
+      base::Bind(&TileManager::OnImageDecodeTaskCompleted,
+                 base::Unretained(this),
+                 tile->layer_id(),
+                 base::Unretained(pixel_ref))));
+}
+
+// Acquires a resource for |tile|, gathers (creating as needed) the image
+// decode tasks its content depends on, and returns a raster task whose
+// completion hands the resource back via OnRasterTaskCompleted.
+scoped_refptr<RasterTask> TileManager::CreateRasterTask(Tile* tile) {
+  ManagedTileState& mts = tile->managed_state();
+
+  scoped_ptr<ScopedResource> resource =
+      resource_pool_->AcquireResource(tile->size());
+  // Keep a non-owning pointer for the task; ownership travels through the
+  // completion callback via base::Passed.
+  const ScopedResource* const_resource = resource.get();
+
+  // Create and queue all image decode tasks that this tile depends on.
+  ImageDecodeTask::Vector decode_tasks;
+  PixelRefTaskMap& existing_pixel_refs = image_decode_tasks_[tile->layer_id()];
+  for (PicturePileImpl::PixelRefIterator iter(
+           tile->content_rect(), tile->contents_scale(), tile->picture_pile());
+       iter;
+       ++iter) {
+    SkPixelRef* pixel_ref = *iter;
+    uint32_t id = pixel_ref->getGenerationID();
+
+    // Append existing image decode task if available.
+    PixelRefTaskMap::iterator decode_task_it = existing_pixel_refs.find(id);
+    if (decode_task_it != existing_pixel_refs.end()) {
+      decode_tasks.push_back(decode_task_it->second);
+      continue;
+    }
+
+    // Create and append new image decode task for this pixel ref.
+    scoped_refptr<ImageDecodeTask> decode_task =
+        CreateImageDecodeTask(tile, pixel_ref);
+    decode_tasks.push_back(decode_task);
+    existing_pixel_refs[id] = decode_task;
+  }
+
+  return make_scoped_refptr(
+      new RasterTaskImpl(const_resource,
+                         tile->picture_pile(),
+                         tile->content_rect(),
+                         tile->contents_scale(),
+                         mts.resolution,
+                         tile->layer_id(),
+                         static_cast<const void*>(tile),
+                         tile->source_frame_number(),
+                         tile->use_picture_analysis(),
+                         rendering_stats_instrumentation_,
+                         base::Bind(&TileManager::OnRasterTaskCompleted,
+                                    base::Unretained(this),
+                                    tile->id(),
+                                    base::Passed(&resource)),
+                         &decode_tasks));
+}
+
+// Completion callback for image decode tasks. Completed tasks stay cached in
+// |image_decode_tasks_| for reuse; canceled tasks are removed so a future
+// CreateRasterTask() creates a fresh one.
+void TileManager::OnImageDecodeTaskCompleted(int layer_id,
+                                             SkPixelRef* pixel_ref,
+                                             bool was_canceled) {
+  // If the task was canceled, we need to clean it up
+  // from |image_decode_tasks_|.
+  if (!was_canceled)
+    return;
+
+  LayerPixelRefTaskMap::iterator layer_it = image_decode_tasks_.find(layer_id);
+  if (layer_it == image_decode_tasks_.end())
+    return;
+
+  PixelRefTaskMap& pixel_ref_tasks = layer_it->second;
+  PixelRefTaskMap::iterator task_it =
+      pixel_ref_tasks.find(pixel_ref->getGenerationID());
+
+  if (task_it != pixel_ref_tasks.end())
+    pixel_ref_tasks.erase(task_it);
+}
+
+// Completion callback for raster tasks. Takes ownership of |resource|:
+// releases it on cancel or solid-color analysis, otherwise assigns it to the
+// tile's draw info. Updates stats and notifies the client of the change.
+void TileManager::OnRasterTaskCompleted(
+    Tile::Id tile_id,
+    scoped_ptr<ScopedResource> resource,
+    const PicturePileImpl::Analysis& analysis,
+    bool was_canceled) {
+  DCHECK(tiles_.find(tile_id) != tiles_.end());
+
+  Tile* tile = tiles_[tile_id];
+  ManagedTileState& mts = tile->managed_state();
+  DCHECK(mts.raster_task.get());
+  // Keep the finished task alive until the next ScheduleTasks() (see
+  // |orphan_raster_tasks_| cleanup there).
+  orphan_raster_tasks_.push_back(mts.raster_task);
+  mts.raster_task = NULL;
+
+  if (was_canceled) {
+    ++update_visible_tiles_stats_.canceled_count;
+    resource_pool_->ReleaseResource(resource.Pass());
+    return;
+  }
+
+  ++update_visible_tiles_stats_.completed_count;
+
+  if (analysis.is_solid_color) {
+    // Solid-color tiles draw without a resource; return it to the pool.
+    mts.draw_info.set_solid_color(analysis.solid_color);
+    resource_pool_->ReleaseResource(resource.Pass());
+  } else {
+    mts.draw_info.set_use_resource();
+    mts.draw_info.resource_ = resource.Pass();
+
+    bytes_releasable_ += BytesConsumedIfAllocated(tile);
+    ++resources_releasable_;
+  }
+
+  // A distance of zero means the tile intersects the viewport on the active
+  // tree; UpdateVisibleTiles() consumes this flag.
+  if (tile->priority(ACTIVE_TREE).distance_to_visible == 0.f)
+    did_initialize_visible_tile_ = true;
+
+  client_->NotifyTileStateChanged(tile);
+}
+
+// Creates and registers a new tile. The manager keeps a raw pointer in
+// |tiles_| (ownership is ref-counted via RefCountedManager), bumps the
+// per-layer tile count, and marks the prioritized set dirty.
+scoped_refptr<Tile> TileManager::CreateTile(PicturePileImpl* picture_pile,
+                                            const gfx::Size& tile_size,
+                                            const gfx::Rect& content_rect,
+                                            float contents_scale,
+                                            int layer_id,
+                                            int source_frame_number,
+                                            int flags) {
+  scoped_refptr<Tile> tile = make_scoped_refptr(new Tile(this,
+                                                         picture_pile,
+                                                         tile_size,
+                                                         content_rect,
+                                                         contents_scale,
+                                                         layer_id,
+                                                         source_frame_number,
+                                                         flags));
+  DCHECK(tiles_.find(tile->id()) == tiles_.end());
+
+  tiles_[tile->id()] = tile.get();
+  used_layer_counts_[tile->layer_id()]++;
+  prioritized_tiles_dirty_ = true;
+  return tile;
+}
+
+// Test hook: swaps in |rasterizer| and re-registers this manager as its
+// client. Does not shut down the previous rasterizer.
+void TileManager::SetRasterizerForTesting(Rasterizer* rasterizer) {
+  rasterizer_ = rasterizer;
+  rasterizer_->SetClient(this);
+}
+
+// True iff every layer reports that all of its activation-required tiles are
+// ready to draw.
+bool TileManager::IsReadyToActivate() const {
+  const std::vector<PictureLayerImpl*>& layers = client_->GetPictureLayers();
+
+  for (std::vector<PictureLayerImpl*>::const_iterator it = layers.begin();
+       it != layers.end();
+       ++it) {
+    if (!(*it)->AllTilesRequiredForActivationAreReadyToDraw())
+      return false;
+  }
+
+  return true;
+}
+
+// Drains completed tasks (so tile state is current) and notifies the client
+// if activation is now unblocked.
+void TileManager::CheckIfReadyToActivate() {
+  TRACE_EVENT0("cc", "TileManager::CheckIfReadyToActivate");
+
+  rasterizer_->CheckForCompletedTasks();
+  did_check_for_completed_tasks_since_last_schedule_tasks_ = true;
+
+  if (IsReadyToActivate())
+    client_->NotifyReadyToActivate();
+}
+
+} // namespace cc
diff --git a/cc/resources/tile_manager.h b/cc/resources/tile_manager.h
new file mode 100644
index 0000000..a9118a7
--- /dev/null
+++ b/cc/resources/tile_manager.h
@@ -0,0 +1,280 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_TILE_MANAGER_H_
+#define CC_RESOURCES_TILE_MANAGER_H_
+
+#include <deque>
+#include <queue>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "base/containers/hash_tables.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/values.h"
+#include "cc/base/ref_counted_managed.h"
+#include "cc/base/unique_notifier.h"
+#include "cc/debug/rendering_stats_instrumentation.h"
+#include "cc/resources/eviction_tile_priority_queue.h"
+#include "cc/resources/managed_tile_state.h"
+#include "cc/resources/memory_history.h"
+#include "cc/resources/picture_pile_impl.h"
+#include "cc/resources/prioritized_tile_set.h"
+#include "cc/resources/raster_tile_priority_queue.h"
+#include "cc/resources/rasterizer.h"
+#include "cc/resources/resource_pool.h"
+#include "cc/resources/tile.h"
+
+namespace base {
+namespace debug {
+class ConvertableToTraceFormat;
+class TracedValue;
+}
+}
+
+namespace cc {
+class PictureLayerImpl;
+class ResourceProvider;
+
+// Interface through which TileManager queries and notifies its embedder
+// (implemented by LayerTreeHostImpl in production).
+class CC_EXPORT TileManagerClient {
+ public:
+  // Returns the set of layers that the tile manager should consider for
+  // raster.
+  // TODO(vmpstr): Change the way we determine if we are ready to activate, so
+  // that this can be removed.
+  virtual const std::vector<PictureLayerImpl*>& GetPictureLayers() const = 0;
+
+  // Called when all tiles marked as required for activation are ready to draw.
+  virtual void NotifyReadyToActivate() = 0;
+
+  // Called when the visible representation of a tile might have changed. Some
+  // examples are:
+  // - Tile version initialized.
+  // - Tile resources freed.
+  // - Tile marked for on-demand raster.
+  virtual void NotifyTileStateChanged(const Tile* tile) = 0;
+
+  // Given an empty raster tile priority queue, this will build a priority
+  // queue that will return tiles in order in which they should be rasterized.
+  // Note if the queue was previously built, Reset must be called on it.
+  virtual void BuildRasterQueue(RasterTilePriorityQueue* queue,
+                                TreePriority tree_priority) = 0;
+
+  // Given an empty eviction tile priority queue, this will build a priority
+  // queue that will return tiles in order in which they should be evicted.
+  // Note if the queue was previously built, Reset must be called on it.
+  virtual void BuildEvictionQueue(EvictionTilePriorityQueue* queue,
+                                  TreePriority tree_priority) = 0;
+
+ protected:
+  virtual ~TileManagerClient() {}
+};
+
+// Counts of raster tasks that finished vs. were canceled during one
+// UpdateVisibleTiles() interval.
+struct RasterTaskCompletionStats {
+  RasterTaskCompletionStats();
+
+  size_t completed_count;
+  size_t canceled_count;
+};
+// Converts |stats| into a trace-event-compatible value.
+scoped_refptr<base::debug::ConvertableToTraceFormat>
+    RasterTaskCompletionStatsAsValue(const RasterTaskCompletionStats& stats);
+
+// This class manages tiles, deciding which should get rasterized and which
+// should no longer have any memory assigned to them. Tile objects are "owned"
+// by layers; they automatically register with the manager when they are
+// created, and unregister from the manager when they are deleted.
+class CC_EXPORT TileManager : public RasterizerClient,
+                              public RefCountedManager<Tile> {
+ public:
+  // Task-set identifiers used when scheduling raster work.
+  enum NamedTaskSet {
+    REQUIRED_FOR_ACTIVATION = 0,
+    ALL = 1,
+    // Adding additional values requires increasing kNumberOfTaskSets in
+    // rasterizer.h
+  };
+
+  static scoped_ptr<TileManager> Create(
+      TileManagerClient* client,
+      base::SequencedTaskRunner* task_runner,
+      ResourcePool* resource_pool,
+      Rasterizer* rasterizer,
+      RenderingStatsInstrumentation* rendering_stats_instrumentation);
+  virtual ~TileManager();
+
+  void ManageTiles(const GlobalStateThatImpactsTilePriority& state);
+
+  // Returns true when visible tiles have been initialized.
+  bool UpdateVisibleTiles();
+
+  // Creates and registers a new tile owned (ref-counted) by this manager.
+  scoped_refptr<Tile> CreateTile(PicturePileImpl* picture_pile,
+                                 const gfx::Size& tile_size,
+                                 const gfx::Rect& content_rect,
+                                 float contents_scale,
+                                 int layer_id,
+                                 int source_frame_number,
+                                 int flags);
+
+  // Tracing helpers: serialize basic manager state.
+  scoped_refptr<base::debug::ConvertableToTraceFormat> BasicStateAsValue()
+      const;
+  void BasicStateAsValueInto(base::debug::TracedValue* dict) const;
+  const MemoryHistory::Entry& memory_stats_from_last_assign() const {
+    return memory_stats_from_last_assign_;
+  }
+
+  // Test hook: gives each tile a pool resource and updates the releasable
+  // accounting, as OnRasterTaskCompleted would.
+  void InitializeTilesWithResourcesForTesting(const std::vector<Tile*>& tiles) {
+    for (size_t i = 0; i < tiles.size(); ++i) {
+      ManagedTileState& mts = tiles[i]->managed_state();
+
+      mts.draw_info.resource_ =
+          resource_pool_->AcquireResource(tiles[i]->size());
+
+      bytes_releasable_ += BytesConsumedIfAllocated(tiles[i]);
+      ++resources_releasable_;
+    }
+  }
+
+  void ReleaseTileResourcesForTesting(const std::vector<Tile*>& tiles) {
+    for (size_t i = 0; i < tiles.size(); ++i) {
+      Tile* tile = tiles[i];
+      FreeResourcesForTile(tile);
+    }
+  }
+
+  void SetGlobalStateForTesting(
+      const GlobalStateThatImpactsTilePriority& state) {
+    // Soft limit is used for resource pool such that
+    // memory returns to soft limit after going over.
+    if (state != global_state_) {
+      global_state_ = state;
+      prioritized_tiles_dirty_ = true;
+    }
+  }
+
+  void SetRasterizerForTesting(Rasterizer* rasterizer);
+
+  void FreeResourcesAndCleanUpReleasedTilesForTesting() {
+    prioritized_tiles_.Clear();
+    FreeResourcesForReleasedTiles();
+    CleanUpReleasedTiles();
+  }
+
+  std::vector<Tile*> AllTilesForTesting() const {
+    std::vector<Tile*> tiles;
+    for (TileMap::const_iterator it = tiles_.begin(); it != tiles_.end();
+         ++it) {
+      tiles.push_back(it->second);
+    }
+    return tiles;
+  }
+
+ protected:
+  TileManager(TileManagerClient* client,
+              const scoped_refptr<base::SequencedTaskRunner>& task_runner,
+              ResourcePool* resource_pool,
+              Rasterizer* rasterizer,
+              RenderingStatsInstrumentation* rendering_stats_instrumentation);
+
+  // Methods called by Tile
+  friend class Tile;
+  void DidChangeTilePriority(Tile* tile);
+
+  void FreeResourcesForReleasedTiles();
+  void CleanUpReleasedTiles();
+
+  // Overridden from RefCountedManager<Tile>:
+  virtual void Release(Tile* tile) OVERRIDE;
+
+  // Overridden from RasterizerClient:
+  virtual void DidFinishRunningTasks(TaskSet task_set) OVERRIDE;
+  virtual TaskSetCollection TasksThatShouldBeForcedToComplete() const OVERRIDE;
+
+  typedef std::vector<Tile*> TileVector;
+  typedef std::set<Tile*> TileSet;
+
+  // Virtual for test
+  virtual void ScheduleTasks(
+      const TileVector& tiles_that_need_to_be_rasterized);
+
+  void AssignGpuMemoryToTiles(PrioritizedTileSet* tiles,
+                              TileVector* tiles_that_need_to_be_rasterized);
+  void GetTilesWithAssignedBins(PrioritizedTileSet* tiles);
+
+ private:
+  void OnImageDecodeTaskCompleted(int layer_id,
+                                  SkPixelRef* pixel_ref,
+                                  bool was_canceled);
+  void OnRasterTaskCompleted(Tile::Id tile,
+                             scoped_ptr<ScopedResource> resource,
+                             const PicturePileImpl::Analysis& analysis,
+                             bool was_canceled);
+
+  // Memory a tile's resource would occupy, given the pool's resource format.
+  inline size_t BytesConsumedIfAllocated(const Tile* tile) const {
+    return Resource::MemorySizeBytes(tile->size(),
+                                     resource_pool_->resource_format());
+  }
+
+  void FreeResourcesForTile(Tile* tile);
+  void FreeResourcesForTileAndNotifyClientIfTileWasReadyToDraw(Tile* tile);
+  scoped_refptr<ImageDecodeTask> CreateImageDecodeTask(Tile* tile,
+                                                       SkPixelRef* pixel_ref);
+  scoped_refptr<RasterTask> CreateRasterTask(Tile* tile);
+  void UpdatePrioritizedTileSetIfNeeded();
+
+  bool IsReadyToActivate() const;
+  void CheckIfReadyToActivate();
+
+  TileManagerClient* client_;
+  scoped_refptr<base::SequencedTaskRunner> task_runner_;
+  ResourcePool* resource_pool_;
+  Rasterizer* rasterizer_;
+  GlobalStateThatImpactsTilePriority global_state_;
+
+  // All live tiles, keyed by id. Raw pointers: lifetime is managed through
+  // RefCountedManager<Tile>.
+  typedef base::hash_map<Tile::Id, Tile*> TileMap;
+  TileMap tiles_;
+
+  PrioritizedTileSet prioritized_tiles_;
+  bool prioritized_tiles_dirty_;
+
+  bool all_tiles_that_need_to_be_rasterized_have_memory_;
+  bool all_tiles_required_for_activation_have_memory_;
+
+  // Bytes/resources currently assigned to tiles and reclaimable by freeing.
+  size_t bytes_releasable_;
+  size_t resources_releasable_;
+
+  bool ever_exceeded_memory_budget_;
+  MemoryHistory::Entry memory_stats_from_last_assign_;
+
+  RenderingStatsInstrumentation* rendering_stats_instrumentation_;
+
+  bool did_initialize_visible_tile_;
+  bool did_check_for_completed_tasks_since_last_schedule_tasks_;
+  bool did_oom_on_last_assign_;
+
+  // Cache of decode tasks, keyed by layer id then pixel-ref generation id,
+  // so raster tasks for the same content share decode work.
+  typedef base::hash_map<uint32_t, scoped_refptr<ImageDecodeTask> >
+      PixelRefTaskMap;
+  typedef base::hash_map<int, PixelRefTaskMap> LayerPixelRefTaskMap;
+  LayerPixelRefTaskMap image_decode_tasks_;
+
+  typedef base::hash_map<int, int> LayerCountMap;
+  LayerCountMap used_layer_counts_;
+
+  RasterTaskCompletionStats update_visible_tiles_stats_;
+
+  std::vector<Tile*> released_tiles_;
+
+  ResourceFormat resource_format_;
+
+  // Queue used when scheduling raster tasks.
+  RasterTaskQueue raster_queue_;
+
+  // Completed tasks kept alive until the next ScheduleTasks() call.
+  std::vector<scoped_refptr<RasterTask> > orphan_raster_tasks_;
+
+  UniqueNotifier ready_to_activate_check_notifier_;
+
+  DISALLOW_COPY_AND_ASSIGN(TileManager);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_TILE_MANAGER_H_
diff --git a/cc/resources/tile_manager_perftest.cc b/cc/resources/tile_manager_perftest.cc
new file mode 100644
index 0000000..5ff4fa2
--- /dev/null
+++ b/cc/resources/tile_manager_perftest.cc
@@ -0,0 +1,496 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+#include "cc/debug/lap_timer.h"
+#include "cc/resources/raster_buffer.h"
+#include "cc/resources/tile.h"
+#include "cc/resources/tile_priority.h"
+#include "cc/test/begin_frame_args_test.h"
+#include "cc/test/fake_impl_proxy.h"
+#include "cc/test/fake_layer_tree_host_impl.h"
+#include "cc/test/fake_output_surface.h"
+#include "cc/test/fake_output_surface_client.h"
+#include "cc/test/fake_picture_layer_impl.h"
+#include "cc/test/fake_picture_pile_impl.h"
+#include "cc/test/fake_tile_manager.h"
+#include "cc/test/fake_tile_manager_client.h"
+#include "cc/test/impl_side_painting_settings.h"
+#include "cc/test/test_shared_bitmap_manager.h"
+#include "cc/test/test_tile_priorities.h"
+#include "cc/trees/layer_tree_impl.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+#include "ui/gfx/frame_time.h"
+
+namespace cc {
+
+namespace {
+
+// Perf-test timer tuning: total time budget per measurement, warmup laps,
+// and how many laps pass between expiry checks.
+static const int kTimeLimitMillis = 2000;
+static const int kWarmupRuns = 5;
+static const int kTimeCheckInterval = 10;
+
+// A synchronous fake Rasterizer: ScheduleTasks() immediately marks every
+// queued task as scheduled and records it; CheckForCompletedTasks() then
+// completes the recorded tasks and runs their reply callbacks on the origin
+// thread. No actual raster work is performed.
+class FakeRasterizerImpl : public Rasterizer, public RasterizerTaskClient {
+ public:
+  // Overridden from Rasterizer:
+  virtual void SetClient(RasterizerClient* client) OVERRIDE {}
+  virtual void Shutdown() OVERRIDE {}
+  virtual void ScheduleTasks(RasterTaskQueue* queue) OVERRIDE {
+    for (RasterTaskQueue::Item::Vector::const_iterator it =
+             queue->items.begin();
+         it != queue->items.end();
+         ++it) {
+      RasterTask* task = it->task;
+
+      task->WillSchedule();
+      task->ScheduleOnOriginThread(this);
+      task->DidSchedule();
+
+      completed_tasks_.push_back(task);
+    }
+  }
+  virtual void CheckForCompletedTasks() OVERRIDE {
+    for (RasterTask::Vector::iterator it = completed_tasks_.begin();
+         it != completed_tasks_.end();
+         ++it) {
+      RasterTask* task = it->get();
+
+      task->WillComplete();
+      task->CompleteOnOriginThread(this);
+      task->DidComplete();
+
+      task->RunReplyOnOriginThread();
+    }
+    completed_tasks_.clear();
+  }
+
+  // Overridden from RasterizerTaskClient:
+  virtual scoped_ptr<RasterBuffer> AcquireBufferForRaster(
+      const Resource* resource) OVERRIDE {
+    return nullptr;
+  }
+  virtual void ReleaseBufferForRaster(
+      scoped_ptr<RasterBuffer> buffer) OVERRIDE {}
+
+ private:
+  RasterTask::Vector completed_tasks_;
+};
+// Shared fake rasterizer instance used by all the perf tests below.
+base::LazyInstance<FakeRasterizerImpl> g_fake_rasterizer =
+    LAZY_INSTANCE_INITIALIZER;
+
+class TileManagerPerfTest : public testing::Test {
+ public:
+  // Builds a host impl on the current message loop with room for 10000 tiles
+  // and configures the lap timer from the constants above.
+  TileManagerPerfTest()
+      : memory_limit_policy_(ALLOW_ANYTHING),
+        max_tiles_(10000),
+        id_(7),
+        proxy_(base::MessageLoopProxy::current()),
+        host_impl_(ImplSidePaintingSettings(10000),
+                   &proxy_,
+                   &shared_bitmap_manager_),
+        timer_(kWarmupRuns,
+               base::TimeDelta::FromMilliseconds(kTimeLimitMillis),
+               kTimeCheckInterval) {}
+
+  // Installs a global state with a 100MB soft limit (hard = 2x soft) and the
+  // given |tree_priority| on both the resource pool and the tile manager.
+  void SetTreePriority(TreePriority tree_priority) {
+    GlobalStateThatImpactsTilePriority state;
+    // NOTE(review): |tile_size| appears unused in this method.
+    gfx::Size tile_size(256, 256);
+
+    state.soft_memory_limit_in_bytes = 100 * 1000 * 1000;
+    state.num_resources_limit = max_tiles_;
+    state.hard_memory_limit_in_bytes = state.soft_memory_limit_in_bytes * 2;
+    state.memory_limit_policy = memory_limit_policy_;
+    state.tree_priority = tree_priority;
+
+    global_state_ = state;
+    host_impl_.resource_pool()->SetResourceUsageLimits(
+        state.soft_memory_limit_in_bytes, 0, state.num_resources_limit);
+    host_impl_.tile_manager()->SetGlobalStateForTesting(state);
+  }
+
+  // Test fixture setup: infinite pile, renderer, and default tree priority.
+  virtual void SetUp() OVERRIDE {
+    picture_pile_ = FakePicturePileImpl::CreateInfiniteFilledPile();
+    InitializeRenderer();
+    SetTreePriority(SAME_PRIORITY_FOR_BOTH_TREES);
+  }
+
+  // Creates a 3d output surface and wires in the shared fake rasterizer so
+  // raster tasks run synchronously.
+  virtual void InitializeRenderer() {
+    host_impl_.InitializeRenderer(FakeOutputSurface::Create3d().Pass());
+    tile_manager()->SetRasterizerForTesting(g_fake_rasterizer.Pointer());
+  }
+
+  // Builds matching 100x100-tile pending and active piles of |layer_bounds|
+  // and installs them as the two trees.
+  void SetupDefaultTrees(const gfx::Size& layer_bounds) {
+    gfx::Size tile_size(100, 100);
+
+    scoped_refptr<FakePicturePileImpl> pending_pile =
+        FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+    scoped_refptr<FakePicturePileImpl> active_pile =
+        FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+
+    SetupTrees(pending_pile, active_pile);
+  }
+
+  // Activates the sync tree and re-resolves |active_root_layer_|; the pending
+  // layer pointer becomes stale and is cleared.
+  void ActivateTree() {
+    host_impl_.ActivateSyncTree();
+    CHECK(!host_impl_.pending_tree());
+    pending_root_layer_ = NULL;
+    active_root_layer_ = static_cast<FakePictureLayerImpl*>(
+        host_impl_.active_tree()->LayerById(id_));
+  }
+
+  // Like SetupDefaultTrees(), but pins both root layers to |tile_size|.
+  void SetupDefaultTreesWithFixedTileSize(const gfx::Size& layer_bounds,
+                                          const gfx::Size& tile_size) {
+    SetupDefaultTrees(layer_bounds);
+    pending_root_layer_->set_fixed_tile_size(tile_size);
+    active_root_layer_->set_fixed_tile_size(tile_size);
+  }
+
+  // Installs |active_pile| (via a pending tree that is then activated) and
+  // leaves |pending_pile| as the current pending tree.
+  void SetupTrees(scoped_refptr<PicturePileImpl> pending_pile,
+                  scoped_refptr<PicturePileImpl> active_pile) {
+    SetupPendingTree(active_pile);
+    ActivateTree();
+    SetupPendingTree(pending_pile);
+  }
+
+  // Creates a fresh pending tree whose root is a drawable fake picture layer
+  // backed by |pile|, and caches the layer in |pending_root_layer_|.
+  void SetupPendingTree(scoped_refptr<PicturePileImpl> pile) {
+    host_impl_.CreatePendingTree();
+    LayerTreeImpl* pending_tree = host_impl_.pending_tree();
+    // Clear recycled tree.
+    pending_tree->DetachLayerTree();
+
+    scoped_ptr<FakePictureLayerImpl> pending_layer =
+        FakePictureLayerImpl::CreateWithPile(pending_tree, id_, pile);
+    pending_layer->SetDrawsContent(true);
+    pending_tree->SetRootLayer(pending_layer.Pass());
+
+    pending_root_layer_ = static_cast<FakePictureLayerImpl*>(
+        host_impl_.pending_tree()->LayerById(id_));
+    pending_root_layer_->DoPostCommitInitializationIfNeeded();
+  }
+
+  // Creates default tilings/tiles on both trees and marks all tiles visible.
+  void CreateHighLowResAndSetAllTilesVisible() {
+    // Active layer must get updated first so pending layer can share from it.
+    active_root_layer_->CreateDefaultTilingsAndTiles();
+    active_root_layer_->SetAllTilesVisible();
+    pending_root_layer_->CreateDefaultTilingsAndTiles();
+    pending_root_layer_->SetAllTilesVisible();
+  }
+
+  // Measures raster-queue construction throughput over |layer_count| layers,
+  // cycling the tree priority each lap so all priority modes are exercised.
+  void RunRasterQueueConstructTest(const std::string& test_name,
+                                   int layer_count) {
+    TreePriority priorities[] = {SAME_PRIORITY_FOR_BOTH_TREES,
+                                 SMOOTHNESS_TAKES_PRIORITY,
+                                 NEW_CONTENT_TAKES_PRIORITY};
+    int priority_count = 0;
+
+    std::vector<LayerImpl*> layers = CreateLayers(layer_count, 10);
+    bool resourceless_software_draw = false;
+    for (unsigned i = 0; i < layers.size(); ++i) {
+      layers[i]->UpdateTiles(Occlusion(), resourceless_software_draw);
+    }
+
+    timer_.Reset();
+    do {
+      RasterTilePriorityQueue queue;
+      host_impl_.BuildRasterQueue(&queue, priorities[priority_count]);
+      priority_count = (priority_count + 1) % arraysize(priorities);
+      timer_.NextLap();
+    } while (!timer_.HasTimeLimitExpired());
+
+    perf_test::PrintResult("tile_manager_raster_tile_queue_construct",
+                           "",
+                           test_name,
+                           timer_.LapsPerSecond(),
+                           "runs/s",
+                           true);
+  }
+
+  // Like RunRasterQueueConstructTest(), but additionally pops |tile_count|
+  // tiles from each freshly built queue to measure iteration cost too.
+  void RunRasterQueueConstructAndIterateTest(const std::string& test_name,
+                                             int layer_count,
+                                             unsigned tile_count) {
+    TreePriority priorities[] = {SAME_PRIORITY_FOR_BOTH_TREES,
+                                 SMOOTHNESS_TAKES_PRIORITY,
+                                 NEW_CONTENT_TAKES_PRIORITY};
+
+    std::vector<LayerImpl*> layers = CreateLayers(layer_count, 100);
+    bool resourceless_software_draw = false;
+    for (unsigned i = 0; i < layers.size(); ++i) {
+      layers[i]->UpdateTiles(Occlusion(), resourceless_software_draw);
+    }
+
+    int priority_count = 0;
+    timer_.Reset();
+    do {
+      int count = tile_count;
+      RasterTilePriorityQueue queue;
+      host_impl_.BuildRasterQueue(&queue, priorities[priority_count]);
+      while (count--) {
+        ASSERT_FALSE(queue.IsEmpty());
+        ASSERT_TRUE(queue.Top() != NULL);
+        queue.Pop();
+      }
+      priority_count = (priority_count + 1) % arraysize(priorities);
+      timer_.NextLap();
+    } while (!timer_.HasTimeLimitExpired());
+
+    perf_test::PrintResult(
+        "tile_manager_raster_tile_queue_construct_and_iterate",
+        "",
+        test_name,
+        timer_.LapsPerSecond(),
+        "runs/s",
+        true);
+  }
+
+  // Measures eviction-queue construction throughput. Tiles are first given
+  // real resources so there is something to evict.
+  void RunEvictionQueueConstructTest(const std::string& test_name,
+                                     int layer_count) {
+    TreePriority priorities[] = {SAME_PRIORITY_FOR_BOTH_TREES,
+                                 SMOOTHNESS_TAKES_PRIORITY,
+                                 NEW_CONTENT_TAKES_PRIORITY};
+    int priority_count = 0;
+
+    std::vector<LayerImpl*> layers = CreateLayers(layer_count, 10);
+    bool resourceless_software_draw = false;
+    for (unsigned i = 0; i < layers.size(); ++i) {
+      FakePictureLayerImpl* layer =
+          static_cast<FakePictureLayerImpl*>(layers[i]);
+      layer->UpdateTiles(Occlusion(), resourceless_software_draw);
+      for (size_t j = 0; j < layer->GetTilings()->num_tilings(); ++j) {
+        tile_manager()->InitializeTilesWithResourcesForTesting(
+            layer->GetTilings()->tiling_at(j)->AllTilesForTesting());
+      }
+    }
+
+    timer_.Reset();
+    do {
+      EvictionTilePriorityQueue queue;
+      host_impl_.BuildEvictionQueue(&queue, priorities[priority_count]);
+      priority_count = (priority_count + 1) % arraysize(priorities);
+      timer_.NextLap();
+    } while (!timer_.HasTimeLimitExpired());
+
+    perf_test::PrintResult("tile_manager_eviction_tile_queue_construct",
+                           "",
+                           test_name,
+                           timer_.LapsPerSecond(),
+                           "runs/s",
+                           true);
+  }
+
+  // Like RunEvictionQueueConstructTest(), but additionally pops |tile_count|
+  // tiles from each freshly built queue to measure iteration cost too.
+  void RunEvictionQueueConstructAndIterateTest(const std::string& test_name,
+                                               int layer_count,
+                                               unsigned tile_count) {
+    TreePriority priorities[] = {SAME_PRIORITY_FOR_BOTH_TREES,
+                                 SMOOTHNESS_TAKES_PRIORITY,
+                                 NEW_CONTENT_TAKES_PRIORITY};
+    int priority_count = 0;
+
+    std::vector<LayerImpl*> layers = CreateLayers(layer_count, tile_count);
+    bool resourceless_software_draw = false;
+    for (unsigned i = 0; i < layers.size(); ++i) {
+      FakePictureLayerImpl* layer =
+          static_cast<FakePictureLayerImpl*>(layers[i]);
+      layer->UpdateTiles(Occlusion(), resourceless_software_draw);
+      for (size_t j = 0; j < layer->GetTilings()->num_tilings(); ++j) {
+        tile_manager()->InitializeTilesWithResourcesForTesting(
+            layer->GetTilings()->tiling_at(j)->AllTilesForTesting());
+      }
+    }
+
+    timer_.Reset();
+    do {
+      int count = tile_count;
+      EvictionTilePriorityQueue queue;
+      host_impl_.BuildEvictionQueue(&queue, priorities[priority_count]);
+      while (count--) {
+        ASSERT_FALSE(queue.IsEmpty());
+        ASSERT_TRUE(queue.Top() != NULL);
+        queue.Pop();
+      }
+      priority_count = (priority_count + 1) % arraysize(priorities);
+      timer_.NextLap();
+    } while (!timer_.HasTimeLimitExpired());
+
+    perf_test::PrintResult(
+        "tile_manager_eviction_tile_queue_construct_and_iterate",
+        "",
+        test_name,
+        timer_.LapsPerSecond(),
+        "runs/s",
+        true);
+  }
+
+ std::vector<LayerImpl*> CreateLayers(int layer_count,
+ int tiles_per_layer_count) {
+ // Compute the width/height required for high res to get
+ // tiles_per_layer_count tiles.
+ float width = std::sqrt(static_cast<float>(tiles_per_layer_count));
+ float height = tiles_per_layer_count / width;
+
+ // Adjust the width and height to account for the fact that tiles
+    // are bigger than 1x1. Also, account for the fact that we
+ // will be creating one high res and one low res tiling. That is,
+ // width and height should be smaller by sqrt(1 + low_res_scale).
+ // This gives us _approximately_ correct counts.
+ width *= settings_.default_tile_size.width() /
+ std::sqrt(1 + settings_.low_res_contents_scale_factor);
+ height *= settings_.default_tile_size.height() /
+ std::sqrt(1 + settings_.low_res_contents_scale_factor);
+
+ // Ensure that we start with blank trees and no tiles.
+ host_impl_.ResetTreesForTesting();
+ tile_manager()->FreeResourcesAndCleanUpReleasedTilesForTesting();
+
+ gfx::Size layer_bounds(width, height);
+ gfx::Size viewport(width / 5, height / 5);
+ host_impl_.SetViewportSize(viewport);
+ SetupDefaultTreesWithFixedTileSize(layer_bounds,
+ settings_.default_tile_size);
+
+ active_root_layer_->CreateDefaultTilingsAndTiles();
+ pending_root_layer_->CreateDefaultTilingsAndTiles();
+
+ std::vector<LayerImpl*> layers;
+
+ // Pending layer counts as one layer.
+ layers.push_back(pending_root_layer_);
+ int next_id = id_ + 1;
+
+ // Create the rest of the layers as children of the root layer.
+ while (static_cast<int>(layers.size()) < layer_count) {
+ scoped_ptr<FakePictureLayerImpl> layer =
+ FakePictureLayerImpl::CreateWithPile(
+ host_impl_.pending_tree(), next_id, picture_pile_);
+ layer->SetBounds(layer_bounds);
+ layers.push_back(layer.get());
+ pending_root_layer_->AddChild(layer.Pass());
+
+ FakePictureLayerImpl* fake_layer =
+ static_cast<FakePictureLayerImpl*>(layers.back());
+
+ fake_layer->SetDrawsContent(true);
+ fake_layer->DoPostCommitInitializationIfNeeded();
+ fake_layer->CreateDefaultTilingsAndTiles();
+ ++next_id;
+ }
+
+ return layers;
+ }
+
+ GlobalStateThatImpactsTilePriority GlobalStateForTest() {
+ GlobalStateThatImpactsTilePriority state;
+ gfx::Size tile_size = settings_.default_tile_size;
+ state.soft_memory_limit_in_bytes =
+ 10000u * 4u *
+ static_cast<size_t>(tile_size.width() * tile_size.height());
+ state.hard_memory_limit_in_bytes = state.soft_memory_limit_in_bytes;
+ state.num_resources_limit = 10000;
+ state.memory_limit_policy = ALLOW_ANYTHING;
+ state.tree_priority = SMOOTHNESS_TAKES_PRIORITY;
+ return state;
+ }
+
+ void RunManageTilesTest(const std::string& test_name,
+ int layer_count,
+ int approximate_tile_count_per_layer) {
+ std::vector<LayerImpl*> layers =
+ CreateLayers(layer_count, approximate_tile_count_per_layer);
+ timer_.Reset();
+ bool resourceless_software_draw = false;
+ do {
+ BeginFrameArgs args = CreateBeginFrameArgsForTesting();
+ host_impl_.UpdateCurrentBeginFrameArgs(args);
+ for (unsigned i = 0; i < layers.size(); ++i) {
+ layers[i]->UpdateTiles(Occlusion(), resourceless_software_draw);
+ }
+
+ GlobalStateThatImpactsTilePriority global_state(GlobalStateForTest());
+ tile_manager()->ManageTiles(global_state);
+ tile_manager()->UpdateVisibleTiles();
+ timer_.NextLap();
+ host_impl_.ResetCurrentBeginFrameArgsForNextFrame();
+ } while (!timer_.HasTimeLimitExpired());
+
+ perf_test::PrintResult(
+ "manage_tiles", "", test_name, timer_.LapsPerSecond(), "runs/s", true);
+ }
+
+ TileManager* tile_manager() { return host_impl_.tile_manager(); }
+
+ protected:
+ GlobalStateThatImpactsTilePriority global_state_;
+
+ TestSharedBitmapManager shared_bitmap_manager_;
+ TileMemoryLimitPolicy memory_limit_policy_;
+ int max_tiles_;
+ int id_;
+ FakeImplProxy proxy_;
+ FakeLayerTreeHostImpl host_impl_;
+ FakePictureLayerImpl* pending_root_layer_;
+ FakePictureLayerImpl* active_root_layer_;
+ LapTimer timer_;
+ scoped_refptr<FakePicturePileImpl> picture_pile_;
+ LayerTreeSettings settings_;
+};
+
+TEST_F(TileManagerPerfTest, ManageTiles) {
+ RunManageTilesTest("2_100", 2, 100);
+ RunManageTilesTest("2_500", 2, 500);
+ RunManageTilesTest("2_1000", 2, 1000);
+ RunManageTilesTest("10_100", 10, 100);
+ RunManageTilesTest("10_500", 10, 500);
+ RunManageTilesTest("10_1000", 10, 1000);
+ RunManageTilesTest("50_100", 100, 100);
+ RunManageTilesTest("50_500", 100, 500);
+ RunManageTilesTest("50_1000", 100, 1000);
+}
+
+TEST_F(TileManagerPerfTest, RasterTileQueueConstruct) {
+ RunRasterQueueConstructTest("2", 2);
+ RunRasterQueueConstructTest("10", 10);
+ RunRasterQueueConstructTest("50", 50);
+}
+
+TEST_F(TileManagerPerfTest, RasterTileQueueConstructAndIterate) {
+ RunRasterQueueConstructAndIterateTest("2_16", 2, 16);
+ RunRasterQueueConstructAndIterateTest("2_32", 2, 32);
+ RunRasterQueueConstructAndIterateTest("2_64", 2, 64);
+ RunRasterQueueConstructAndIterateTest("2_128", 2, 128);
+ RunRasterQueueConstructAndIterateTest("10_16", 10, 16);
+ RunRasterQueueConstructAndIterateTest("10_32", 10, 32);
+ RunRasterQueueConstructAndIterateTest("10_64", 10, 64);
+ RunRasterQueueConstructAndIterateTest("10_128", 10, 128);
+ RunRasterQueueConstructAndIterateTest("50_16", 50, 16);
+ RunRasterQueueConstructAndIterateTest("50_32", 50, 32);
+ RunRasterQueueConstructAndIterateTest("50_64", 50, 64);
+ RunRasterQueueConstructAndIterateTest("50_128", 50, 128);
+}
+
+TEST_F(TileManagerPerfTest, EvictionTileQueueConstruct) {
+ RunEvictionQueueConstructTest("2", 2);
+ RunEvictionQueueConstructTest("10", 10);
+ RunEvictionQueueConstructTest("50", 50);
+}
+
+TEST_F(TileManagerPerfTest, EvictionTileQueueConstructAndIterate) {
+ RunEvictionQueueConstructAndIterateTest("2_16", 2, 16);
+ RunEvictionQueueConstructAndIterateTest("2_32", 2, 32);
+ RunEvictionQueueConstructAndIterateTest("2_64", 2, 64);
+ RunEvictionQueueConstructAndIterateTest("2_128", 2, 128);
+ RunEvictionQueueConstructAndIterateTest("10_16", 10, 16);
+ RunEvictionQueueConstructAndIterateTest("10_32", 10, 32);
+ RunEvictionQueueConstructAndIterateTest("10_64", 10, 64);
+ RunEvictionQueueConstructAndIterateTest("10_128", 10, 128);
+ RunEvictionQueueConstructAndIterateTest("50_16", 50, 16);
+ RunEvictionQueueConstructAndIterateTest("50_32", 50, 32);
+ RunEvictionQueueConstructAndIterateTest("50_64", 50, 64);
+ RunEvictionQueueConstructAndIterateTest("50_128", 50, 128);
+}
+
+} // namespace
+
+} // namespace cc
diff --git a/cc/resources/tile_manager_unittest.cc b/cc/resources/tile_manager_unittest.cc
new file mode 100644
index 0000000..3fb5b13
--- /dev/null
+++ b/cc/resources/tile_manager_unittest.cc
@@ -0,0 +1,1144 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/eviction_tile_priority_queue.h"
+#include "cc/resources/raster_tile_priority_queue.h"
+#include "cc/resources/tile.h"
+#include "cc/resources/tile_priority.h"
+#include "cc/test/fake_impl_proxy.h"
+#include "cc/test/fake_layer_tree_host_impl.h"
+#include "cc/test/fake_output_surface.h"
+#include "cc/test/fake_output_surface_client.h"
+#include "cc/test/fake_picture_layer_impl.h"
+#include "cc/test/fake_picture_pile_impl.h"
+#include "cc/test/fake_tile_manager.h"
+#include "cc/test/impl_side_painting_settings.h"
+#include "cc/test/test_shared_bitmap_manager.h"
+#include "cc/test/test_tile_priorities.h"
+#include "cc/trees/layer_tree_impl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cc {
+namespace {
+
+class TileManagerTest : public testing::TestWithParam<bool>,
+ public TileManagerClient {
+ public:
+ typedef std::vector<scoped_refptr<Tile> > TileVector;
+
+ TileManagerTest()
+ : memory_limit_policy_(ALLOW_ANYTHING),
+ max_tiles_(0),
+ ready_to_activate_(false) {}
+
+ void Initialize(int max_tiles,
+ TileMemoryLimitPolicy memory_limit_policy,
+ TreePriority tree_priority) {
+ output_surface_ = FakeOutputSurface::Create3d();
+ CHECK(output_surface_->BindToClient(&output_surface_client_));
+
+ shared_bitmap_manager_.reset(new TestSharedBitmapManager());
+ resource_provider_ = ResourceProvider::Create(output_surface_.get(),
+ shared_bitmap_manager_.get(),
+ NULL,
+ 0,
+ false,
+ 1,
+ false);
+ resource_pool_ = ResourcePool::Create(
+ resource_provider_.get(), GL_TEXTURE_2D, RGBA_8888);
+ tile_manager_ =
+ make_scoped_ptr(new FakeTileManager(this, resource_pool_.get()));
+
+ memory_limit_policy_ = memory_limit_policy;
+ max_tiles_ = max_tiles;
+ picture_pile_ = FakePicturePileImpl::CreateInfiniteFilledPile();
+
+ SetTreePriority(tree_priority);
+ }
+
+ void SetTreePriority(TreePriority tree_priority) {
+ GlobalStateThatImpactsTilePriority state;
+ gfx::Size tile_size = settings_.default_tile_size;
+
+ if (UsingMemoryLimit()) {
+ state.soft_memory_limit_in_bytes =
+ max_tiles_ * 4 * tile_size.width() * tile_size.height();
+ state.num_resources_limit = 100;
+ } else {
+ state.soft_memory_limit_in_bytes = 100 * 1000 * 1000;
+ state.num_resources_limit = max_tiles_;
+ }
+ state.hard_memory_limit_in_bytes = state.soft_memory_limit_in_bytes * 2;
+ state.memory_limit_policy = memory_limit_policy_;
+ state.tree_priority = tree_priority;
+
+ global_state_ = state;
+ resource_pool_->SetResourceUsageLimits(state.soft_memory_limit_in_bytes,
+ state.soft_memory_limit_in_bytes,
+ state.num_resources_limit);
+ tile_manager_->SetGlobalStateForTesting(state);
+ }
+
+ virtual void TearDown() OVERRIDE {
+ tile_manager_.reset(NULL);
+ picture_pile_ = NULL;
+
+ testing::Test::TearDown();
+ }
+
+ // TileManagerClient implementation.
+ virtual const std::vector<PictureLayerImpl*>& GetPictureLayers()
+ const OVERRIDE {
+ return picture_layers_;
+ }
+ virtual void NotifyReadyToActivate() OVERRIDE { ready_to_activate_ = true; }
+ virtual void NotifyTileStateChanged(const Tile* tile) OVERRIDE {}
+ virtual void BuildRasterQueue(RasterTilePriorityQueue* queue,
+ TreePriority priority) OVERRIDE {}
+ virtual void BuildEvictionQueue(EvictionTilePriorityQueue* queue,
+ TreePriority priority) OVERRIDE {}
+
+ TileVector CreateTilesWithSize(int count,
+ TilePriority active_priority,
+ TilePriority pending_priority,
+ const gfx::Size& tile_size) {
+ TileVector tiles;
+ for (int i = 0; i < count; ++i) {
+ scoped_refptr<Tile> tile = tile_manager_->CreateTile(picture_pile_.get(),
+ tile_size,
+ gfx::Rect(),
+ 1.0,
+ 0,
+ 0,
+ 0);
+ tile->SetPriority(ACTIVE_TREE, active_priority);
+ tile->SetPriority(PENDING_TREE, pending_priority);
+ tiles.push_back(tile);
+ }
+ return tiles;
+ }
+
+ TileVector CreateTiles(int count,
+ TilePriority active_priority,
+ TilePriority pending_priority) {
+ return CreateTilesWithSize(
+ count, active_priority, pending_priority, settings_.default_tile_size);
+ }
+
+ void ReleaseTiles(TileVector* tiles) {
+ for (TileVector::iterator it = tiles->begin(); it != tiles->end(); it++) {
+ Tile* tile = it->get();
+ tile->SetPriority(ACTIVE_TREE, TilePriority());
+ tile->SetPriority(PENDING_TREE, TilePriority());
+ }
+ }
+
+ FakeTileManager* tile_manager() { return tile_manager_.get(); }
+
+ int AssignedMemoryCount(const TileVector& tiles) {
+ int has_memory_count = 0;
+ for (TileVector::const_iterator it = tiles.begin(); it != tiles.end();
+ ++it) {
+ if (tile_manager_->HasBeenAssignedMemory(it->get()))
+ ++has_memory_count;
+ }
+ return has_memory_count;
+ }
+
+ bool ready_to_activate() const { return ready_to_activate_; }
+
+ // The parametrization specifies whether the max tile limit should
+ // be applied to memory or resources.
+ bool UsingResourceLimit() { return !GetParam(); }
+ bool UsingMemoryLimit() { return GetParam(); }
+
+ protected:
+ GlobalStateThatImpactsTilePriority global_state_;
+
+ private:
+ LayerTreeSettings settings_;
+ scoped_ptr<FakeTileManager> tile_manager_;
+ scoped_refptr<FakePicturePileImpl> picture_pile_;
+ FakeOutputSurfaceClient output_surface_client_;
+ scoped_ptr<FakeOutputSurface> output_surface_;
+ scoped_ptr<SharedBitmapManager> shared_bitmap_manager_;
+ scoped_ptr<ResourceProvider> resource_provider_;
+ scoped_ptr<ResourcePool> resource_pool_;
+ TileMemoryLimitPolicy memory_limit_policy_;
+ int max_tiles_;
+ bool ready_to_activate_;
+ std::vector<PictureLayerImpl*> picture_layers_;
+};
+
+TEST_P(TileManagerTest, EnoughMemoryAllowAnything) {
+ // A few tiles of each type of priority, with enough memory for all tiles.
+
+ Initialize(10, ALLOW_ANYTHING, SMOOTHNESS_TAKES_PRIORITY);
+ TileVector active_now =
+ CreateTiles(3, TilePriorityForNowBin(), TilePriority());
+ TileVector pending_now =
+ CreateTiles(3, TilePriority(), TilePriorityForNowBin());
+ TileVector active_pending_soon =
+ CreateTiles(3, TilePriorityForSoonBin(), TilePriorityForSoonBin());
+ TileVector never_bin = CreateTiles(1, TilePriority(), TilePriority());
+
+ tile_manager()->AssignMemoryToTiles(global_state_);
+
+ EXPECT_EQ(3, AssignedMemoryCount(active_now));
+ EXPECT_EQ(3, AssignedMemoryCount(pending_now));
+ EXPECT_EQ(3, AssignedMemoryCount(active_pending_soon));
+ EXPECT_EQ(0, AssignedMemoryCount(never_bin));
+
+ ReleaseTiles(&active_now);
+ ReleaseTiles(&pending_now);
+ ReleaseTiles(&active_pending_soon);
+ ReleaseTiles(&never_bin);
+}
+
+TEST_P(TileManagerTest, EnoughMemoryAllowPrepaintOnly) {
+ // A few tiles of each type of priority, with enough memory for all tiles,
+ // with the exception of never bin.
+
+ Initialize(10, ALLOW_PREPAINT_ONLY, SMOOTHNESS_TAKES_PRIORITY);
+ TileVector active_now =
+ CreateTiles(3, TilePriorityForNowBin(), TilePriority());
+ TileVector pending_now =
+ CreateTiles(3, TilePriority(), TilePriorityForNowBin());
+ TileVector active_pending_soon =
+ CreateTiles(3, TilePriorityForSoonBin(), TilePriorityForSoonBin());
+ TileVector never_bin = CreateTiles(1, TilePriority(), TilePriority());
+
+ tile_manager()->AssignMemoryToTiles(global_state_);
+
+ EXPECT_EQ(3, AssignedMemoryCount(active_now));
+ EXPECT_EQ(3, AssignedMemoryCount(pending_now));
+ EXPECT_EQ(3, AssignedMemoryCount(active_pending_soon));
+ EXPECT_EQ(0, AssignedMemoryCount(never_bin));
+
+ ReleaseTiles(&active_now);
+ ReleaseTiles(&pending_now);
+ ReleaseTiles(&active_pending_soon);
+ ReleaseTiles(&never_bin);
+}
+
+TEST_P(TileManagerTest, EnoughMemoryPendingLowResAllowAbsoluteMinimum) {
+ // A few low-res tiles required for activation, with enough memory for all
+ // tiles.
+
+ Initialize(5, ALLOW_ABSOLUTE_MINIMUM, SAME_PRIORITY_FOR_BOTH_TREES);
+ TileVector pending_low_res =
+ CreateTiles(5, TilePriority(), TilePriorityLowRes());
+
+ tile_manager()->AssignMemoryToTiles(global_state_);
+
+ EXPECT_EQ(5, AssignedMemoryCount(pending_low_res));
+ ReleaseTiles(&pending_low_res);
+}
+
+TEST_P(TileManagerTest, EnoughMemoryAllowAbsoluteMinimum) {
+ // A few tiles of each type of priority, with enough memory for all tiles,
+ // with the exception of never and soon bins.
+
+ Initialize(10, ALLOW_ABSOLUTE_MINIMUM, SMOOTHNESS_TAKES_PRIORITY);
+ TileVector active_now =
+ CreateTiles(3, TilePriorityForNowBin(), TilePriority());
+ TileVector pending_now =
+ CreateTiles(3, TilePriority(), TilePriorityForNowBin());
+ TileVector active_pending_soon =
+ CreateTiles(3, TilePriorityForSoonBin(), TilePriorityForSoonBin());
+ TileVector never_bin = CreateTiles(1, TilePriority(), TilePriority());
+
+ tile_manager()->AssignMemoryToTiles(global_state_);
+
+ EXPECT_EQ(3, AssignedMemoryCount(active_now));
+ EXPECT_EQ(3, AssignedMemoryCount(pending_now));
+ EXPECT_EQ(0, AssignedMemoryCount(active_pending_soon));
+ EXPECT_EQ(0, AssignedMemoryCount(never_bin));
+
+ ReleaseTiles(&active_now);
+ ReleaseTiles(&pending_now);
+ ReleaseTiles(&active_pending_soon);
+ ReleaseTiles(&never_bin);
+}
+
+TEST_P(TileManagerTest, EnoughMemoryAllowNothing) {
+ // A few tiles of each type of priority, with enough memory for all tiles,
+ // but allow nothing should not assign any memory.
+
+ Initialize(10, ALLOW_NOTHING, SMOOTHNESS_TAKES_PRIORITY);
+ TileVector active_now =
+ CreateTiles(3, TilePriorityForNowBin(), TilePriority());
+ TileVector pending_now =
+ CreateTiles(3, TilePriority(), TilePriorityForNowBin());
+ TileVector active_pending_soon =
+ CreateTiles(3, TilePriorityForSoonBin(), TilePriorityForSoonBin());
+ TileVector never_bin = CreateTiles(1, TilePriority(), TilePriority());
+
+ tile_manager()->AssignMemoryToTiles(global_state_);
+
+ EXPECT_EQ(0, AssignedMemoryCount(active_now));
+ EXPECT_EQ(0, AssignedMemoryCount(pending_now));
+ EXPECT_EQ(0, AssignedMemoryCount(active_pending_soon));
+ EXPECT_EQ(0, AssignedMemoryCount(never_bin));
+
+ ReleaseTiles(&active_now);
+ ReleaseTiles(&pending_now);
+ ReleaseTiles(&active_pending_soon);
+ ReleaseTiles(&never_bin);
+}
+
+TEST_P(TileManagerTest, PartialOOMMemoryToPending) {
+ // 5 tiles on active tree eventually bin, 5 tiles on pending tree that are
+ // required for activation, but only enough memory for 8 tiles. The result
+ // is all pending tree tiles get memory, and 3 of the active tree tiles
+  // get memory. None of these tiles is needed to avoid calamity (flickering or
+ // raster-on-demand) so the soft memory limit is used.
+
+ Initialize(8, ALLOW_ANYTHING, SMOOTHNESS_TAKES_PRIORITY);
+ TileVector active_tree_tiles =
+ CreateTiles(5, TilePriorityForEventualBin(), TilePriority());
+ TileVector pending_tree_tiles =
+ CreateTiles(5, TilePriority(), TilePriorityRequiredForActivation());
+ tile_manager()->AssignMemoryToTiles(global_state_);
+
+ EXPECT_EQ(5, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(3, AssignedMemoryCount(pending_tree_tiles));
+
+ SetTreePriority(SAME_PRIORITY_FOR_BOTH_TREES);
+ tile_manager()->AssignMemoryToTiles(global_state_);
+
+ EXPECT_EQ(3, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(5, AssignedMemoryCount(pending_tree_tiles));
+
+ ReleaseTiles(&active_tree_tiles);
+ ReleaseTiles(&pending_tree_tiles);
+}
+
+TEST_P(TileManagerTest, PartialOOMMemoryToActive) {
+ // 5 tiles on active tree eventually bin, 5 tiles on pending tree now bin,
+ // but only enough memory for 8 tiles. The result is all active tree tiles
+ // get memory, and 3 of the pending tree tiles get memory.
+  // The pending tiles are not needed to avoid calamity (flickering or
+ // raster-on-demand) and the active tiles fit, so the soft limit is used.
+
+ Initialize(8, ALLOW_ANYTHING, SMOOTHNESS_TAKES_PRIORITY);
+ TileVector active_tree_tiles =
+ CreateTiles(5, TilePriorityForNowBin(), TilePriority());
+ TileVector pending_tree_tiles =
+ CreateTiles(5, TilePriority(), TilePriorityForNowBin());
+
+ tile_manager()->AssignMemoryToTiles(global_state_);
+
+ EXPECT_EQ(5, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(3, AssignedMemoryCount(pending_tree_tiles));
+
+ ReleaseTiles(&active_tree_tiles);
+ ReleaseTiles(&pending_tree_tiles);
+}
+
+TEST_P(TileManagerTest, TotalOOMMemoryToPending) {
+ // 10 tiles on active tree eventually bin, 10 tiles on pending tree that are
+  // required for activation, but only enough memory for 4 tiles. The result
+ // is 4 pending tree tiles get memory, and none of the active tree tiles
+ // get memory.
+
+ Initialize(4, ALLOW_ANYTHING, SMOOTHNESS_TAKES_PRIORITY);
+ TileVector active_tree_tiles =
+ CreateTiles(10, TilePriorityForEventualBin(), TilePriority());
+ TileVector pending_tree_tiles =
+ CreateTiles(10, TilePriority(), TilePriorityRequiredForActivation());
+
+ tile_manager()->AssignMemoryToTiles(global_state_);
+
+ EXPECT_EQ(4, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(0, AssignedMemoryCount(pending_tree_tiles));
+
+ SetTreePriority(SAME_PRIORITY_FOR_BOTH_TREES);
+ tile_manager()->AssignMemoryToTiles(global_state_);
+
+ if (UsingResourceLimit()) {
+ EXPECT_EQ(0, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(4, AssignedMemoryCount(pending_tree_tiles));
+ } else {
+    // Pending tiles are now required to avoid calamity (flickering or
+ // raster-on-demand). Hard-limit is used and double the tiles fit.
+ EXPECT_EQ(0, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(8, AssignedMemoryCount(pending_tree_tiles));
+ }
+
+ ReleaseTiles(&active_tree_tiles);
+ ReleaseTiles(&pending_tree_tiles);
+}
+
+TEST_P(TileManagerTest, TotalOOMActiveSoonMemoryToPending) {
+ // 10 tiles on active tree soon bin, 10 tiles on pending tree that are
+  // required for activation, but only enough memory for 4 tiles. The result
+ // is 4 pending tree tiles get memory, and none of the active tree tiles
+ // get memory.
+
+ Initialize(4, ALLOW_ANYTHING, SMOOTHNESS_TAKES_PRIORITY);
+ TileVector active_tree_tiles =
+ CreateTiles(10, TilePriorityForSoonBin(), TilePriority());
+ TileVector pending_tree_tiles =
+ CreateTiles(10, TilePriority(), TilePriorityRequiredForActivation());
+
+ tile_manager()->AssignMemoryToTiles(global_state_);
+
+ EXPECT_EQ(4, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(0, AssignedMemoryCount(pending_tree_tiles));
+
+ SetTreePriority(SAME_PRIORITY_FOR_BOTH_TREES);
+ tile_manager()->AssignMemoryToTiles(global_state_);
+
+ if (UsingResourceLimit()) {
+ EXPECT_EQ(0, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(4, AssignedMemoryCount(pending_tree_tiles));
+ } else {
+    // Pending tiles are now required to avoid calamity (flickering or
+ // raster-on-demand). Hard-limit is used and double the tiles fit.
+ EXPECT_EQ(0, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(8, AssignedMemoryCount(pending_tree_tiles));
+ }
+
+ ReleaseTiles(&active_tree_tiles);
+ ReleaseTiles(&pending_tree_tiles);
+}
+
+TEST_P(TileManagerTest, TotalOOMMemoryToActive) {
+ // 10 tiles on active tree eventually bin, 10 tiles on pending tree now bin,
+ // but only enough memory for 4 tiles. The result is 4 active tree tiles
+ // get memory, and none of the pending tree tiles get memory.
+
+ Initialize(4, ALLOW_ANYTHING, SMOOTHNESS_TAKES_PRIORITY);
+ TileVector active_tree_tiles =
+ CreateTiles(10, TilePriorityForNowBin(), TilePriority());
+ TileVector pending_tree_tiles =
+ CreateTiles(10, TilePriority(), TilePriorityForNowBin());
+
+ tile_manager()->AssignMemoryToTiles(global_state_);
+
+ if (UsingResourceLimit()) {
+ EXPECT_EQ(4, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(0, AssignedMemoryCount(pending_tree_tiles));
+ } else {
+    // Active tiles are required to avoid calamity (flickering or
+ // raster-on-demand). Hard-limit is used and double the tiles fit.
+ EXPECT_EQ(8, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(0, AssignedMemoryCount(pending_tree_tiles));
+ }
+
+ ReleaseTiles(&active_tree_tiles);
+ ReleaseTiles(&pending_tree_tiles);
+}
+
+TEST_P(TileManagerTest, TotalOOMMemoryToNewContent) {
+ // 10 tiles on active tree now bin, 10 tiles on pending tree now bin,
+ // but only enough memory for 8 tiles. Any tile missing would cause
+ // a calamity (flickering or raster-on-demand). Depending on mode,
+ // we should use varying amounts of the higher hard memory limit.
+ if (UsingResourceLimit())
+ return;
+
+ Initialize(8, ALLOW_ANYTHING, SMOOTHNESS_TAKES_PRIORITY);
+ TileVector active_tree_tiles =
+ CreateTiles(10, TilePriorityForNowBin(), TilePriority());
+ TileVector pending_tree_tiles =
+ CreateTiles(10, TilePriority(), TilePriorityForNowBin());
+
+  // Active tiles are required to avoid calamity. The hard-limit is used and all
+ // active-tiles fit. No pending tiles are needed to avoid calamity so only 10
+ // tiles total are used.
+ tile_manager()->AssignMemoryToTiles(global_state_);
+ EXPECT_EQ(10, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(0, AssignedMemoryCount(pending_tree_tiles));
+
+ // Even the hard-limit won't save us now. All tiles are required to avoid
+  // a calamity but we only have 16. The tiles will be distributed randomly
+ // given they are identical, in practice depending on their screen location.
+ SetTreePriority(SAME_PRIORITY_FOR_BOTH_TREES);
+ tile_manager()->AssignMemoryToTiles(global_state_);
+ EXPECT_EQ(16,
+ AssignedMemoryCount(active_tree_tiles) +
+ AssignedMemoryCount(pending_tree_tiles));
+
+ // The pending tree is now more important. Active tiles will take higher
+ // priority if they are ready-to-draw in practice. Importantly though,
+ // pending tiles also utilize the hard-limit.
+ SetTreePriority(NEW_CONTENT_TAKES_PRIORITY);
+ tile_manager()->AssignMemoryToTiles(global_state_);
+ EXPECT_EQ(0, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(10, AssignedMemoryCount(pending_tree_tiles));
+
+ ReleaseTiles(&active_tree_tiles);
+ ReleaseTiles(&pending_tree_tiles);
+}
+
+// If true, the max tile limit should be applied as bytes; if false,
+// as num_resources_limit.
+INSTANTIATE_TEST_CASE_P(TileManagerTests,
+ TileManagerTest,
+ ::testing::Values(true, false));
+
+class LowResTilingsSettings : public ImplSidePaintingSettings {
+ public:
+ LowResTilingsSettings() { create_low_res_tiling = true; }
+};
+
+class TileManagerTilePriorityQueueTest : public testing::Test {
+ public:
+ TileManagerTilePriorityQueueTest()
+ : memory_limit_policy_(ALLOW_ANYTHING),
+ max_tiles_(10000),
+ ready_to_activate_(false),
+ id_(7),
+ proxy_(base::MessageLoopProxy::current()),
+ host_impl_(LowResTilingsSettings(), &proxy_, &shared_bitmap_manager_) {}
+
+ void SetTreePriority(TreePriority tree_priority) {
+ GlobalStateThatImpactsTilePriority state;
+ gfx::Size tile_size(256, 256);
+
+ state.soft_memory_limit_in_bytes = 100 * 1000 * 1000;
+ state.num_resources_limit = max_tiles_;
+ state.hard_memory_limit_in_bytes = state.soft_memory_limit_in_bytes * 2;
+ state.memory_limit_policy = memory_limit_policy_;
+ state.tree_priority = tree_priority;
+
+ global_state_ = state;
+ host_impl_.resource_pool()->SetResourceUsageLimits(
+ state.soft_memory_limit_in_bytes,
+ state.soft_memory_limit_in_bytes,
+ state.num_resources_limit);
+ host_impl_.tile_manager()->SetGlobalStateForTesting(state);
+ }
+
+ virtual void SetUp() OVERRIDE {
+ InitializeRenderer();
+ SetTreePriority(SAME_PRIORITY_FOR_BOTH_TREES);
+ }
+
+ virtual void InitializeRenderer() {
+ host_impl_.InitializeRenderer(FakeOutputSurface::Create3d());
+ }
+
+ void SetupDefaultTrees(const gfx::Size& layer_bounds) {
+ gfx::Size tile_size(100, 100);
+
+ scoped_refptr<FakePicturePileImpl> pending_pile =
+ FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+ scoped_refptr<FakePicturePileImpl> active_pile =
+ FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+
+ SetupTrees(pending_pile, active_pile);
+ }
+
+ void ActivateTree() {
+ host_impl_.ActivateSyncTree();
+ CHECK(!host_impl_.pending_tree());
+ pending_layer_ = NULL;
+ active_layer_ = static_cast<FakePictureLayerImpl*>(
+ host_impl_.active_tree()->LayerById(id_));
+ }
+
+ void SetupDefaultTreesWithFixedTileSize(const gfx::Size& layer_bounds,
+ const gfx::Size& tile_size) {
+ SetupDefaultTrees(layer_bounds);
+ pending_layer_->set_fixed_tile_size(tile_size);
+ active_layer_->set_fixed_tile_size(tile_size);
+ }
+
+ void SetupTrees(scoped_refptr<PicturePileImpl> pending_pile,
+ scoped_refptr<PicturePileImpl> active_pile) {
+ SetupPendingTree(active_pile);
+ ActivateTree();
+ SetupPendingTree(pending_pile);
+ }
+
+ void SetupPendingTree(scoped_refptr<PicturePileImpl> pile) {
+ host_impl_.CreatePendingTree();
+ LayerTreeImpl* pending_tree = host_impl_.pending_tree();
+ // Clear recycled tree.
+ pending_tree->DetachLayerTree();
+
+ scoped_ptr<FakePictureLayerImpl> pending_layer =
+ FakePictureLayerImpl::CreateWithPile(pending_tree, id_, pile);
+ pending_layer->SetDrawsContent(true);
+ pending_tree->SetRootLayer(pending_layer.Pass());
+
+ pending_layer_ = static_cast<FakePictureLayerImpl*>(
+ host_impl_.pending_tree()->LayerById(id_));
+ pending_layer_->DoPostCommitInitializationIfNeeded();
+ }
+
+ void CreateHighLowResAndSetAllTilesVisible() {
+ // Active layer must get updated first so pending layer can share from it.
+ active_layer_->CreateDefaultTilingsAndTiles();
+ active_layer_->SetAllTilesVisible();
+ pending_layer_->CreateDefaultTilingsAndTiles();
+ pending_layer_->SetAllTilesVisible();
+ }
+
+ TileManager* tile_manager() { return host_impl_.tile_manager(); }
+
+ protected:
+ GlobalStateThatImpactsTilePriority global_state_;
+
+ TestSharedBitmapManager shared_bitmap_manager_;
+ TileMemoryLimitPolicy memory_limit_policy_;
+ int max_tiles_;
+ bool ready_to_activate_;
+ int id_;
+ FakeImplProxy proxy_;
+ FakeLayerTreeHostImpl host_impl_;
+ FakePictureLayerImpl* pending_layer_;
+ FakePictureLayerImpl* active_layer_;
+};
+
+TEST_F(TileManagerTilePriorityQueueTest, RasterTilePriorityQueue) {
+ SetupDefaultTrees(gfx::Size(1000, 1000));
+
+ active_layer_->CreateDefaultTilingsAndTiles();
+ pending_layer_->CreateDefaultTilingsAndTiles();
+
+ RasterTilePriorityQueue queue;
+ host_impl_.BuildRasterQueue(&queue, SAME_PRIORITY_FOR_BOTH_TREES);
+ EXPECT_FALSE(queue.IsEmpty());
+
+ size_t tile_count = 0;
+ std::set<Tile*> all_tiles;
+ while (!queue.IsEmpty()) {
+ EXPECT_TRUE(queue.Top());
+ all_tiles.insert(queue.Top());
+ ++tile_count;
+ queue.Pop();
+ }
+
+ EXPECT_EQ(tile_count, all_tiles.size());
+ EXPECT_EQ(17u, tile_count);
+
+ // Sanity check, all tiles should be visible.
+ std::set<Tile*> smoothness_tiles;
+ queue.Reset();
+ host_impl_.BuildRasterQueue(&queue, SMOOTHNESS_TAKES_PRIORITY);
+ while (!queue.IsEmpty()) {
+ Tile* tile = queue.Top();
+ EXPECT_TRUE(tile);
+ EXPECT_EQ(TilePriority::NOW, tile->priority(ACTIVE_TREE).priority_bin);
+ EXPECT_EQ(TilePriority::NOW, tile->priority(PENDING_TREE).priority_bin);
+ smoothness_tiles.insert(tile);
+ queue.Pop();
+ }
+ EXPECT_EQ(all_tiles, smoothness_tiles);
+
+ Region invalidation(gfx::Rect(0, 0, 500, 500));
+
+ // Invalidate the pending tree.
+ pending_layer_->set_invalidation(invalidation);
+ pending_layer_->HighResTiling()->UpdateTilesToCurrentPile(
+ invalidation, gfx::Size(1000, 1000));
+ pending_layer_->LowResTiling()->UpdateTilesToCurrentPile(
+ invalidation, gfx::Size(1000, 1000));
+
+ active_layer_->ResetAllTilesPriorities();
+ pending_layer_->ResetAllTilesPriorities();
+
+ // Renew all of the tile priorities.
+ gfx::Rect viewport(50, 50, 100, 100);
+ pending_layer_->HighResTiling()->UpdateTilePriorities(
+ PENDING_TREE, viewport, 1.0f, 1.0, Occlusion());
+ pending_layer_->LowResTiling()->UpdateTilePriorities(
+ PENDING_TREE, viewport, 1.0f, 1.0, Occlusion());
+ active_layer_->HighResTiling()->UpdateTilePriorities(
+ ACTIVE_TREE, viewport, 1.0f, 1.0, Occlusion());
+ active_layer_->LowResTiling()->UpdateTilePriorities(
+ ACTIVE_TREE, viewport, 1.0f, 1.0, Occlusion());
+
+ // Populate all tiles directly from the tilings.
+ all_tiles.clear();
+ std::vector<Tile*> pending_high_res_tiles =
+ pending_layer_->HighResTiling()->AllTilesForTesting();
+ for (size_t i = 0; i < pending_high_res_tiles.size(); ++i)
+ all_tiles.insert(pending_high_res_tiles[i]);
+
+ std::vector<Tile*> pending_low_res_tiles =
+ pending_layer_->LowResTiling()->AllTilesForTesting();
+ for (size_t i = 0; i < pending_low_res_tiles.size(); ++i)
+ all_tiles.insert(pending_low_res_tiles[i]);
+
+ std::vector<Tile*> active_high_res_tiles =
+ active_layer_->HighResTiling()->AllTilesForTesting();
+ for (size_t i = 0; i < active_high_res_tiles.size(); ++i)
+ all_tiles.insert(active_high_res_tiles[i]);
+
+ std::vector<Tile*> active_low_res_tiles =
+ active_layer_->LowResTiling()->AllTilesForTesting();
+ for (size_t i = 0; i < active_low_res_tiles.size(); ++i)
+ all_tiles.insert(active_low_res_tiles[i]);
+
+ Tile* last_tile = NULL;
+ smoothness_tiles.clear();
+ tile_count = 0;
+ size_t increasing_distance_tiles = 0u;
+ // Here we expect to get increasing ACTIVE_TREE priority_bin.
+ queue.Reset();
+ host_impl_.BuildRasterQueue(&queue, SMOOTHNESS_TAKES_PRIORITY);
+ while (!queue.IsEmpty()) {
+ Tile* tile = queue.Top();
+ EXPECT_TRUE(tile);
+
+ if (!last_tile)
+ last_tile = tile;
+
+ EXPECT_LE(last_tile->priority(ACTIVE_TREE).priority_bin,
+ tile->priority(ACTIVE_TREE).priority_bin);
+ if (last_tile->priority(ACTIVE_TREE).priority_bin ==
+ tile->priority(ACTIVE_TREE).priority_bin) {
+ increasing_distance_tiles +=
+ last_tile->priority(ACTIVE_TREE).distance_to_visible <=
+ tile->priority(ACTIVE_TREE).distance_to_visible;
+ }
+
+ if (tile->priority(ACTIVE_TREE).priority_bin == TilePriority::NOW &&
+ last_tile->priority(ACTIVE_TREE).resolution !=
+ tile->priority(ACTIVE_TREE).resolution) {
+ // Low resolution should come first.
+ EXPECT_EQ(LOW_RESOLUTION, last_tile->priority(ACTIVE_TREE).resolution);
+ }
+
+ last_tile = tile;
+ ++tile_count;
+ smoothness_tiles.insert(tile);
+ queue.Pop();
+ }
+
+ EXPECT_EQ(tile_count, smoothness_tiles.size());
+ EXPECT_EQ(all_tiles, smoothness_tiles);
+ // Since we don't guarantee increasing distance due to spiral iterator, we
+ // should check that we're _mostly_ right.
+ EXPECT_GT(increasing_distance_tiles, 3 * tile_count / 4);
+
+ std::set<Tile*> new_content_tiles;
+ last_tile = NULL;
+ increasing_distance_tiles = 0u;
+ // Here we expect to get increasing PENDING_TREE priority_bin.
+ queue.Reset();
+ host_impl_.BuildRasterQueue(&queue, NEW_CONTENT_TAKES_PRIORITY);
+ while (!queue.IsEmpty()) {
+ Tile* tile = queue.Top();
+ EXPECT_TRUE(tile);
+
+ if (!last_tile)
+ last_tile = tile;
+
+ EXPECT_LE(last_tile->priority(PENDING_TREE).priority_bin,
+ tile->priority(PENDING_TREE).priority_bin);
+ if (last_tile->priority(PENDING_TREE).priority_bin ==
+ tile->priority(PENDING_TREE).priority_bin) {
+ increasing_distance_tiles +=
+ last_tile->priority(PENDING_TREE).distance_to_visible <=
+ tile->priority(PENDING_TREE).distance_to_visible;
+ }
+
+ if (tile->priority(PENDING_TREE).priority_bin == TilePriority::NOW &&
+ last_tile->priority(PENDING_TREE).resolution !=
+ tile->priority(PENDING_TREE).resolution) {
+ // High resolution should come first.
+ EXPECT_EQ(HIGH_RESOLUTION, last_tile->priority(PENDING_TREE).resolution);
+ }
+
+ last_tile = tile;
+ new_content_tiles.insert(tile);
+ queue.Pop();
+ }
+
+ EXPECT_EQ(tile_count, new_content_tiles.size());
+ EXPECT_EQ(all_tiles, new_content_tiles);
+ // Since we don't guarantee increasing distance due to spiral iterator, we
+ // should check that we're _mostly_ right.
+ EXPECT_GT(increasing_distance_tiles, 3 * tile_count / 4);
+}
+
+// Verifies EvictionTilePriorityQueue ordering: resource-backed tiles come out
+// least-important-first for both SMOOTHNESS_TAKES_PRIORITY (keyed off the
+// active tree) and NEW_CONTENT_TAKES_PRIORITY (keyed off the pending tree),
+// with ties broken by required_for_activation and then by distance.
+TEST_F(TileManagerTilePriorityQueueTest, EvictionTilePriorityQueue) {
+  SetupDefaultTrees(gfx::Size(1000, 1000));
+
+  active_layer_->CreateDefaultTilingsAndTiles();
+  pending_layer_->CreateDefaultTilingsAndTiles();
+
+  // Before any tile owns resources, the eviction queue must be empty.
+  EvictionTilePriorityQueue empty_queue;
+  host_impl_.BuildEvictionQueue(&empty_queue, SAME_PRIORITY_FOR_BOTH_TREES);
+  EXPECT_TRUE(empty_queue.IsEmpty());
+  std::set<Tile*> all_tiles;
+  size_t tile_count = 0;
+
+  // Collect every tile via the raster queue.
+  RasterTilePriorityQueue raster_queue;
+  host_impl_.BuildRasterQueue(&raster_queue, SAME_PRIORITY_FOR_BOTH_TREES);
+  while (!raster_queue.IsEmpty()) {
+    ++tile_count;
+    EXPECT_TRUE(raster_queue.Top());
+    all_tiles.insert(raster_queue.Top());
+    raster_queue.Pop();
+  }
+
+  EXPECT_EQ(tile_count, all_tiles.size());
+  EXPECT_EQ(17u, tile_count);
+
+  tile_manager()->InitializeTilesWithResourcesForTesting(
+      std::vector<Tile*>(all_tiles.begin(), all_tiles.end()));
+
+  EvictionTilePriorityQueue queue;
+  host_impl_.BuildEvictionQueue(&queue, SMOOTHNESS_TAKES_PRIORITY);
+  EXPECT_FALSE(queue.IsEmpty());
+
+  // Sanity check, all tiles should be visible.
+  std::set<Tile*> smoothness_tiles;
+  while (!queue.IsEmpty()) {
+    Tile* tile = queue.Top();
+    EXPECT_TRUE(tile);
+    EXPECT_EQ(TilePriority::NOW, tile->priority(ACTIVE_TREE).priority_bin);
+    EXPECT_EQ(TilePriority::NOW, tile->priority(PENDING_TREE).priority_bin);
+    EXPECT_TRUE(tile->HasResources());
+    smoothness_tiles.insert(tile);
+    queue.Pop();
+  }
+  EXPECT_EQ(all_tiles, smoothness_tiles);
+
+  tile_manager()->ReleaseTileResourcesForTesting(
+      std::vector<Tile*>(all_tiles.begin(), all_tiles.end()));
+
+  Region invalidation(gfx::Rect(0, 0, 500, 500));
+
+  // Invalidate the pending tree.
+  pending_layer_->set_invalidation(invalidation);
+  pending_layer_->HighResTiling()->UpdateTilesToCurrentPile(
+      invalidation, gfx::Size(1000, 1000));
+  pending_layer_->LowResTiling()->UpdateTilesToCurrentPile(
+      invalidation, gfx::Size(1000, 1000));
+
+  active_layer_->ResetAllTilesPriorities();
+  pending_layer_->ResetAllTilesPriorities();
+
+  // Renew all of the tile priorities.
+  gfx::Rect viewport(50, 50, 100, 100);
+  pending_layer_->HighResTiling()->UpdateTilePriorities(
+      PENDING_TREE, viewport, 1.0f, 1.0, Occlusion());
+  pending_layer_->LowResTiling()->UpdateTilePriorities(
+      PENDING_TREE, viewport, 1.0f, 1.0, Occlusion());
+  active_layer_->HighResTiling()->UpdateTilePriorities(
+      ACTIVE_TREE, viewport, 1.0f, 1.0, Occlusion());
+  active_layer_->LowResTiling()->UpdateTilePriorities(
+      ACTIVE_TREE, viewport, 1.0f, 1.0, Occlusion());
+
+  // Populate all tiles directly from the tilings.
+  all_tiles.clear();
+  std::vector<Tile*> pending_high_res_tiles =
+      pending_layer_->HighResTiling()->AllTilesForTesting();
+  for (size_t i = 0; i < pending_high_res_tiles.size(); ++i)
+    all_tiles.insert(pending_high_res_tiles[i]);
+
+  std::vector<Tile*> pending_low_res_tiles =
+      pending_layer_->LowResTiling()->AllTilesForTesting();
+  for (size_t i = 0; i < pending_low_res_tiles.size(); ++i)
+    all_tiles.insert(pending_low_res_tiles[i]);
+
+  std::vector<Tile*> active_high_res_tiles =
+      active_layer_->HighResTiling()->AllTilesForTesting();
+  for (size_t i = 0; i < active_high_res_tiles.size(); ++i)
+    all_tiles.insert(active_high_res_tiles[i]);
+
+  std::vector<Tile*> active_low_res_tiles =
+      active_layer_->LowResTiling()->AllTilesForTesting();
+  for (size_t i = 0; i < active_low_res_tiles.size(); ++i)
+    all_tiles.insert(active_low_res_tiles[i]);
+
+  tile_manager()->InitializeTilesWithResourcesForTesting(
+      std::vector<Tile*>(all_tiles.begin(), all_tiles.end()));
+
+  pending_layer_->MarkVisibleResourcesAsRequired();
+
+  Tile* last_tile = NULL;
+  smoothness_tiles.clear();
+  tile_count = 0;
+  // Here we expect to get non-increasing ACTIVE_TREE priority_bin, i.e. the
+  // least important tiles first.
+  queue.Reset();
+  host_impl_.BuildEvictionQueue(&queue, SMOOTHNESS_TAKES_PRIORITY);
+  while (!queue.IsEmpty()) {
+    Tile* tile = queue.Top();
+    EXPECT_TRUE(tile);
+    EXPECT_TRUE(tile->HasResources());
+
+    if (!last_tile)
+      last_tile = tile;
+
+    EXPECT_GE(last_tile->priority(ACTIVE_TREE).priority_bin,
+              tile->priority(ACTIVE_TREE).priority_bin);
+    if (last_tile->priority(ACTIVE_TREE).priority_bin ==
+        tile->priority(ACTIVE_TREE).priority_bin) {
+      // Within a bin, tiles not required for activation are evicted before
+      // required ones, and farther tiles before nearer ones.
+      EXPECT_LE(last_tile->required_for_activation(),
+                tile->required_for_activation());
+      if (last_tile->required_for_activation() ==
+          tile->required_for_activation()) {
+        EXPECT_GE(last_tile->priority(ACTIVE_TREE).distance_to_visible,
+                  tile->priority(ACTIVE_TREE).distance_to_visible);
+      }
+    }
+
+    last_tile = tile;
+    ++tile_count;
+    smoothness_tiles.insert(tile);
+    queue.Pop();
+  }
+
+  EXPECT_EQ(tile_count, smoothness_tiles.size());
+  EXPECT_EQ(all_tiles, smoothness_tiles);
+
+  std::set<Tile*> new_content_tiles;
+  last_tile = NULL;
+  // Here we expect to get non-increasing PENDING_TREE priority_bin.
+  queue.Reset();
+  host_impl_.BuildEvictionQueue(&queue, NEW_CONTENT_TAKES_PRIORITY);
+  while (!queue.IsEmpty()) {
+    Tile* tile = queue.Top();
+    EXPECT_TRUE(tile);
+
+    if (!last_tile)
+      last_tile = tile;
+
+    EXPECT_GE(last_tile->priority(PENDING_TREE).priority_bin,
+              tile->priority(PENDING_TREE).priority_bin);
+    if (last_tile->priority(PENDING_TREE).priority_bin ==
+        tile->priority(PENDING_TREE).priority_bin) {
+      EXPECT_LE(last_tile->required_for_activation(),
+                tile->required_for_activation());
+      if (last_tile->required_for_activation() ==
+          tile->required_for_activation()) {
+        EXPECT_GE(last_tile->priority(PENDING_TREE).distance_to_visible,
+                  tile->priority(PENDING_TREE).distance_to_visible);
+      }
+    }
+
+    last_tile = tile;
+    new_content_tiles.insert(tile);
+    queue.Pop();
+  }
+
+  // Both orderings must cover exactly the same set of tiles.
+  EXPECT_EQ(tile_count, new_content_tiles.size());
+  EXPECT_EQ(all_tiles, new_content_tiles);
+}
+
+// Verifies that EvictionTilePriorityQueue takes occlusion into account:
+// occluded tiles (here, all tiles of a child layer) are offered for eviction
+// before unoccluded tiles of otherwise equal standing.
+TEST_F(TileManagerTilePriorityQueueTest,
+       EvictionTilePriorityQueueWithOcclusion) {
+  gfx::Size tile_size(102, 102);
+  gfx::Size layer_bounds(1000, 1000);
+
+  scoped_refptr<FakePicturePileImpl> pending_pile =
+      FakePicturePileImpl::CreateFilledPile(tile_size, layer_bounds);
+  SetupPendingTree(pending_pile);
+  pending_layer_->CreateDefaultTilingsAndTiles();
+
+  // Add a drawing child layer sharing the same pile.
+  scoped_ptr<FakePictureLayerImpl> pending_child =
+      FakePictureLayerImpl::CreateWithPile(
+          host_impl_.pending_tree(), 2, pending_pile);
+  pending_layer_->AddChild(pending_child.Pass());
+
+  FakePictureLayerImpl* pending_child_layer =
+      static_cast<FakePictureLayerImpl*>(pending_layer_->children()[0]);
+  pending_child_layer->SetDrawsContent(true);
+  pending_child_layer->DoPostCommitInitializationIfNeeded();
+  pending_child_layer->CreateDefaultTilingsAndTiles();
+
+  std::set<Tile*> all_tiles;
+  size_t tile_count = 0;
+  RasterTilePriorityQueue raster_queue;
+  host_impl_.BuildRasterQueue(&raster_queue, SAME_PRIORITY_FOR_BOTH_TREES);
+  while (!raster_queue.IsEmpty()) {
+    ++tile_count;
+    EXPECT_TRUE(raster_queue.Top());
+    all_tiles.insert(raster_queue.Top());
+    raster_queue.Pop();
+  }
+  EXPECT_EQ(tile_count, all_tiles.size());
+  EXPECT_EQ(34u, tile_count);
+
+  pending_layer_->ResetAllTilesPriorities();
+
+  // Renew all of the tile priorities.
+  gfx::Rect viewport(layer_bounds);
+  pending_layer_->HighResTiling()->UpdateTilePriorities(
+      PENDING_TREE, viewport, 1.0f, 1.0, Occlusion());
+  pending_layer_->LowResTiling()->UpdateTilePriorities(
+      PENDING_TREE, viewport, 1.0f, 1.0, Occlusion());
+  pending_child_layer->HighResTiling()->UpdateTilePriorities(
+      PENDING_TREE, viewport, 1.0f, 1.0, Occlusion());
+  pending_child_layer->LowResTiling()->UpdateTilePriorities(
+      PENDING_TREE, viewport, 1.0f, 1.0, Occlusion());
+
+  // Populate all tiles directly from the tilings.
+  all_tiles.clear();
+  std::vector<Tile*> pending_high_res_tiles =
+      pending_layer_->HighResTiling()->AllTilesForTesting();
+  for (size_t i = 0; i < pending_high_res_tiles.size(); ++i)
+    all_tiles.insert(pending_high_res_tiles[i]);
+
+  std::vector<Tile*> pending_low_res_tiles =
+      pending_layer_->LowResTiling()->AllTilesForTesting();
+  for (size_t i = 0; i < pending_low_res_tiles.size(); ++i)
+    all_tiles.insert(pending_low_res_tiles[i]);
+
+  // Set all tiles on the pending_child_layer as occluded on the pending tree.
+  std::vector<Tile*> pending_child_high_res_tiles =
+      pending_child_layer->HighResTiling()->AllTilesForTesting();
+  for (size_t i = 0; i < pending_child_high_res_tiles.size(); ++i) {
+    pending_child_high_res_tiles[i]->set_is_occluded(PENDING_TREE, true);
+    all_tiles.insert(pending_child_high_res_tiles[i]);
+  }
+
+  std::vector<Tile*> pending_child_low_res_tiles =
+      pending_child_layer->LowResTiling()->AllTilesForTesting();
+  for (size_t i = 0; i < pending_child_low_res_tiles.size(); ++i) {
+    pending_child_low_res_tiles[i]->set_is_occluded(PENDING_TREE, true);
+    all_tiles.insert(pending_child_low_res_tiles[i]);
+  }
+
+  tile_manager()->InitializeTilesWithResourcesForTesting(
+      std::vector<Tile*>(all_tiles.begin(), all_tiles.end()));
+
+  // Verify occlusion is considered by EvictionTilePriorityQueue.
+  TreePriority tree_priority = NEW_CONTENT_TAKES_PRIORITY;
+  size_t occluded_count = 0u;
+  Tile* last_tile = NULL;
+  EvictionTilePriorityQueue queue;
+  host_impl_.BuildEvictionQueue(&queue, tree_priority);
+  while (!queue.IsEmpty()) {
+    Tile* tile = queue.Top();
+    if (!last_tile)
+      last_tile = tile;
+
+    bool tile_is_occluded = tile->is_occluded_for_tree_priority(tree_priority);
+
+    // The only way we will encounter an occluded tile after an unoccluded
+    // tile is if the priority bin decreased, the tile is required for
+    // activation, or the scale changed.
+    if (tile_is_occluded) {
+      occluded_count++;
+
+      bool last_tile_is_occluded =
+          last_tile->is_occluded_for_tree_priority(tree_priority);
+      if (!last_tile_is_occluded) {
+        TilePriority::PriorityBin tile_priority_bin =
+            tile->priority_for_tree_priority(tree_priority).priority_bin;
+        TilePriority::PriorityBin last_tile_priority_bin =
+            last_tile->priority_for_tree_priority(tree_priority).priority_bin;
+
+        EXPECT_TRUE((tile_priority_bin < last_tile_priority_bin) ||
+                    tile->required_for_activation() ||
+                    (tile->contents_scale() != last_tile->contents_scale()));
+      }
+    }
+    last_tile = tile;
+    queue.Pop();
+  }
+  // Exactly the child layer's tiles (and no others) were marked occluded.
+  size_t expected_occluded_count =
+      pending_child_high_res_tiles.size() + pending_child_low_res_tiles.size();
+  EXPECT_EQ(expected_occluded_count, occluded_count);
+}
+
+// Verifies that layers without tilings/tiles contribute nothing to the raster
+// queue: adding nine tile-less child layers leaves the tile set unchanged.
+TEST_F(TileManagerTilePriorityQueueTest, RasterTilePriorityQueueEmptyLayers) {
+  SetupDefaultTrees(gfx::Size(1000, 1000));
+
+  active_layer_->CreateDefaultTilingsAndTiles();
+  pending_layer_->CreateDefaultTilingsAndTiles();
+
+  RasterTilePriorityQueue queue;
+  host_impl_.BuildRasterQueue(&queue, SAME_PRIORITY_FOR_BOTH_TREES);
+  EXPECT_FALSE(queue.IsEmpty());
+
+  size_t tile_count = 0;
+  std::set<Tile*> all_tiles;
+  while (!queue.IsEmpty()) {
+    EXPECT_TRUE(queue.Top());
+    all_tiles.insert(queue.Top());
+    ++tile_count;
+    queue.Pop();
+  }
+
+  EXPECT_EQ(tile_count, all_tiles.size());
+  EXPECT_EQ(17u, tile_count);
+
+  queue.Reset();
+  // Add layers that draw content and claim valid priorities but own no tiles.
+  for (int i = 1; i < 10; ++i) {
+    scoped_ptr<FakePictureLayerImpl> pending_layer =
+        FakePictureLayerImpl::Create(host_impl_.pending_tree(), id_ + i);
+    pending_layer->SetDrawsContent(true);
+    pending_layer->DoPostCommitInitializationIfNeeded();
+    pending_layer->set_has_valid_tile_priorities(true);
+    pending_layer_->AddChild(pending_layer.Pass());
+  }
+
+  host_impl_.BuildRasterQueue(&queue, SAME_PRIORITY_FOR_BOTH_TREES);
+  EXPECT_FALSE(queue.IsEmpty());
+
+  tile_count = 0;
+  all_tiles.clear();
+  while (!queue.IsEmpty()) {
+    EXPECT_TRUE(queue.Top());
+    all_tiles.insert(queue.Top());
+    ++tile_count;
+    queue.Pop();
+  }
+  // Still exactly the original 17 tiles.
+  EXPECT_EQ(tile_count, all_tiles.size());
+  EXPECT_EQ(17u, tile_count);
+}
+
+// Same as RasterTilePriorityQueueEmptyLayers, but for the eviction queue:
+// tile-less child layers must not perturb the set of evictable tiles.
+TEST_F(TileManagerTilePriorityQueueTest, EvictionTilePriorityQueueEmptyLayers) {
+  SetupDefaultTrees(gfx::Size(1000, 1000));
+
+  active_layer_->CreateDefaultTilingsAndTiles();
+  pending_layer_->CreateDefaultTilingsAndTiles();
+
+  RasterTilePriorityQueue raster_queue;
+  host_impl_.BuildRasterQueue(&raster_queue, SAME_PRIORITY_FOR_BOTH_TREES);
+  EXPECT_FALSE(raster_queue.IsEmpty());
+
+  size_t tile_count = 0;
+  std::set<Tile*> all_tiles;
+  while (!raster_queue.IsEmpty()) {
+    EXPECT_TRUE(raster_queue.Top());
+    all_tiles.insert(raster_queue.Top());
+    ++tile_count;
+    raster_queue.Pop();
+  }
+  EXPECT_EQ(tile_count, all_tiles.size());
+  EXPECT_EQ(17u, tile_count);
+
+  // Eviction queues only see tiles that hold resources.
+  std::vector<Tile*> tiles(all_tiles.begin(), all_tiles.end());
+  host_impl_.tile_manager()->InitializeTilesWithResourcesForTesting(tiles);
+
+  EvictionTilePriorityQueue queue;
+  // Add layers that draw content and claim valid priorities but own no tiles.
+  for (int i = 1; i < 10; ++i) {
+    scoped_ptr<FakePictureLayerImpl> pending_layer =
+        FakePictureLayerImpl::Create(host_impl_.pending_tree(), id_ + i);
+    pending_layer->SetDrawsContent(true);
+    pending_layer->DoPostCommitInitializationIfNeeded();
+    pending_layer->set_has_valid_tile_priorities(true);
+    pending_layer_->AddChild(pending_layer.Pass());
+  }
+
+  host_impl_.BuildEvictionQueue(&queue, SAME_PRIORITY_FOR_BOTH_TREES);
+  EXPECT_FALSE(queue.IsEmpty());
+
+  tile_count = 0;
+  all_tiles.clear();
+  while (!queue.IsEmpty()) {
+    EXPECT_TRUE(queue.Top());
+    all_tiles.insert(queue.Top());
+    ++tile_count;
+    queue.Pop();
+  }
+  // Still exactly the original 17 tiles.
+  EXPECT_EQ(tile_count, all_tiles.size());
+  EXPECT_EQ(17u, tile_count);
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/tile_priority.cc b/cc/resources/tile_priority.cc
new file mode 100644
index 0000000..26826af
--- /dev/null
+++ b/cc/resources/tile_priority.cc
@@ -0,0 +1,98 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/tile_priority.h"
+
+#include "base/debug/trace_event_argument.h"
+#include "base/values.h"
+#include "cc/base/math_util.h"
+
+namespace cc {
+
+// Maps a WhichTree enumerator to its symbolic name for tracing/debugging.
+std::string WhichTreeToString(WhichTree tree) {
+  if (tree == ACTIVE_TREE)
+    return "ACTIVE_TREE";
+  if (tree == PENDING_TREE)
+    return "PENDING_TREE";
+  DCHECK(false) << "Unrecognized WhichTree value " << tree;
+  return "<unknown WhichTree value>";
+}
+
+// Maps a TileResolution enumerator to its symbolic name for tracing.
+std::string TileResolutionToString(TileResolution resolution) {
+  if (resolution == LOW_RESOLUTION)
+    return "LOW_RESOLUTION";
+  if (resolution == HIGH_RESOLUTION)
+    return "HIGH_RESOLUTION";
+  if (resolution == NON_IDEAL_RESOLUTION)
+    return "NON_IDEAL_RESOLUTION";
+  DCHECK(false) << "Unrecognized TileResolution value " << resolution;
+  return "<unknown TileResolution value>";
+}
+
+// Maps a TilePriority::PriorityBin enumerator to its symbolic name.
+std::string TilePriorityBinToString(TilePriority::PriorityBin bin) {
+  if (bin == TilePriority::NOW)
+    return "NOW";
+  if (bin == TilePriority::SOON)
+    return "SOON";
+  if (bin == TilePriority::EVENTUALLY)
+    return "EVENTUALLY";
+  DCHECK(false) << "Unrecognized TilePriority::PriorityBin value " << bin;
+  return "<unknown TilePriority::PriorityBin value>";
+}
+
+// Serializes this priority into |state| for trace events.
+void TilePriority::AsValueInto(base::debug::TracedValue* state) const {
+  state->SetString("resolution", TileResolutionToString(resolution));
+  state->SetString("priority_bin", TilePriorityBinToString(priority_bin));
+  // NOTE(review): AsDoubleSafely presumably sanitizes values that tracing/JSON
+  // cannot represent (distance defaults to +infinity) — confirm.
+  state->SetDouble("distance_to_visible",
+                   MathUtil::AsDoubleSafely(distance_to_visible));
+}
+
+// Maps a TileMemoryLimitPolicy enumerator to its symbolic name for tracing.
+std::string TileMemoryLimitPolicyToString(TileMemoryLimitPolicy policy) {
+  if (policy == ALLOW_NOTHING)
+    return "ALLOW_NOTHING";
+  if (policy == ALLOW_ABSOLUTE_MINIMUM)
+    return "ALLOW_ABSOLUTE_MINIMUM";
+  if (policy == ALLOW_PREPAINT_ONLY)
+    return "ALLOW_PREPAINT_ONLY";
+  if (policy == ALLOW_ANYTHING)
+    return "ALLOW_ANYTHING";
+  DCHECK(false) << "Unrecognized policy value";
+  return "<unknown>";
+}
+
+// Maps a TreePriority enumerator to its symbolic name for tracing.
+std::string TreePriorityToString(TreePriority prio) {
+  if (prio == SAME_PRIORITY_FOR_BOTH_TREES)
+    return "SAME_PRIORITY_FOR_BOTH_TREES";
+  if (prio == SMOOTHNESS_TAKES_PRIORITY)
+    return "SMOOTHNESS_TAKES_PRIORITY";
+  if (prio == NEW_CONTENT_TAKES_PRIORITY)
+    return "NEW_CONTENT_TAKES_PRIORITY";
+  DCHECK(false) << "Unrecognized priority value " << prio;
+  return "<unknown>";
+}
+
+// Serializes the global tile-management state into |state| for trace events.
+void GlobalStateThatImpactsTilePriority::AsValueInto(
+    base::debug::TracedValue* state) const {
+  state->SetString("memory_limit_policy",
+                   TileMemoryLimitPolicyToString(memory_limit_policy));
+  // NOTE(review): the size_t limits are recorded through SetInteger; confirm
+  // its width — very large byte limits may be truncated in traces.
+  state->SetInteger("soft_memory_limit_in_bytes", soft_memory_limit_in_bytes);
+  state->SetInteger("hard_memory_limit_in_bytes", hard_memory_limit_in_bytes);
+  state->SetInteger("num_resources_limit", num_resources_limit);
+  state->SetString("tree_priority", TreePriorityToString(tree_priority));
+}
+
+} // namespace cc
diff --git a/cc/resources/tile_priority.h b/cc/resources/tile_priority.h
new file mode 100644
index 0000000..7831eab
--- /dev/null
+++ b/cc/resources/tile_priority.h
@@ -0,0 +1,173 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_TILE_PRIORITY_H_
+#define CC_RESOURCES_TILE_PRIORITY_H_
+
+#include <algorithm>
+#include <limits>
+#include <string>
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "cc/resources/picture_pile.h"
+#include "ui/gfx/quad_f.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/size.h"
+
+namespace base {
+class Value;
+}
+
+namespace cc {
+
+// Identifies which LayerTreeImpl (active vs. pending) a priority refers to.
+enum WhichTree {
+  // Note: these must be 0 and 1 because we index with them in various places,
+  // e.g. in Tile::priority_.
+  ACTIVE_TREE = 0,
+  PENDING_TREE = 1,
+  NUM_TREES = 2
+  // Be sure to update WhichTreeAsValue when adding new fields.
+};
+scoped_ptr<base::Value> WhichTreeAsValue(WhichTree tree);
+
+// How close a tile's contents scale is to the layer's ideal scale.
+enum TileResolution {
+  LOW_RESOLUTION = 0,  // Fixed: stray space before the comma.
+  HIGH_RESOLUTION = 1,
+  NON_IDEAL_RESOLUTION = 2,
+};
+std::string TileResolutionToString(TileResolution resolution);
+
+// Describes how important a single tile is: the ideal-ness of its scale
+// (resolution), how soon it is needed (priority_bin), and how far it is from
+// the visible rect (distance_to_visible, used to break priority_bin ties).
+struct CC_EXPORT TilePriority {
+  // Ordered from most to least urgent; smaller values are higher priority.
+  enum PriorityBin { NOW, SOON, EVENTUALLY };
+
+  // Default-constructs the lowest possible priority: non-ideal resolution,
+  // needed only eventually, infinitely far from visible.
+  TilePriority()
+      : resolution(NON_IDEAL_RESOLUTION),
+        required_for_activation(false),
+        priority_bin(EVENTUALLY),
+        distance_to_visible(std::numeric_limits<float>::infinity()) {}
+
+  TilePriority(TileResolution resolution,
+               PriorityBin bin,
+               float distance_to_visible)
+      : resolution(resolution),
+        required_for_activation(false),
+        priority_bin(bin),
+        distance_to_visible(distance_to_visible) {}
+
+  // Merges the active- and pending-tree priorities of a tile, taking the more
+  // urgent value of each field independently.
+  TilePriority(const TilePriority& active, const TilePriority& pending) {
+    // Resolution preference: HIGH over LOW over NON_IDEAL.
+    if (active.resolution == HIGH_RESOLUTION ||
+        pending.resolution == HIGH_RESOLUTION)
+      resolution = HIGH_RESOLUTION;
+    else if (active.resolution == LOW_RESOLUTION ||
+             pending.resolution == LOW_RESOLUTION)
+      resolution = LOW_RESOLUTION;
+    else
+      resolution = NON_IDEAL_RESOLUTION;
+
+    required_for_activation =
+        active.required_for_activation || pending.required_for_activation;
+
+    // Take the smaller (more urgent) bin; the distance follows the winning
+    // bin, or is the minimum when the bins tie.
+    if (active.priority_bin < pending.priority_bin) {
+      priority_bin = active.priority_bin;
+      distance_to_visible = active.distance_to_visible;
+    } else if (active.priority_bin > pending.priority_bin) {
+      priority_bin = pending.priority_bin;
+      distance_to_visible = pending.distance_to_visible;
+    } else {
+      priority_bin = active.priority_bin;
+      distance_to_visible =
+          std::min(active.distance_to_visible, pending.distance_to_visible);
+    }
+  }
+
+  void AsValueInto(base::debug::TracedValue* dict) const;
+
+  bool operator ==(const TilePriority& other) const {
+    return resolution == other.resolution &&
+           priority_bin == other.priority_bin &&
+           distance_to_visible == other.distance_to_visible &&
+           required_for_activation == other.required_for_activation;
+  }
+
+  bool operator !=(const TilePriority& other) const {
+    return !(*this == other);
+  }
+
+  // Orders by bin, then by distance within a bin. Note that resolution and
+  // required_for_activation do not participate in this ordering.
+  bool IsHigherPriorityThan(const TilePriority& other) const {
+    return priority_bin < other.priority_bin ||
+           (priority_bin == other.priority_bin &&
+            distance_to_visible < other.distance_to_visible);
+  }
+
+  TileResolution resolution;
+  bool required_for_activation;
+  PriorityBin priority_bin;
+  float distance_to_visible;
+};
+
+std::string TilePriorityBinToString(TilePriority::PriorityBin bin);
+
+// How much memory the tile manager may spend on tiles, in increasing order of
+// permissiveness.
+enum TileMemoryLimitPolicy {
+  // Nothing. This mode is used when visible is set to false.
+  ALLOW_NOTHING = 0,
+
+  // You might be made visible, but you're not being interacted with.
+  ALLOW_ABSOLUTE_MINIMUM = 1,  // Tall.
+
+  // You're being interacted with, but we're low on memory.
+  ALLOW_PREPAINT_ONLY = 2,  // Grande.
+
+  // You're the only thing in town. Go crazy.
+  ALLOW_ANYTHING = 3,  // Venti.
+  NUM_TILE_MEMORY_LIMIT_POLICIES = 4,
+
+  // NOTE: Be sure to update TreePriorityAsValue and kBinPolicyMap when adding
+  // or reordering fields.
+};
+std::string TileMemoryLimitPolicyToString(TileMemoryLimitPolicy policy);
+
+// Which tree's tile priorities dominate when ordering raster/eviction work.
+enum TreePriority {
+  SAME_PRIORITY_FOR_BOTH_TREES,
+  SMOOTHNESS_TAKES_PRIORITY,
+  NEW_CONTENT_TAKES_PRIORITY,
+  NUM_TREE_PRIORITIES
+  // Be sure to update TreePriorityAsValue when adding new fields.
+};
+std::string TreePriorityToString(TreePriority prio);
+
+// Bundle of compositor-global inputs that, together with per-tile priorities,
+// determine how the tile manager budgets memory and resources.
+class GlobalStateThatImpactsTilePriority {
+ public:
+  // Starts fully restricted: no memory, no resources, neutral tree priority.
+  GlobalStateThatImpactsTilePriority()
+      : memory_limit_policy(ALLOW_NOTHING),
+        soft_memory_limit_in_bytes(0),
+        hard_memory_limit_in_bytes(0),
+        num_resources_limit(0),
+        tree_priority(SAME_PRIORITY_FOR_BOTH_TREES) {}
+
+  TileMemoryLimitPolicy memory_limit_policy;
+
+  // Byte budgets for tile memory. NOTE(review): presumably the soft limit is
+  // the preferred budget and the hard limit is never exceeded — confirm
+  // against the tile manager's use of these fields.
+  size_t soft_memory_limit_in_bytes;
+  size_t hard_memory_limit_in_bytes;
+  // Limit on the number of resources (not bytes) allocated for tiles.
+  size_t num_resources_limit;
+
+  TreePriority tree_priority;
+
+  bool operator==(const GlobalStateThatImpactsTilePriority& other) const {
+    return memory_limit_policy == other.memory_limit_policy &&
+           soft_memory_limit_in_bytes == other.soft_memory_limit_in_bytes &&
+           hard_memory_limit_in_bytes == other.hard_memory_limit_in_bytes &&
+           num_resources_limit == other.num_resources_limit &&
+           tree_priority == other.tree_priority;
+  }
+  bool operator!=(const GlobalStateThatImpactsTilePriority& other) const {
+    return !(*this == other);
+  }
+
+  void AsValueInto(base::debug::TracedValue* dict) const;
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_TILE_PRIORITY_H_
diff --git a/cc/resources/tile_priority_unittest.cc b/cc/resources/tile_priority_unittest.cc
new file mode 100644
index 0000000..e134bc9
--- /dev/null
+++ b/cc/resources/tile_priority_unittest.cc
@@ -0,0 +1,45 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/tile_priority.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cc {
+
+// Checks IsHigherPriorityThan(): a smaller priority_bin always wins, a
+// smaller distance wins within a bin, the relation is irreflexive, and
+// resolution has no effect on the ordering.
+TEST(TilePriorityTest, IsHigherPriorityThan) {
+  TilePriority now(HIGH_RESOLUTION, TilePriority::NOW, 0);
+  TilePriority close_soon(HIGH_RESOLUTION, TilePriority::SOON, 1);
+  TilePriority far_soon(HIGH_RESOLUTION, TilePriority::SOON, 500);
+  TilePriority close_eventually(HIGH_RESOLUTION, TilePriority::EVENTUALLY, 2);
+  TilePriority far_eventually(HIGH_RESOLUTION, TilePriority::EVENTUALLY, 1000);
+  TilePriority non_ideal_now(NON_IDEAL_RESOLUTION, TilePriority::NOW, 0);
+
+  // Equal bin and distance: neither is strictly higher, regardless of
+  // resolution.
+  EXPECT_FALSE(now.IsHigherPriorityThan(now));
+  EXPECT_FALSE(now.IsHigherPriorityThan(non_ideal_now));
+
+  EXPECT_TRUE(now.IsHigherPriorityThan(close_soon));
+  EXPECT_TRUE(now.IsHigherPriorityThan(far_soon));
+  EXPECT_TRUE(now.IsHigherPriorityThan(close_eventually));
+  EXPECT_TRUE(now.IsHigherPriorityThan(far_eventually));
+  EXPECT_TRUE(close_soon.IsHigherPriorityThan(far_soon));
+  EXPECT_TRUE(close_soon.IsHigherPriorityThan(close_eventually));
+  EXPECT_TRUE(close_soon.IsHigherPriorityThan(far_eventually));
+  EXPECT_TRUE(far_soon.IsHigherPriorityThan(close_eventually));
+  EXPECT_TRUE(far_soon.IsHigherPriorityThan(far_eventually));
+  EXPECT_TRUE(close_eventually.IsHigherPriorityThan(far_eventually));
+
+  // And the reverse comparisons all fail (asymmetry).
+  EXPECT_FALSE(far_eventually.IsHigherPriorityThan(close_eventually));
+  EXPECT_FALSE(far_eventually.IsHigherPriorityThan(far_soon));
+  EXPECT_FALSE(far_eventually.IsHigherPriorityThan(close_soon));
+  EXPECT_FALSE(far_eventually.IsHigherPriorityThan(now));
+  EXPECT_FALSE(far_eventually.IsHigherPriorityThan(non_ideal_now));
+  EXPECT_FALSE(close_eventually.IsHigherPriorityThan(far_soon));
+  EXPECT_FALSE(close_eventually.IsHigherPriorityThan(close_soon));
+  EXPECT_FALSE(close_eventually.IsHigherPriorityThan(now));
+  EXPECT_FALSE(far_soon.IsHigherPriorityThan(close_soon));
+  EXPECT_FALSE(far_soon.IsHigherPriorityThan(now));
+  EXPECT_FALSE(close_soon.IsHigherPriorityThan(now));
+}
+
+} // namespace cc
diff --git a/cc/resources/transferable_resource.cc b/cc/resources/transferable_resource.cc
new file mode 100644
index 0000000..a375f2e
--- /dev/null
+++ b/cc/resources/transferable_resource.cc
@@ -0,0 +1,40 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "cc/resources/returned_resource.h"
+#include "cc/resources/transferable_resource.h"
+
+namespace cc {
+
+// Zero-initializes the descriptor; |size| and |mailbox_holder| rely on their
+// own default constructors.
+TransferableResource::TransferableResource()
+    : id(0),
+      format(RGBA_8888),
+      filter(0),
+      is_repeated(false),
+      is_software(false),
+      allow_overlay(false) {
+}
+
+TransferableResource::~TransferableResource() {
+}
+
+// Builds the ReturnedResource acknowledgement (count of 1) that corresponds
+// to this resource, carrying over its id and sync point.
+ReturnedResource TransferableResource::ToReturnedResource() const {
+  ReturnedResource result;
+  result.count = 1;
+  result.id = id;
+  result.sync_point = mailbox_holder.sync_point;
+  return result;
+}
+
+// static
+// Appends the ReturnedResource form of every element of |input| to |output|.
+void TransferableResource::ReturnResources(
+    const TransferableResourceArray& input,
+    ReturnedResourceArray* output) {
+  for (size_t i = 0; i < input.size(); ++i)
+    output->push_back(input[i].ToReturnedResource());
+}
+
+} // namespace cc
diff --git a/cc/resources/transferable_resource.h b/cc/resources/transferable_resource.h
new file mode 100644
index 0000000..bd6b060
--- /dev/null
+++ b/cc/resources/transferable_resource.h
@@ -0,0 +1,43 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_TRANSFERABLE_RESOURCE_H_
+#define CC_RESOURCES_TRANSFERABLE_RESOURCE_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "cc/base/cc_export.h"
+#include "cc/resources/resource_format.h"
+#include "gpu/command_buffer/common/mailbox_holder.h"
+#include "ui/gfx/size.h"
+
+namespace cc {
+
+struct ReturnedResource;
+typedef std::vector<ReturnedResource> ReturnedResourceArray;
+struct TransferableResource;
+typedef std::vector<TransferableResource> TransferableResourceArray;
+
+// Describes a resource handed from one compositor client to another; the
+// receiver acknowledges it later with a matching ReturnedResource.
+struct CC_EXPORT TransferableResource {
+  TransferableResource();
+  ~TransferableResource();
+
+  // Builds the ReturnedResource (count == 1) acknowledging this resource.
+  ReturnedResource ToReturnedResource() const;
+  // Converts each element of |input| and appends it to |output|.
+  static void ReturnResources(const TransferableResourceArray& input,
+                              ReturnedResourceArray* output);
+
+  unsigned id;
+  ResourceFormat format;
+  // Texture filter; defaults to 0 (presumably a GL filter enum — confirm).
+  uint32 filter;
+  gfx::Size size;
+  gpu::MailboxHolder mailbox_holder;
+  bool is_repeated;
+  // NOTE(review): presumably true for shared-memory (non-GPU) resources —
+  // confirm against ResourceProvider usage.
+  bool is_software;
+  bool allow_overlay;
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_TRANSFERABLE_RESOURCE_H_
diff --git a/cc/resources/ui_resource_bitmap.cc b/cc/resources/ui_resource_bitmap.cc
new file mode 100644
index 0000000..e254cf7
--- /dev/null
+++ b/cc/resources/ui_resource_bitmap.cc
@@ -0,0 +1,95 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/ui_resource_bitmap.h"
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "third_party/skia/include/core/SkBitmap.h"
+#include "third_party/skia/include/core/SkMallocPixelRef.h"
+#include "third_party/skia/include/core/SkPixelRef.h"
+
+namespace cc {
+namespace {
+
+// Translates a Skia color type into the corresponding UIResourceFormat.
+// Only N32 and A8 bitmaps are supported; anything else is a caller bug and
+// falls back to RGBA8 after NOTREACHED().
+UIResourceBitmap::UIResourceFormat SkColorTypeToUIResourceFormat(
+    SkColorType sk_type) {
+  switch (sk_type) {
+    case kN32_SkColorType:
+      return UIResourceBitmap::RGBA8;
+    case kAlpha_8_SkColorType:
+      return UIResourceBitmap::ALPHA_8;
+    default:
+      NOTREACHED() << "Invalid SkColorType for UIResourceBitmap: " << sk_type;
+      return UIResourceBitmap::RGBA8;
+  }
+}
+
+} // namespace
+
+// Shared initializer for all constructors: takes a (shared) reference to
+// |pixel_ref| and records the bitmap's size and format.
+void UIResourceBitmap::Create(const skia::RefPtr<SkPixelRef>& pixel_ref,
+                              const gfx::Size& size,
+                              UIResourceFormat format) {
+  DCHECK(size.width());
+  DCHECK(size.height());
+  DCHECK(pixel_ref);
+  // Pixel data must never change once it may be shared across threads.
+  DCHECK(pixel_ref->isImmutable());
+  format_ = format;
+  size_ = size;
+  pixel_ref_ = pixel_ref;
+
+  // Default values for secondary parameters.
+  wrap_mode_ = CLAMP_TO_EDGE;
+  opaque_ = (format == ETC1);
+}
+
+// Wraps an immutable SkBitmap. The bitmap must be tightly packed (row bytes
+// equal width) and in a color type SkColorTypeToUIResourceFormat accepts.
+UIResourceBitmap::UIResourceBitmap(const SkBitmap& skbitmap) {
+  DCHECK_EQ(skbitmap.width(), skbitmap.rowBytesAsPixels());
+  DCHECK(skbitmap.isImmutable());
+
+  skia::RefPtr<SkPixelRef> pixel_ref = skia::SharePtr(skbitmap.pixelRef());
+  const SkImageInfo& info = pixel_ref->info();
+  Create(pixel_ref,
+         gfx::Size(info.fWidth, info.fHeight),
+         SkColorTypeToUIResourceFormat(skbitmap.colorType()));
+
+  // Override Create()'s default with the bitmap's actual opacity.
+  SetOpaque(skbitmap.isOpaque());
+}
+
+// Allocates an uninitialized N32 bitmap of |size| backed by heap memory.
+UIResourceBitmap::UIResourceBitmap(const gfx::Size& size, bool is_opaque) {
+  SkAlphaType alphaType = is_opaque ? kOpaque_SkAlphaType : kPremul_SkAlphaType;
+  SkImageInfo info =
+      SkImageInfo::MakeN32(size.width(), size.height(), alphaType);
+  // NOTE(review): NewAllocate can return NULL on allocation failure, which
+  // would crash at setImmutable() below — confirm callers guarantee sizes for
+  // which this cannot happen.
+  skia::RefPtr<SkPixelRef> pixel_ref = skia::AdoptRef(
+      SkMallocPixelRef::NewAllocate(info, info.minRowBytes(), NULL));
+  pixel_ref->setImmutable();
+  Create(pixel_ref, size, UIResourceBitmap::RGBA8);
+  SetOpaque(is_opaque);
+}
+
+// Wraps externally produced ETC1-compressed pixel data.
+UIResourceBitmap::UIResourceBitmap(const skia::RefPtr<SkPixelRef>& pixel_ref,
+                                   const gfx::Size& size) {
+  Create(pixel_ref, size, UIResourceBitmap::ETC1);
+}
+
+UIResourceBitmap::~UIResourceBitmap() {}
+
+// RAII pixel lock: the bitmap's pixel memory is addressable between
+// construction and destruction of this object.
+AutoLockUIResourceBitmap::AutoLockUIResourceBitmap(
+    const UIResourceBitmap& bitmap) : bitmap_(bitmap) {
+  bitmap_.pixel_ref_->lockPixels();
+}
+
+AutoLockUIResourceBitmap::~AutoLockUIResourceBitmap() {
+  bitmap_.pixel_ref_->unlockPixels();
+}
+
+// Returns the locked pixel data; only valid while this lock is alive.
+const uint8_t* AutoLockUIResourceBitmap::GetPixels() const {
+  return static_cast<const uint8_t*>(bitmap_.pixel_ref_->pixels());
+}
+
+} // namespace cc
diff --git a/cc/resources/ui_resource_bitmap.h b/cc/resources/ui_resource_bitmap.h
new file mode 100644
index 0000000..09403a3
--- /dev/null
+++ b/cc/resources/ui_resource_bitmap.h
@@ -0,0 +1,79 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_UI_RESOURCE_BITMAP_H_
+#define CC_RESOURCES_UI_RESOURCE_BITMAP_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "cc/base/cc_export.h"
+#include "skia/ext/refptr.h"
+#include "third_party/skia/include/core/SkPixelRef.h"
+#include "third_party/skia/include/core/SkTypes.h"
+#include "ui/gfx/size.h"
+
+class SkBitmap;
+
+namespace cc {
+
+class ETC1PixelRef;
+
+// A bitmap class that contains a ref-counted reference to a SkPixelRef that
+// holds the content of the bitmap (cannot use SkBitmap because of ETC1).
+// Thread-safety (by ways of SkPixelRef) ensures that both main and impl threads
+// can hold references to the bitmap and that asynchronous uploads are allowed.
+class CC_EXPORT UIResourceBitmap {
+ public:
+  // Texel format of the backing pixel data.
+  enum UIResourceFormat {
+    RGBA8,
+    ALPHA_8,
+    ETC1
+  };
+  // GL-style texture wrap mode used when sampling the resource.
+  enum UIResourceWrapMode {
+    CLAMP_TO_EDGE,
+    REPEAT
+  };
+
+  gfx::Size GetSize() const { return size_; }
+  UIResourceFormat GetFormat() const { return format_; }
+  UIResourceWrapMode GetWrapMode() const { return wrap_mode_; }
+  void SetWrapMode(UIResourceWrapMode wrap_mode) { wrap_mode_ = wrap_mode; }
+  bool GetOpaque() const { return opaque_; }
+  void SetOpaque(bool opaque) { opaque_ = opaque; }
+
+  // User must ensure that |skbitmap| is immutable.  The SkBitmap Format should
+  // be 32-bit RGBA or 8-bit ALPHA.
+  explicit UIResourceBitmap(const SkBitmap& skbitmap);
+  // Allocates new N32 pixel storage of |size|.
+  UIResourceBitmap(const gfx::Size& size, bool is_opaque);
+  // Wraps raw (ETC1) pixel data that is not representable as an SkBitmap.
+  UIResourceBitmap(const skia::RefPtr<SkPixelRef>& pixel_ref,
+                   const gfx::Size& size);
+  ~UIResourceBitmap();
+
+ private:
+  friend class AutoLockUIResourceBitmap;
+
+  // Common initialization shared by all constructors; also sets defaults for
+  // the secondary parameters (wrap mode, opacity).
+  void Create(const skia::RefPtr<SkPixelRef>& pixel_ref,
+              const gfx::Size& size,
+              UIResourceFormat format);
+
+  skia::RefPtr<SkPixelRef> pixel_ref_;  // Shared, ref-counted pixel storage.
+  UIResourceFormat format_;
+  UIResourceWrapMode wrap_mode_;
+  gfx::Size size_;
+  bool opaque_;
+};
+
+// Scoped lock that pins a UIResourceBitmap's pixels in memory; GetPixels()
+// is only valid while an instance of this class is alive.
+class CC_EXPORT AutoLockUIResourceBitmap {
+ public:
+  explicit AutoLockUIResourceBitmap(const UIResourceBitmap& bitmap);
+  ~AutoLockUIResourceBitmap();
+  const uint8_t* GetPixels() const;
+
+ private:
+  // The referenced bitmap must outlive this lock object.
+  const UIResourceBitmap& bitmap_;
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_UI_RESOURCE_BITMAP_H_
diff --git a/cc/resources/ui_resource_client.h b/cc/resources/ui_resource_client.h
new file mode 100644
index 0000000..24309a5
--- /dev/null
+++ b/cc/resources/ui_resource_client.h
@@ -0,0 +1,34 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_UI_RESOURCE_CLIENT_H_
+#define CC_RESOURCES_UI_RESOURCE_CLIENT_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "cc/base/cc_export.h"
+
+namespace cc {
+
+class UIResourceBitmap;
+
+typedef int UIResourceId;
+
+// Interface implemented by owners of UI resources; supplies (and re-supplies
+// after context loss) the bitmap content for a given resource id.
+class CC_EXPORT UIResourceClient {
+ public:
+  // GetBitmap() will be called once soon after resource creation and then will
+  // be called afterwards whenever the GL context is lost, on the same thread
+  // that LayerTreeHost::CreateUIResource was called on. It is only safe to
+  // delete a UIResourceClient object after DeleteUIResource has been called for
+  // all IDs associated with it. A valid bitmap always must be returned but it
+  // doesn't need to be the same size or format as the original.
+  virtual UIResourceBitmap GetBitmap(UIResourceId uid,
+                                     bool resource_lost) = 0;
+  virtual ~UIResourceClient() {}
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_UI_RESOURCE_CLIENT_H_
diff --git a/cc/resources/ui_resource_request.cc b/cc/resources/ui_resource_request.cc
new file mode 100644
index 0000000..b8dd50a
--- /dev/null
+++ b/cc/resources/ui_resource_request.cc
@@ -0,0 +1,37 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/ui_resource_request.h"
+
+namespace cc {
+
+// Request without bitmap payload (e.g. a delete request).
+UIResourceRequest::UIResourceRequest(UIResourceRequestType type,
+                                     UIResourceId id)
+    : type_(type), id_(id) {}
+
+// Request carrying a bitmap payload (e.g. a create request).  The bitmap is
+// stored by value behind a scoped_ptr so the request itself stays copyable.
+UIResourceRequest::UIResourceRequest(UIResourceRequestType type,
+                                     UIResourceId id,
+                                     const UIResourceBitmap& bitmap)
+    : type_(type), id_(id), bitmap_(new UIResourceBitmap(bitmap)) {}
+
+// Copy constructor delegates to the assignment operator below.
+UIResourceRequest::UIResourceRequest(const UIResourceRequest& request) {
+  (*this) = request;
+}
+
+// Deep-copies the optional bitmap so each request owns its own copy.
+UIResourceRequest& UIResourceRequest::operator=(
+    const UIResourceRequest& request) {
+  type_ = request.type_;
+  id_ = request.id_;
+  if (request.bitmap_) {
+    bitmap_ = make_scoped_ptr(new UIResourceBitmap(*request.bitmap_.get()));
+  } else {
+    bitmap_ = nullptr;
+  }
+
+  return *this;
+}
+
+UIResourceRequest::~UIResourceRequest() {}
+
+} // namespace cc
diff --git a/cc/resources/ui_resource_request.h b/cc/resources/ui_resource_request.h
new file mode 100644
index 0000000..6d89761
--- /dev/null
+++ b/cc/resources/ui_resource_request.h
@@ -0,0 +1,49 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_UI_RESOURCE_REQUEST_H_
+#define CC_RESOURCES_UI_RESOURCE_REQUEST_H_
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "cc/base/cc_export.h"
+#include "cc/resources/ui_resource_bitmap.h"
+#include "cc/resources/ui_resource_client.h"
+
+namespace cc {
+
+// A copyable value type describing a pending UI-resource operation (create or
+// delete) together with the bitmap payload for creates.
+class CC_EXPORT UIResourceRequest {
+ public:
+  enum UIResourceRequestType {
+    UIResourceCreate,
+    UIResourceDelete,
+    UIResourceInvalidRequest
+  };
+
+  UIResourceRequest(UIResourceRequestType type, UIResourceId id);
+  UIResourceRequest(UIResourceRequestType type,
+                    UIResourceId id,
+                    const UIResourceBitmap& bitmap);
+  UIResourceRequest(const UIResourceRequest& request);
+
+  ~UIResourceRequest();
+
+  UIResourceRequestType GetType() const { return type_; }
+  UIResourceId GetId() const { return id_; }
+  // Returns a copy of the payload; only valid for requests constructed with a
+  // bitmap (DCHECKs otherwise).
+  UIResourceBitmap GetBitmap() const {
+    DCHECK(bitmap_);
+    return *bitmap_.get();
+  }
+
+  UIResourceRequest& operator=(const UIResourceRequest& request);
+
+ private:
+  UIResourceRequestType type_;
+  UIResourceId id_;
+  // Present only for requests that carry bitmap content.
+  scoped_ptr<UIResourceBitmap> bitmap_;
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_UI_RESOURCE_REQUEST_H_
diff --git a/cc/resources/video_resource_updater.cc b/cc/resources/video_resource_updater.cc
new file mode 100644
index 0000000..75edccb
--- /dev/null
+++ b/cc/resources/video_resource_updater.cc
@@ -0,0 +1,399 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/video_resource_updater.h"
+
+#include "base/bind.h"
+#include "base/debug/trace_event.h"
+#include "cc/output/gl_renderer.h"
+#include "cc/resources/resource_provider.h"
+#include "gpu/GLES2/gl2extchromium.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "media/base/video_frame.h"
+#include "media/filters/skcanvas_video_renderer.h"
+#include "third_party/khronos/GLES2/gl2.h"
+#include "third_party/khronos/GLES2/gl2ext.h"
+#include "ui/gfx/size_conversions.h"
+
+namespace cc {
+
+namespace {
+
+// Per-plane format used when handing YUV planes to the GPU compositor.
+const ResourceFormat kYUVResourceFormat = LUMINANCE_8;
+// Format used for the single RGB plane in software compositing mode.
+const ResourceFormat kRGBResourceFormat = RGBA_8888;
+
+// Adapts a GLES2 interface to media::VideoFrame's sync-point client so frames
+// can insert/wait on CHROMIUM sync points when released.
+class SyncPointClientImpl : public media::VideoFrame::SyncPointClient {
+ public:
+  explicit SyncPointClientImpl(gpu::gles2::GLES2Interface* gl) : gl_(gl) {}
+  virtual ~SyncPointClientImpl() {}
+  virtual uint32 InsertSyncPoint() OVERRIDE {
+    return GLC(gl_, gl_->InsertSyncPointCHROMIUM());
+  }
+  virtual void WaitSyncPoint(uint32 sync_point) OVERRIDE {
+    GLC(gl_, gl_->WaitSyncPointCHROMIUM(sync_point));
+  }
+
+ private:
+  gpu::gles2::GLES2Interface* gl_;  // Not owned.
+};
+
+} // namespace
+
+VideoFrameExternalResources::VideoFrameExternalResources() : type(NONE) {}
+
+VideoFrameExternalResources::~VideoFrameExternalResources() {}
+
+// |context_provider| may be NULL, which selects software-compositing paths
+// below (see CreateForSoftwarePlanes).  Neither pointer is owned.
+VideoResourceUpdater::VideoResourceUpdater(ContextProvider* context_provider,
+                                           ResourceProvider* resource_provider)
+    : context_provider_(context_provider),
+      resource_provider_(resource_provider) {
+}
+
+// Releases every resource this updater ever allocated, including ones
+// currently sitting in the recycle pool.
+VideoResourceUpdater::~VideoResourceUpdater() {
+  while (!all_resources_.empty()) {
+    resource_provider_->DeleteResource(all_resources_.back());
+    all_resources_.pop_back();
+  }
+}
+
+// Deletes |resource_id| in the ResourceProvider and drops it from this
+// updater's bookkeeping list.
+void VideoResourceUpdater::DeleteResource(unsigned resource_id) {
+  resource_provider_->DeleteResource(resource_id);
+  // Use the full erase-remove idiom.  Passing only std::remove()'s return
+  // value to erase() (as before) erases a single element — and when
+  // |resource_id| is absent, std::remove() returns end() and erase(end())
+  // is undefined behavior.
+  all_resources_.erase(std::remove(all_resources_.begin(),
+                                   all_resources_.end(),
+                                   resource_id),
+                       all_resources_.end());
+}
+
+// Entry point: converts |video_frame| into compositor-consumable resources,
+// dispatching on whether the frame is hardware- (native texture) or
+// software-backed.  Returns a default (NONE) result for unsupported formats.
+VideoFrameExternalResources VideoResourceUpdater::
+    CreateExternalResourcesFromVideoFrame(
+        const scoped_refptr<media::VideoFrame>& video_frame) {
+  if (!VerifyFrame(video_frame))
+    return VideoFrameExternalResources();
+
+  if (video_frame->format() == media::VideoFrame::NATIVE_TEXTURE)
+    return CreateForHardwarePlanes(video_frame);
+  else
+    return CreateForSoftwarePlanes(video_frame);
+}
+
+// Returns true for frame formats this updater can turn into resources.  The
+// exhaustive switch (no default) makes the compiler flag any newly added
+// VideoFrame format that is not classified here.
+bool VideoResourceUpdater::VerifyFrame(
+    const scoped_refptr<media::VideoFrame>& video_frame) {
+  switch (video_frame->format()) {
+    // Acceptable inputs.
+    case media::VideoFrame::YV12:
+    case media::VideoFrame::I420:
+    case media::VideoFrame::YV12A:
+    case media::VideoFrame::YV16:
+    case media::VideoFrame::YV12J:
+    case media::VideoFrame::YV24:
+    case media::VideoFrame::NATIVE_TEXTURE:
+#if defined(VIDEO_HOLE)
+    case media::VideoFrame::HOLE:
+#endif  // defined(VIDEO_HOLE)
+      return true;
+
+    // Unacceptable inputs. ¯\(°_o)/¯
+    case media::VideoFrame::UNKNOWN:
+    case media::VideoFrame::NV12:
+      break;
+  }
+  return false;
+}
+
+// For frames that we receive in software format, determine the dimensions of
+// each plane in the frame.  YUV output uses per-plane sizes (chroma planes
+// may be subsampled); RGB output is a single full-size plane.
+static gfx::Size SoftwarePlaneDimension(
+    const scoped_refptr<media::VideoFrame>& input_frame,
+    ResourceFormat output_resource_format,
+    size_t plane_index) {
+  if (output_resource_format == kYUVResourceFormat) {
+    return media::VideoFrame::PlaneSize(
+        input_frame->format(), plane_index, input_frame->coded_size());
+  }
+
+  DCHECK_EQ(output_resource_format, kRGBResourceFormat);
+  return input_frame->coded_size();
+}
+
+// Produces resources for a software-backed (CPU-memory) frame.  In GPU mode
+// each YUV plane becomes its own LUMINANCE_8 resource exported via a mailbox;
+// in software-compositor mode the frame is painted into one RGB resource.
+// Resources are recycled across frames via |recycled_resources_|.
+VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes(
+    const scoped_refptr<media::VideoFrame>& video_frame) {
+  TRACE_EVENT0("cc", "VideoResourceUpdater::CreateForSoftwarePlanes");
+  media::VideoFrame::Format input_frame_format = video_frame->format();
+
+#if defined(VIDEO_HOLE)
+  if (input_frame_format == media::VideoFrame::HOLE) {
+    VideoFrameExternalResources external_resources;
+    external_resources.type = VideoFrameExternalResources::HOLE;
+    return external_resources;
+  }
+#endif  // defined(VIDEO_HOLE)
+
+  // Only YUV software video frames are supported.
+  DCHECK(input_frame_format == media::VideoFrame::YV12 ||
+         input_frame_format == media::VideoFrame::I420 ||
+         input_frame_format == media::VideoFrame::YV12A ||
+         input_frame_format == media::VideoFrame::YV12J ||
+         input_frame_format == media::VideoFrame::YV16 ||
+         input_frame_format == media::VideoFrame::YV24);
+  // Release-build counterpart of the DCHECK above: reject unsupported formats
+  // gracefully instead of proceeding with bad data.
+  if (input_frame_format != media::VideoFrame::YV12 &&
+      input_frame_format != media::VideoFrame::I420 &&
+      input_frame_format != media::VideoFrame::YV12A &&
+      input_frame_format != media::VideoFrame::YV12J &&
+      input_frame_format != media::VideoFrame::YV16 &&
+      input_frame_format != media::VideoFrame::YV24)
+    return VideoFrameExternalResources();
+
+  bool software_compositor = context_provider_ == NULL;
+
+  ResourceFormat output_resource_format = kYUVResourceFormat;
+  size_t output_plane_count = media::VideoFrame::NumPlanes(input_frame_format);
+
+  // TODO(skaslev): If we're in software compositing mode, we do the YUV -> RGB
+  // conversion here. That involves an extra copy of each frame to a bitmap.
+  // Obviously, this is suboptimal and should be addressed once ubercompositor
+  // starts shaping up.
+  if (software_compositor) {
+    output_resource_format = kRGBResourceFormat;
+    output_plane_count = 1;
+  }
+
+  int max_resource_size = resource_provider_->max_texture_size();
+  std::vector<PlaneResource> plane_resources;
+  bool allocation_success = true;
+
+  for (size_t i = 0; i < output_plane_count; ++i) {
+    gfx::Size output_plane_resource_size =
+        SoftwarePlaneDimension(video_frame, output_resource_format, i);
+    if (output_plane_resource_size.IsEmpty() ||
+        output_plane_resource_size.width() > max_resource_size ||
+        output_plane_resource_size.height() > max_resource_size) {
+      allocation_success = false;
+      break;
+    }
+
+    ResourceProvider::ResourceId resource_id = 0;
+    gpu::Mailbox mailbox;
+
+    // Try to recycle a previously-allocated resource of matching size/format.
+    // NOTE(review): this inner loop's |i| shadows the outer plane index |i|;
+    // correct as written, but renaming one of them would aid readability.
+    for (size_t i = 0; i < recycled_resources_.size(); ++i) {
+      bool resource_matches =
+          recycled_resources_[i].resource_format == output_resource_format &&
+          recycled_resources_[i].resource_size == output_plane_resource_size;
+      // In software mode a resource still held by the consumer can't be
+      // reused; in GL mode sync points handle that ordering instead.
+      bool not_in_use =
+          !software_compositor || !resource_provider_->InUseByConsumer(
+                                       recycled_resources_[i].resource_id);
+      if (resource_matches && not_in_use) {
+        resource_id = recycled_resources_[i].resource_id;
+        mailbox = recycled_resources_[i].mailbox;
+        recycled_resources_.erase(recycled_resources_.begin() + i);
+        break;
+      }
+    }
+
+    if (resource_id == 0) {
+      // TODO(danakj): Abstract out hw/sw resource create/delete from
+      // ResourceProvider and stop using ResourceProvider in this class.
+      resource_id = resource_provider_->CreateResource(
+          output_plane_resource_size,
+          GL_CLAMP_TO_EDGE,
+          ResourceProvider::TextureHintImmutable,
+          output_resource_format);
+
+      DCHECK(mailbox.IsZero());
+
+      // In GL mode, publish the new texture through a mailbox so it can be
+      // consumed by the compositor.
+      if (!software_compositor) {
+        DCHECK(context_provider_);
+
+        gpu::gles2::GLES2Interface* gl = context_provider_->ContextGL();
+
+        GLC(gl, gl->GenMailboxCHROMIUM(mailbox.name));
+        ResourceProvider::ScopedWriteLockGL lock(resource_provider_,
+                                                 resource_id);
+        GLC(gl,
+            gl->ProduceTextureDirectCHROMIUM(
+                lock.texture_id(), GL_TEXTURE_2D, mailbox.name));
+      }
+
+      if (resource_id)
+        all_resources_.push_back(resource_id);
+    }
+
+    if (resource_id == 0) {
+      allocation_success = false;
+      break;
+    }
+
+    DCHECK(software_compositor || !mailbox.IsZero());
+    plane_resources.push_back(PlaneResource(resource_id,
+                                            output_plane_resource_size,
+                                            output_resource_format,
+                                            mailbox));
+  }
+
+  // On any failure, release the planes allocated so far and bail out.
+  if (!allocation_success) {
+    for (size_t i = 0; i < plane_resources.size(); ++i)
+      DeleteResource(plane_resources[i].resource_id);
+    return VideoFrameExternalResources();
+  }
+
+  VideoFrameExternalResources external_resources;
+
+  if (software_compositor) {
+    DCHECK_EQ(plane_resources.size(), 1u);
+    DCHECK_EQ(plane_resources[0].resource_format, kRGBResourceFormat);
+    DCHECK(plane_resources[0].mailbox.IsZero());
+
+    if (!video_renderer_)
+      video_renderer_.reset(new media::SkCanvasVideoRenderer);
+
+    // Paint (YUV -> RGB convert) the frame into the single RGB resource.
+    {
+      ResourceProvider::ScopedWriteLockSoftware lock(
+          resource_provider_, plane_resources[0].resource_id);
+      video_renderer_->Copy(video_frame, lock.sk_canvas());
+    }
+
+    RecycleResourceData recycle_data = {
+      plane_resources[0].resource_id,
+      plane_resources[0].resource_size,
+      plane_resources[0].resource_format,
+      gpu::Mailbox()
+    };
+    external_resources.software_resources.push_back(
+        plane_resources[0].resource_id);
+    external_resources.software_release_callback =
+        base::Bind(&RecycleResource, AsWeakPtr(), recycle_data);
+    external_resources.type = VideoFrameExternalResources::SOFTWARE_RESOURCE;
+
+    return external_resources;
+  }
+
+  for (size_t i = 0; i < plane_resources.size(); ++i) {
+    // Update each plane's resource id with its content.
+    DCHECK_EQ(plane_resources[i].resource_format, kYUVResourceFormat);
+
+    const uint8_t* input_plane_pixels = video_frame->data(i);
+
+    // The source image is stride-wide; only |source_rect| (the plane size)
+    // is actually uploaded.
+    gfx::Rect image_rect(0,
+                         0,
+                         video_frame->stride(i),
+                         plane_resources[i].resource_size.height());
+    gfx::Rect source_rect(plane_resources[i].resource_size);
+    resource_provider_->SetPixels(plane_resources[i].resource_id,
+                                  input_plane_pixels,
+                                  image_rect,
+                                  source_rect,
+                                  gfx::Vector2d());
+
+    RecycleResourceData recycle_data = {
+      plane_resources[i].resource_id,
+      plane_resources[i].resource_size,
+      plane_resources[i].resource_format,
+      plane_resources[i].mailbox
+    };
+
+    external_resources.mailboxes.push_back(
+        TextureMailbox(plane_resources[i].mailbox, GL_TEXTURE_2D, 0));
+    external_resources.release_callbacks.push_back(
+        base::Bind(&RecycleResource, AsWeakPtr(), recycle_data));
+  }
+
+  external_resources.type = VideoFrameExternalResources::YUV_RESOURCE;
+  return external_resources;
+}
+
+// static
+// Release callback for hardware (native-texture) frames: updates the frame's
+// release sync point once the compositor is done with the texture.
+void VideoResourceUpdater::ReturnTexture(
+    base::WeakPtr<VideoResourceUpdater> updater,
+    const scoped_refptr<media::VideoFrame>& video_frame,
+    uint32 sync_point,
+    bool lost_resource,
+    BlockingTaskRunner* main_thread_task_runner) {
+  // TODO(dshwang) this case should be forwarded to the decoder as lost
+  // resource.
+  if (lost_resource || !updater.get())
+    return;
+  // VideoFrame::UpdateReleaseSyncPoint() creates new sync point using the same
+  // GL context which created the given |sync_point|, so discard the
+  // |sync_point|.
+  SyncPointClientImpl client(updater->context_provider_->ContextGL());
+  video_frame->UpdateReleaseSyncPoint(&client);
+}
+
+// Produces resources for a hardware (NATIVE_TEXTURE) frame by forwarding the
+// frame's existing mailbox; the resource type is derived from the texture
+// target.  No texture data is copied.
+VideoFrameExternalResources VideoResourceUpdater::CreateForHardwarePlanes(
+    const scoped_refptr<media::VideoFrame>& video_frame) {
+  TRACE_EVENT0("cc", "VideoResourceUpdater::CreateForHardwarePlanes");
+  media::VideoFrame::Format frame_format = video_frame->format();
+
+  DCHECK_EQ(frame_format, media::VideoFrame::NATIVE_TEXTURE);
+  if (frame_format != media::VideoFrame::NATIVE_TEXTURE)
+    return VideoFrameExternalResources();
+
+  // Hardware frames require a GL context.
+  if (!context_provider_)
+    return VideoFrameExternalResources();
+
+  const gpu::MailboxHolder* mailbox_holder = video_frame->mailbox_holder();
+  VideoFrameExternalResources external_resources;
+  switch (mailbox_holder->texture_target) {
+    case GL_TEXTURE_2D:
+      external_resources.type = VideoFrameExternalResources::RGB_RESOURCE;
+      break;
+    case GL_TEXTURE_EXTERNAL_OES:
+      external_resources.type =
+          VideoFrameExternalResources::STREAM_TEXTURE_RESOURCE;
+      break;
+    case GL_TEXTURE_RECTANGLE_ARB:
+      external_resources.type = VideoFrameExternalResources::IO_SURFACE;
+      break;
+    default:
+      NOTREACHED();
+      return VideoFrameExternalResources();
+  }
+
+  external_resources.mailboxes.push_back(
+      TextureMailbox(mailbox_holder->mailbox,
+                     mailbox_holder->texture_target,
+                     mailbox_holder->sync_point));
+  // ReturnTexture (bound with the frame) updates the frame's release sync
+  // point when the compositor releases the mailbox.
+  external_resources.release_callbacks.push_back(
+      base::Bind(&ReturnTexture, AsWeakPtr(), video_frame));
+  return external_resources;
+}
+
+// static
+// Release callback for software-uploaded planes: waits on the consumer's sync
+// point, then either deletes the resource (lost / stale format) or returns it
+// to the recycle pool for reuse by a future frame.
+void VideoResourceUpdater::RecycleResource(
+    base::WeakPtr<VideoResourceUpdater> updater,
+    RecycleResourceData data,
+    uint32 sync_point,
+    bool lost_resource,
+    BlockingTaskRunner* main_thread_task_runner) {
+  if (!updater.get()) {
+    // Resource was already deleted.
+    return;
+  }
+
+  ContextProvider* context_provider = updater->context_provider_;
+  if (context_provider && sync_point) {
+    GLC(context_provider->ContextGL(),
+        context_provider->ContextGL()->WaitSyncPointCHROMIUM(sync_point));
+  }
+
+  if (lost_resource) {
+    updater->DeleteResource(data.resource_id);
+    return;
+  }
+
+  // Drop recycled resources that are the wrong format.
+  while (!updater->recycled_resources_.empty() &&
+         updater->recycled_resources_.back().resource_format !=
+             data.resource_format) {
+    updater->DeleteResource(updater->recycled_resources_.back().resource_id);
+    updater->recycled_resources_.pop_back();
+  }
+
+  PlaneResource recycled_resource(data.resource_id,
+                                  data.resource_size,
+                                  data.resource_format,
+                                  data.mailbox);
+  updater->recycled_resources_.push_back(recycled_resource);
+}
+
+} // namespace cc
diff --git a/cc/resources/video_resource_updater.h b/cc/resources/video_resource_updater.h
new file mode 100644
index 0000000..e9b924b
--- /dev/null
+++ b/cc/resources/video_resource_updater.h
@@ -0,0 +1,128 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_VIDEO_RESOURCE_UPDATER_H_
+#define CC_RESOURCES_VIDEO_RESOURCE_UPDATER_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "cc/base/cc_export.h"
+#include "cc/resources/release_callback_impl.h"
+#include "cc/resources/resource_format.h"
+#include "cc/resources/texture_mailbox.h"
+#include "ui/gfx/size.h"
+
+namespace media {
+class SkCanvasVideoRenderer;
+class VideoFrame;
+}
+
+namespace cc {
+class ContextProvider;
+class ResourceProvider;
+
+// Bundle of compositor-consumable resources produced from one video frame,
+// plus the callbacks that return/recycle them when the compositor is done.
+class CC_EXPORT VideoFrameExternalResources {
+ public:
+  // Specifies what type of data is contained in the mailboxes, as well as how
+  // many mailboxes will be present.
+  enum ResourceType {
+    NONE,
+    YUV_RESOURCE,
+    RGB_RESOURCE,
+    STREAM_TEXTURE_RESOURCE,
+    IO_SURFACE,
+
+#if defined(VIDEO_HOLE)
+    // TODO(danakj): Implement this with a solid color layer instead of a video
+    // frame and video layer.
+    HOLE,
+#endif  // defined(VIDEO_HOLE)
+
+    // TODO(danakj): Remove this and abstract TextureMailbox into
+    // "ExternalResource" that can hold a hardware or software backing.
+    SOFTWARE_RESOURCE
+  };
+
+  ResourceType type;
+  std::vector<TextureMailbox> mailboxes;
+  // One release callback per mailbox, in the same order.
+  std::vector<ReleaseCallbackImpl> release_callbacks;
+
+  // TODO(danakj): Remove these too.
+  std::vector<unsigned> software_resources;
+  ReleaseCallbackImpl software_release_callback;
+
+  VideoFrameExternalResources();
+  ~VideoFrameExternalResources();
+};
+
+// VideoResourceUpdater is used by the video system to produce frame content as
+// resources consumable by the compositor.
+class CC_EXPORT VideoResourceUpdater
+    : public base::SupportsWeakPtr<VideoResourceUpdater> {
+ public:
+  // |context_provider| may be NULL (software compositing); pointers are not
+  // owned and must outlive this object.
+  explicit VideoResourceUpdater(ContextProvider* context_provider,
+                                ResourceProvider* resource_provider);
+  ~VideoResourceUpdater();
+
+  VideoFrameExternalResources CreateExternalResourcesFromVideoFrame(
+      const scoped_refptr<media::VideoFrame>& video_frame);
+
+ private:
+  // Bookkeeping for one allocated plane resource (id, geometry, format and
+  // the mailbox it is published through in GL mode).
+  struct PlaneResource {
+    unsigned resource_id;
+    gfx::Size resource_size;
+    ResourceFormat resource_format;
+    gpu::Mailbox mailbox;
+
+    PlaneResource(unsigned resource_id,
+                  const gfx::Size& resource_size,
+                  ResourceFormat resource_format,
+                  gpu::Mailbox mailbox)
+        : resource_id(resource_id),
+          resource_size(resource_size),
+          resource_format(resource_format),
+          mailbox(mailbox) {}
+  };
+
+  void DeleteResource(unsigned resource_id);
+  bool VerifyFrame(const scoped_refptr<media::VideoFrame>& video_frame);
+  VideoFrameExternalResources CreateForHardwarePlanes(
+      const scoped_refptr<media::VideoFrame>& video_frame);
+  VideoFrameExternalResources CreateForSoftwarePlanes(
+      const scoped_refptr<media::VideoFrame>& video_frame);
+
+  // Copyable payload bound into release callbacks so a released resource can
+  // be recycled or deleted later.
+  struct RecycleResourceData {
+    unsigned resource_id;
+    gfx::Size resource_size;
+    ResourceFormat resource_format;
+    gpu::Mailbox mailbox;
+  };
+  // Static + WeakPtr so callbacks are safe to run after this is destroyed.
+  static void RecycleResource(base::WeakPtr<VideoResourceUpdater> updater,
+                              RecycleResourceData data,
+                              uint32 sync_point,
+                              bool lost_resource,
+                              BlockingTaskRunner* main_thread_task_runner);
+  static void ReturnTexture(base::WeakPtr<VideoResourceUpdater> updater,
+                            const scoped_refptr<media::VideoFrame>& video_frame,
+                            uint32 sync_point,
+                            bool lost_resource,
+                            BlockingTaskRunner* main_thread_task_runner);
+
+  ContextProvider* context_provider_;    // Not owned; may be NULL.
+  ResourceProvider* resource_provider_;  // Not owned.
+  // Lazily created when software compositing first paints a frame.
+  scoped_ptr<media::SkCanvasVideoRenderer> video_renderer_;
+
+  // Every resource this updater allocated (superset of the recycle pool).
+  std::vector<unsigned> all_resources_;
+  std::vector<PlaneResource> recycled_resources_;
+
+  DISALLOW_COPY_AND_ASSIGN(VideoResourceUpdater);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_VIDEO_RESOURCE_UPDATER_H_
diff --git a/cc/resources/video_resource_updater_unittest.cc b/cc/resources/video_resource_updater_unittest.cc
new file mode 100644
index 0000000..c40d4a2
--- /dev/null
+++ b/cc/resources/video_resource_updater_unittest.cc
@@ -0,0 +1,81 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/video_resource_updater.h"
+
+#include "base/memory/shared_memory.h"
+#include "cc/resources/resource_provider.h"
+#include "cc/test/fake_output_surface.h"
+#include "cc/test/fake_output_surface_client.h"
+#include "cc/test/test_shared_bitmap_manager.h"
+#include "cc/test/test_web_graphics_context_3d.h"
+#include "cc/trees/blocking_task_runner.h"
+#include "media/base/video_frame.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cc {
+namespace {
+
+// Test fixture providing a fake 3D output surface plus a ResourceProvider for
+// exercising VideoResourceUpdater.
+class VideoResourceUpdaterTest : public testing::Test {
+ protected:
+  VideoResourceUpdaterTest() {
+    scoped_ptr<TestWebGraphicsContext3D> context3d =
+        TestWebGraphicsContext3D::Create();
+    context3d_ = context3d.get();
+
+    output_surface3d_ =
+        FakeOutputSurface::Create3d(context3d.Pass());
+    CHECK(output_surface3d_->BindToClient(&client_));
+    shared_bitmap_manager_.reset(new TestSharedBitmapManager());
+    resource_provider3d_ =
+        ResourceProvider::Create(output_surface3d_.get(),
+                                 shared_bitmap_manager_.get(),
+                                 NULL,
+                                 0,
+                                 false,
+                                 1,
+                                 false);
+  }
+
+  // Builds a 10x10 YV16 frame wrapping static plane buffers (static so the
+  // data outlives the frame without a cleanup callback).
+  scoped_refptr<media::VideoFrame> CreateTestYUVVideoFrame() {
+    const int kDimension = 10;
+    gfx::Size size(kDimension, kDimension);
+    static uint8 y_data[kDimension * kDimension] = { 0 };
+    static uint8 u_data[kDimension * kDimension / 2] = { 0 };
+    static uint8 v_data[kDimension * kDimension / 2] = { 0 };
+
+    return media::VideoFrame::WrapExternalYuvData(
+        media::VideoFrame::YV16,  // format
+        size,                     // coded_size
+        gfx::Rect(size),          // visible_rect
+        size,                     // natural_size
+        size.width(),             // y_stride
+        size.width() / 2,         // u_stride
+        size.width() / 2,         // v_stride
+        y_data,                   // y_data
+        u_data,                   // u_data
+        v_data,                   // v_data
+        base::TimeDelta(),        // timestamp,
+        base::Closure());         // no_longer_needed_cb
+  }
+
+  TestWebGraphicsContext3D* context3d_;  // Owned by |output_surface3d_|.
+  FakeOutputSurfaceClient client_;
+  scoped_ptr<FakeOutputSurface> output_surface3d_;
+  scoped_ptr<TestSharedBitmapManager> shared_bitmap_manager_;
+  scoped_ptr<ResourceProvider> resource_provider3d_;
+};
+
+// A software YUV frame processed with a GL context should yield per-plane
+// YUV resources.
+TEST_F(VideoResourceUpdaterTest, SoftwareFrame) {
+  VideoResourceUpdater updater(output_surface3d_->context_provider(),
+                               resource_provider3d_.get());
+  scoped_refptr<media::VideoFrame> video_frame = CreateTestYUVVideoFrame();
+
+  VideoFrameExternalResources resources =
+      updater.CreateExternalResourcesFromVideoFrame(video_frame);
+  EXPECT_EQ(VideoFrameExternalResources::YUV_RESOURCE, resources.type);
+}
+
+} // namespace
+} // namespace cc
diff --git a/cc/resources/zero_copy_raster_worker_pool.cc b/cc/resources/zero_copy_raster_worker_pool.cc
new file mode 100644
index 0000000..f463406
--- /dev/null
+++ b/cc/resources/zero_copy_raster_worker_pool.cc
@@ -0,0 +1,233 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/resources/zero_copy_raster_worker_pool.h"
+
+#include <algorithm>
+
+#include "base/debug/trace_event.h"
+#include "base/debug/trace_event_argument.h"
+#include "base/strings/stringprintf.h"
+#include "cc/debug/traced_value.h"
+#include "cc/resources/raster_buffer.h"
+#include "cc/resources/resource.h"
+#include "third_party/skia/include/utils/SkNullCanvas.h"
+
+namespace cc {
+namespace {
+
+// Zero-copy raster buffer: maps the resource's GPU image memory on
+// construction and wraps it in an SkBitmap-backed canvas for rastering.
+class RasterBufferImpl : public RasterBuffer {
+ public:
+  RasterBufferImpl(ResourceProvider* resource_provider,
+                   const Resource* resource)
+      : resource_provider_(resource_provider),
+        resource_(resource),
+        stride_(0),
+        buffer_(resource_provider->MapImage(resource->id(), &stride_)) {}
+
+  virtual ~RasterBufferImpl() {
+    resource_provider_->UnmapImage(resource_->id());
+
+    // This RasterBuffer implementation provides direct access to the memory
+    // used by the GPU. Read lock fences are required to ensure that we're not
+    // trying to map a resource that is currently in-use by the GPU.
+    resource_provider_->EnableReadLockFences(resource_->id());
+  }
+
+  // Overridden from RasterBuffer:
+  virtual skia::RefPtr<SkCanvas> AcquireSkCanvas() OVERRIDE {
+    // If mapping failed, hand back a null canvas so raster work is a no-op.
+    if (!buffer_)
+      return skia::AdoptRef(SkCreateNullCanvas());
+
+    RasterWorkerPool::AcquireBitmapForBuffer(
+        &bitmap_, buffer_, resource_->format(), resource_->size(), stride_);
+    return skia::AdoptRef(new SkCanvas(bitmap_));
+  }
+  virtual void ReleaseSkCanvas(const skia::RefPtr<SkCanvas>& canvas) OVERRIDE {
+    if (!buffer_)
+      return;
+
+    RasterWorkerPool::ReleaseBitmapForBuffer(
+        &bitmap_, buffer_, resource_->format());
+  }
+
+ private:
+  ResourceProvider* resource_provider_;  // Not owned.
+  const Resource* resource_;             // Not owned.
+  int stride_;
+  // Mapped image memory; may be NULL if MapImage() failed.
+  uint8_t* buffer_;
+  SkBitmap bitmap_;
+
+  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
+};
+
+} // namespace
+
+// static
+scoped_ptr<RasterWorkerPool> ZeroCopyRasterWorkerPool::Create(
+    base::SequencedTaskRunner* task_runner,
+    TaskGraphRunner* task_graph_runner,
+    ResourceProvider* resource_provider) {
+  return make_scoped_ptr<RasterWorkerPool>(new ZeroCopyRasterWorkerPool(
+      task_runner, task_graph_runner, resource_provider));
+}
+
+ZeroCopyRasterWorkerPool::ZeroCopyRasterWorkerPool(
+    base::SequencedTaskRunner* task_runner,
+    TaskGraphRunner* task_graph_runner,
+    ResourceProvider* resource_provider)
+    : task_runner_(task_runner),
+      task_graph_runner_(task_graph_runner),
+      namespace_token_(task_graph_runner->GetNamespaceToken()),
+      resource_provider_(resource_provider),
+      raster_finished_weak_ptr_factory_(this) {
+}
+
+ZeroCopyRasterWorkerPool::~ZeroCopyRasterWorkerPool() {
+}
+
+Rasterizer* ZeroCopyRasterWorkerPool::AsRasterizer() {
+  return this;
+}
+
+void ZeroCopyRasterWorkerPool::SetClient(RasterizerClient* client) {
+  client_ = client;
+}
+
+// Cancels any queued work (by scheduling an empty graph) and blocks until
+// already-running tasks finish.
+void ZeroCopyRasterWorkerPool::Shutdown() {
+  TRACE_EVENT0("cc", "ZeroCopyRasterWorkerPool::Shutdown");
+
+  TaskGraph empty;
+  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
+  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
+}
+
+// Builds a task graph from |queue|: each raster task gains an edge to a
+// per-task-set "raster finished" task so completion of every set can be
+// signaled via OnRasterFinished.  Replaces any previously scheduled graph.
+void ZeroCopyRasterWorkerPool::ScheduleTasks(RasterTaskQueue* queue) {
+  TRACE_EVENT0("cc", "ZeroCopyRasterWorkerPool::ScheduleTasks");
+
+  if (raster_pending_.none())
+    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);
+
+  // Mark all task sets as pending.
+  raster_pending_.set();
+
+  unsigned priority = kRasterTaskPriorityBase;
+
+  graph_.Reset();
+
+  // Cancel existing OnRasterFinished callbacks.
+  raster_finished_weak_ptr_factory_.InvalidateWeakPtrs();
+
+  scoped_refptr<RasterizerTask> new_raster_finished_tasks[kNumberOfTaskSets];
+
+  size_t task_count[kNumberOfTaskSets] = {0};
+
+  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
+    new_raster_finished_tasks[task_set] = CreateRasterFinishedTask(
+        task_runner_.get(),
+        base::Bind(&ZeroCopyRasterWorkerPool::OnRasterFinished,
+                   raster_finished_weak_ptr_factory_.GetWeakPtr(),
+                   task_set));
+  }
+
+  for (RasterTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
+       it != queue->items.end();
+       ++it) {
+    const RasterTaskQueue::Item& item = *it;
+    RasterTask* task = item.task;
+    DCHECK(!task->HasCompleted());
+
+    // Link the task into each task set it belongs to.
+    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
+      if (!item.task_sets[task_set])
+        continue;
+
+      ++task_count[task_set];
+
+      graph_.edges.push_back(
+          TaskGraph::Edge(task, new_raster_finished_tasks[task_set].get()));
+    }
+
+    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
+  }
+
+  // The finished tasks depend on all tasks in their set (|task_count|).
+  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
+    InsertNodeForTask(&graph_,
+                      new_raster_finished_tasks[task_set].get(),
+                      kRasterFinishedTaskPriority,
+                      task_count[task_set]);
+  }
+
+  ScheduleTasksOnOriginThread(this, &graph_);
+  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);
+
+  // Keep the new finished tasks alive until the next schedule.
+  std::copy(new_raster_finished_tasks,
+            new_raster_finished_tasks + kNumberOfTaskSets,
+            raster_finished_tasks_);
+
+  TRACE_EVENT_ASYNC_STEP_INTO1(
+      "cc", "ScheduledTasks", this, "rasterizing", "state", StateAsValue());
+}
+
+// Collects finished tasks from the graph runner and runs their completion
+// and reply callbacks on the origin thread.
+void ZeroCopyRasterWorkerPool::CheckForCompletedTasks() {
+  TRACE_EVENT0("cc", "ZeroCopyRasterWorkerPool::CheckForCompletedTasks");
+
+  task_graph_runner_->CollectCompletedTasks(namespace_token_,
+                                            &completed_tasks_);
+  for (Task::Vector::const_iterator it = completed_tasks_.begin();
+       it != completed_tasks_.end();
+       ++it) {
+    RasterizerTask* task = static_cast<RasterizerTask*>(it->get());
+
+    task->WillComplete();
+    task->CompleteOnOriginThread(this);
+    task->DidComplete();
+
+    task->RunReplyOnOriginThread();
+  }
+  completed_tasks_.clear();
+}
+
+scoped_ptr<RasterBuffer> ZeroCopyRasterWorkerPool::AcquireBufferForRaster(
+    const Resource* resource) {
+  // RasterBuffer implementation depends on an image having been acquired for
+  // the resource.
+  resource_provider_->AcquireImage(resource->id());
+
+  return make_scoped_ptr<RasterBuffer>(
+      new RasterBufferImpl(resource_provider_, resource));
+}
+
+void ZeroCopyRasterWorkerPool::ReleaseBufferForRaster(
+    scoped_ptr<RasterBuffer> buffer) {
+  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
+}
+
+// Called (on the origin thread) when every raster task in |task_set| has
+// finished; clears the pending bit and notifies the client.
+void ZeroCopyRasterWorkerPool::OnRasterFinished(TaskSet task_set) {
+  TRACE_EVENT1(
+      "cc", "ZeroCopyRasterWorkerPool::OnRasterFinished", "task_set", task_set);
+
+  DCHECK(raster_pending_[task_set]);
+  raster_pending_[task_set] = false;
+  if (raster_pending_.any()) {
+    TRACE_EVENT_ASYNC_STEP_INTO1(
+        "cc", "ScheduledTasks", this, "rasterizing", "state", StateAsValue());
+  } else {
+    // All task sets done; close out the async trace event.
+    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
+  }
+  client_->DidFinishRunningTasks(task_set);
+}
+
+// Snapshot of the pending-bits for tracing.
+scoped_refptr<base::debug::ConvertableToTraceFormat>
+ZeroCopyRasterWorkerPool::StateAsValue() const {
+  scoped_refptr<base::debug::TracedValue> state =
+      new base::debug::TracedValue();
+
+  state->BeginArray("tasks_pending");
+  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
+    state->AppendBoolean(raster_pending_[task_set]);
+  state->EndArray();
+  return state;
+}
+
+} // namespace cc
diff --git a/cc/resources/zero_copy_raster_worker_pool.h b/cc/resources/zero_copy_raster_worker_pool.h
new file mode 100644
index 0000000..4e4280e
--- /dev/null
+++ b/cc/resources/zero_copy_raster_worker_pool.h
@@ -0,0 +1,79 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_RESOURCES_ZERO_COPY_RASTER_WORKER_POOL_H_
+#define CC_RESOURCES_ZERO_COPY_RASTER_WORKER_POOL_H_
+
+#include "base/memory/weak_ptr.h"
+#include "base/values.h"
+#include "cc/resources/raster_worker_pool.h"
+#include "cc/resources/rasterizer.h"
+
+namespace base {
+namespace debug {
+class ConvertableToTraceFormat;
+}
+}
+
+namespace cc {
+class ResourceProvider;
+
+// RasterWorkerPool that rasterizes via RasterBufferImpl buffers created
+// against an image acquired from the ResourceProvider (see
+// AcquireBufferForRaster); presumably this lets workers write resource
+// memory directly without an intermediate copy — hence "zero copy".
+// NOTE(review): exact copy-avoidance semantics depend on
+// ResourceProvider::AcquireImage, which is not visible here — confirm.
+class CC_EXPORT ZeroCopyRasterWorkerPool : public RasterWorkerPool,
+                                           public Rasterizer,
+                                           public RasterizerTaskClient {
+ public:
+  virtual ~ZeroCopyRasterWorkerPool();
+
+  // Factory; construction goes through the protected constructor below.
+  static scoped_ptr<RasterWorkerPool> Create(
+      base::SequencedTaskRunner* task_runner,
+      TaskGraphRunner* task_graph_runner,
+      ResourceProvider* resource_provider);
+
+  // Overridden from RasterWorkerPool:
+  virtual Rasterizer* AsRasterizer() OVERRIDE;
+
+  // Overridden from Rasterizer:
+  virtual void SetClient(RasterizerClient* client) OVERRIDE;
+  virtual void Shutdown() OVERRIDE;
+  virtual void ScheduleTasks(RasterTaskQueue* queue) OVERRIDE;
+  virtual void CheckForCompletedTasks() OVERRIDE;
+
+  // Overridden from RasterizerTaskClient:
+  virtual scoped_ptr<RasterBuffer> AcquireBufferForRaster(
+      const Resource* resource) OVERRIDE;
+  virtual void ReleaseBufferForRaster(scoped_ptr<RasterBuffer> buffer) OVERRIDE;
+
+ protected:
+  ZeroCopyRasterWorkerPool(base::SequencedTaskRunner* task_runner,
+                           TaskGraphRunner* task_graph_runner,
+                           ResourceProvider* resource_provider);
+
+ private:
+  // Invoked when all raster tasks in |task_set| have completed; clears the
+  // pending bit and notifies |client_|.
+  void OnRasterFinished(TaskSet task_set);
+  // Snapshot of |raster_pending_| for trace events.
+  scoped_refptr<base::debug::ConvertableToTraceFormat> StateAsValue() const;
+
+  scoped_refptr<base::SequencedTaskRunner> task_runner_;
+  // Raw pointers below are not owned here; assumed to outlive this pool —
+  // TODO confirm against Create() callers.
+  TaskGraphRunner* task_graph_runner_;
+  const NamespaceToken namespace_token_;
+  RasterizerClient* client_;
+  ResourceProvider* resource_provider_;
+
+  // One pending bit per task set; set in ScheduleTasks-era code and cleared
+  // by OnRasterFinished.
+  TaskSetCollection raster_pending_;
+
+  scoped_refptr<RasterizerTask> raster_finished_tasks_[kNumberOfTaskSets];
+
+  // Task graph used when scheduling tasks and vector used to gather
+  // completed tasks.
+  TaskGraph graph_;
+  Task::Vector completed_tasks_;
+
+  // Declared last so weak pointers are invalidated before other members are
+  // destroyed.
+  base::WeakPtrFactory<ZeroCopyRasterWorkerPool>
+      raster_finished_weak_ptr_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(ZeroCopyRasterWorkerPool);
+};
+
+} // namespace cc
+
+#endif // CC_RESOURCES_ZERO_COPY_RASTER_WORKER_POOL_H_