Update from https://crrev.com/304418
Review URL: https://codereview.chromium.org/734063004
diff --git a/BUILD.gn b/BUILD.gn
index a0cdf66..2fc232d 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -399,6 +399,7 @@
"metrics/histogram_delta_serialization.cc",
"metrics/histogram_delta_serialization.",
"metrics/histogram_flattener.h",
+ "metrics/histogram_macros.h",
"metrics/histogram_samples.cc",
"metrics/histogram_samples.h",
"metrics/histogram_snapshot_manager.cc",
diff --git a/OWNERS b/OWNERS
index 92844b6..03c20e7 100644
--- a/OWNERS
+++ b/OWNERS
@@ -45,3 +45,8 @@
per-file callback_unittest.h=ajwong@chromium.org
per-file callback_unittest.nc=ajwong@chromium.org
per-file security_unittest.cc=jln@chromium.org
+
+# For Android-specific changes:
+per-file *android*=nyquist@chromium.org
+per-file *android*=rmcilroy@chromium.org
+per-file *android*=yfriedman@chromium.org
diff --git a/base.gypi b/base.gypi
index 416a7e5..39bb08b 100644
--- a/base.gypi
+++ b/base.gypi
@@ -393,6 +393,7 @@
'metrics/histogram_delta_serialization.cc',
'metrics/histogram_delta_serialization.h',
'metrics/histogram_flattener.h',
+ 'metrics/histogram_macros.h',
'metrics/histogram_samples.cc',
'metrics/histogram_samples.h',
'metrics/histogram_snapshot_manager.cc',
diff --git a/cancelable_callback.h b/cancelable_callback.h
index 159100f..91eb046 100644
--- a/cancelable_callback.h
+++ b/cancelable_callback.h
@@ -55,13 +55,13 @@
template <typename Sig>
class CancelableCallback;
-template <>
-class CancelableCallback<void(void)> {
+template <typename... A>
+class CancelableCallback<void(A...)> {
public:
CancelableCallback() : weak_factory_(this) {}
// |callback| must not be null.
- explicit CancelableCallback(const base::Callback<void(void)>& callback)
+ explicit CancelableCallback(const base::Callback<void(A...)>& callback)
: weak_factory_(this),
callback_(callback) {
DCHECK(!callback.is_null());
@@ -84,7 +84,7 @@
// Sets |callback| as the closure that may be cancelled. |callback| may not
// be null. Outstanding and any previously wrapped callbacks are cancelled.
- void Reset(const base::Callback<void(void)>& callback) {
+ void Reset(const base::Callback<void(A...)>& callback) {
DCHECK(!callback.is_null());
// Outstanding tasks (e.g., posted to a message loop) must not be called.
@@ -97,173 +97,36 @@
}
// Returns a callback that can be disabled by calling Cancel().
- const base::Callback<void(void)>& callback() const {
+ const base::Callback<void(A...)>& callback() const {
return forwarder_;
}
private:
- void Forward() {
- callback_.Run();
+ void Forward(A... args) const {
+ callback_.Run(args...);
}
// Helper method to bind |forwarder_| using a weak pointer from
// |weak_factory_|.
void InitializeForwarder() {
- forwarder_ = base::Bind(&CancelableCallback<void(void)>::Forward,
+ forwarder_ = base::Bind(&CancelableCallback<void(A...)>::Forward,
weak_factory_.GetWeakPtr());
}
// Used to ensure Forward() is not run when this object is destroyed.
- base::WeakPtrFactory<CancelableCallback<void(void)> > weak_factory_;
+ // TODO(ckehoe): This should be the last class member.
+ // Move it there when crbug.com/433583 is fixed.
+ base::WeakPtrFactory<CancelableCallback<void(A...)> > weak_factory_;
// The wrapper closure.
- base::Callback<void(void)> forwarder_;
+ base::Callback<void(A...)> forwarder_;
// The stored closure that may be cancelled.
- base::Callback<void(void)> callback_;
+ base::Callback<void(A...)> callback_;
DISALLOW_COPY_AND_ASSIGN(CancelableCallback);
};
-template <typename A1>
-class CancelableCallback<void(A1)> {
- public:
- CancelableCallback() : weak_factory_(this) {}
-
- // |callback| must not be null.
- explicit CancelableCallback(const base::Callback<void(A1)>& callback)
- : weak_factory_(this),
- callback_(callback) {
- DCHECK(!callback.is_null());
- InitializeForwarder();
- }
-
- ~CancelableCallback() {}
-
- // Cancels and drops the reference to the wrapped callback.
- void Cancel() {
- weak_factory_.InvalidateWeakPtrs();
- forwarder_.Reset();
- callback_.Reset();
- }
-
- // Returns true if the wrapped callback has been cancelled.
- bool IsCancelled() const {
- return callback_.is_null();
- }
-
- // Sets |callback| as the closure that may be cancelled. |callback| may not
- // be null. Outstanding and any previously wrapped callbacks are cancelled.
- void Reset(const base::Callback<void(A1)>& callback) {
- DCHECK(!callback.is_null());
-
- // Outstanding tasks (e.g., posted to a message loop) must not be called.
- Cancel();
-
- // |forwarder_| is no longer valid after Cancel(), so re-bind.
- InitializeForwarder();
-
- callback_ = callback;
- }
-
- // Returns a callback that can be disabled by calling Cancel().
- const base::Callback<void(A1)>& callback() const {
- return forwarder_;
- }
-
- private:
- void Forward(A1 a1) const {
- callback_.Run(a1);
- }
-
- // Helper method to bind |forwarder_| using a weak pointer from
- // |weak_factory_|.
- void InitializeForwarder() {
- forwarder_ = base::Bind(&CancelableCallback<void(A1)>::Forward,
- weak_factory_.GetWeakPtr());
- }
-
- // Used to ensure Forward() is not run when this object is destroyed.
- base::WeakPtrFactory<CancelableCallback<void(A1)> > weak_factory_;
-
- // The wrapper closure.
- base::Callback<void(A1)> forwarder_;
-
- // The stored closure that may be cancelled.
- base::Callback<void(A1)> callback_;
-
- DISALLOW_COPY_AND_ASSIGN(CancelableCallback);
-};
-
-template <typename A1, typename A2>
-class CancelableCallback<void(A1, A2)> {
- public:
- CancelableCallback() : weak_factory_(this) {}
-
- // |callback| must not be null.
- explicit CancelableCallback(const base::Callback<void(A1, A2)>& callback)
- : weak_factory_(this),
- callback_(callback) {
- DCHECK(!callback.is_null());
- InitializeForwarder();
- }
-
- ~CancelableCallback() {}
-
- // Cancels and drops the reference to the wrapped callback.
- void Cancel() {
- weak_factory_.InvalidateWeakPtrs();
- forwarder_.Reset();
- callback_.Reset();
- }
-
- // Returns true if the wrapped callback has been cancelled.
- bool IsCancelled() const {
- return callback_.is_null();
- }
-
- // Sets |callback| as the closure that may be cancelled. |callback| may not
- // be null. Outstanding and any previously wrapped callbacks are cancelled.
- void Reset(const base::Callback<void(A1, A2)>& callback) {
- DCHECK(!callback.is_null());
-
- // Outstanding tasks (e.g., posted to a message loop) must not be called.
- Cancel();
-
- // |forwarder_| is no longer valid after Cancel(), so re-bind.
- InitializeForwarder();
-
- callback_ = callback;
- }
-
- // Returns a callback that can be disabled by calling Cancel().
- const base::Callback<void(A1, A2)>& callback() const {
- return forwarder_;
- }
-
- private:
- void Forward(A1 a1, A2 a2) const {
- callback_.Run(a1, a2);
- }
-
- // Helper method to bind |forwarder_| using a weak pointer from
- // |weak_factory_|.
- void InitializeForwarder() {
- forwarder_ = base::Bind(&CancelableCallback<void(A1, A2)>::Forward,
- weak_factory_.GetWeakPtr());
- }
-
- // The wrapper closure.
- base::Callback<void(A1, A2)> forwarder_;
-
- // The stored closure that may be cancelled.
- base::Callback<void(A1, A2)> callback_;
-
- // Used to ensure Forward() is not run when this object is destroyed.
- base::WeakPtrFactory<CancelableCallback<void(A1, A2)> > weak_factory_;
- DISALLOW_COPY_AND_ASSIGN(CancelableCallback);
-};
-
typedef CancelableCallback<void(void)> CancelableClosure;
} // namespace base
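
With the two fixed-arity specializations folded into a single variadic template, CancelableCallback now wraps callbacks of any signature void(A...). A minimal usage sketch (hypothetical handler name, not part of this patch):

  // Hypothetical free function used as the wrapped callback.
  void OnDataReady(int bytes_read);

  base::CancelableCallback<void(int)> cancelable(base::Bind(&OnDataReady));
  base::Callback<void(int)> forwarder = cancelable.callback();
  forwarder.Run(42);      // Forwards to OnDataReady(42).
  cancelable.Cancel();    // Outstanding copies of |forwarder| become no-ops.
  forwarder.Run(0);       // Dropped; OnDataReady is not called.
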
diff --git a/debug/trace_event_impl.cc b/debug/trace_event_impl.cc
index ce62766..1a64eb0 100644
--- a/debug/trace_event_impl.cc
+++ b/debug/trace_event_impl.cc
@@ -1185,6 +1185,12 @@
// find the generation mismatch and delete this buffer soon.
}
+TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {
+}
+
+TraceLogStatus::~TraceLogStatus() {
+}
+
// static
TraceLog* TraceLog::GetInstance() {
return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
@@ -1586,10 +1592,12 @@
return it != enabled_state_observer_list_.end();
}
-float TraceLog::GetBufferPercentFull() const {
+TraceLogStatus TraceLog::GetStatus() const {
AutoLock lock(lock_);
- return static_cast<float>(static_cast<double>(logged_events_->Size()) /
- logged_events_->Capacity());
+ TraceLogStatus result;
+ result.event_capacity = logged_events_->Capacity();
+ result.event_count = logged_events_->Size();
+ return result;
}
bool TraceLog::BufferIsFull() const {
diff --git a/debug/trace_event_impl.h b/debug/trace_event_impl.h
index 6075e2d..ce2e017 100644
--- a/debug/trace_event_impl.h
+++ b/debug/trace_event_impl.h
@@ -420,6 +420,13 @@
bool enable_systrace;
};
+struct BASE_EXPORT TraceLogStatus {
+ TraceLogStatus();
+ ~TraceLogStatus();
+ size_t event_capacity;
+ size_t event_count;
+};
+
class BASE_EXPORT TraceLog {
public:
enum Mode {
@@ -495,7 +502,7 @@
void RemoveEnabledStateObserver(EnabledStateObserver* listener);
bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
- float GetBufferPercentFull() const;
+ TraceLogStatus GetStatus() const;
bool BufferIsFull() const;
// Not using base::Callback because it is limited to 7 parameters.
@@ -603,7 +610,6 @@
static void DeleteForTesting();
// Allow tests to inspect TraceEvents.
- size_t GetEventsSize() const { return logged_events_->Size(); }
TraceEvent* GetEventByHandle(TraceEventHandle handle);
void SetProcessID(int process_id);
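
Callers that relied on the removed GetBufferPercentFull() and GetEventsSize() accessors can derive the same numbers from the new struct; a rough migration sketch (not part of this patch):

  base::debug::TraceLogStatus status =
      base::debug::TraceLog::GetInstance()->GetStatus();
  size_t num_events = status.event_count;  // Replaces GetEventsSize().
  // Replaces GetBufferPercentFull().
  float percent_full = static_cast<float>(
      static_cast<double>(status.event_count) / status.event_capacity);
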
diff --git a/debug/trace_event_unittest.cc b/debug/trace_event_unittest.cc
index 69b5743..0904954 100644
--- a/debug/trace_event_unittest.cc
+++ b/debug/trace_event_unittest.cc
@@ -1366,8 +1366,7 @@
TEST_F(TraceEventTestFixture, StaticStringVsString) {
TraceLog* tracer = TraceLog::GetInstance();
// Make sure old events are flushed:
- EndTraceAndFlush();
- EXPECT_EQ(0u, tracer->GetEventsSize());
+ EXPECT_EQ(0u, tracer->GetStatus().event_count);
const unsigned char* category_group_enabled =
TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("cat");
@@ -1384,8 +1383,7 @@
TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2", 0, 0,
"arg1", TRACE_STR_COPY("argval"),
"arg2", TRACE_STR_COPY("argval"));
- size_t num_events = tracer->GetEventsSize();
- EXPECT_GT(num_events, 1u);
+ EXPECT_GT(tracer->GetStatus().event_count, 1u);
const TraceEvent* event1 = tracer->GetEventByHandle(handle1);
const TraceEvent* event2 = tracer->GetEventByHandle(handle2);
ASSERT_TRUE(event1);
@@ -1414,8 +1412,7 @@
TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2", 0, 0,
"arg1", TRACE_STR_COPY(str1),
"arg2", TRACE_STR_COPY(str2));
- size_t num_events = tracer->GetEventsSize();
- EXPECT_GT(num_events, 1u);
+ EXPECT_GT(tracer->GetStatus().event_count, 1u);
const TraceEvent* event1 = tracer->GetEventByHandle(handle1);
const TraceEvent* event2 = tracer->GetEventByHandle(handle2);
ASSERT_TRUE(event1);
diff --git a/metrics/histogram.h b/metrics/histogram.h
index 43e7462..9ee172e 100644
--- a/metrics/histogram.h
+++ b/metrics/histogram.h
@@ -50,9 +50,12 @@
// at the low end of the histogram scale, but allows the histogram to cover a
// gigantic range with the addition of very few buckets.
-// Usually we use macros to define and use a histogram. These macros use a
-// pattern involving a function static variable, that is a pointer to a
-// histogram. This static is explicitly initialized on any thread
+// Usually we use macros, defined in base/metrics/histogram_macros.h, to
+// define and use a histogram. Note: Callers should include that header
+// directly if they only access the histogram APIs through macros.
+//
+// Macros use a pattern involving a function static variable, that is a pointer
+// to a histogram. This static is explicitly initialized on any thread
// that detects an uninitialized (NULL) pointer. The potentially racy
// initialization is not a problem as it is always set to point to the same
// value (i.e., the FactoryGet always returns the same value). FactoryGet
@@ -67,7 +70,6 @@
#include <string>
#include <vector>
-#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/basictypes.h"
#include "base/compiler_specific.h"
@@ -76,6 +78,8 @@
#include "base/memory/scoped_ptr.h"
#include "base/metrics/bucket_ranges.h"
#include "base/metrics/histogram_base.h"
+// TODO(asvitkine): Migrate callers to include this directly and remove this.
+#include "base/metrics/histogram_macros.h"
#include "base/metrics/histogram_samples.h"
#include "base/time/time.h"
@@ -84,234 +88,11 @@
namespace base {
-class Lock;
-//------------------------------------------------------------------------------
-// Histograms are often put in areas where they are called many many times, and
-// performance is critical. As a result, they are designed to have a very low
-// recurring cost of executing (adding additional samples). Toward that end,
-// the macros declare a static pointer to the histogram in question, and only
-// take a "slow path" to construct (or find) the histogram on the first run
-// through the macro. We leak the histograms at shutdown time so that we don't
-// have to validate using the pointers at any time during the running of the
-// process.
-
-// The following code is generally what a thread-safe static pointer
-// initialization looks like for a histogram (after a macro is expanded). This
-// sample is an expansion (with comments) of the code for
-// LOCAL_HISTOGRAM_CUSTOM_COUNTS().
-
-/*
- do {
- // The pointer's presence indicates the initialization is complete.
- // Initialization is idempotent, so it can safely be atomically repeated.
- static base::subtle::AtomicWord atomic_histogram_pointer = 0;
-
- // Acquire_Load() ensures that we acquire visibility to the pointed-to data
- // in the histogram.
- base::Histogram* histogram_pointer(reinterpret_cast<base::Histogram*>(
- base::subtle::Acquire_Load(&atomic_histogram_pointer)));
-
- if (!histogram_pointer) {
- // This is the slow path, which will construct OR find the matching
- // histogram. FactoryGet includes locks on a global histogram name map
- // and is completely thread safe.
- histogram_pointer = base::Histogram::FactoryGet(
- name, min, max, bucket_count, base::HistogramBase::kNoFlags);
-
- // Use Release_Store to ensure that the histogram data is made available
- // globally before we make the pointer visible.
- // Several threads may perform this store, but the same value will be
- // stored in all cases (for a given named/spec'ed histogram).
- // We could do this without any barrier, since FactoryGet entered and
- // exited a lock after construction, but this barrier makes things clear.
- base::subtle::Release_Store(&atomic_histogram_pointer,
- reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));
- }
-
- // Ensure calling contract is upheld, and the name does NOT vary.
- DCHECK(histogram_pointer->histogram_name() == constant_histogram_name);
-
- histogram_pointer->Add(sample);
- } while (0);
-*/
-
-// The above pattern is repeated in several macros. The only elements that
-// vary are the invocation of the Add(sample) vs AddTime(sample), and the choice
-// of which FactoryGet method to use. The different FactoryGet methods have
-// various argument lists, so the function with its argument list is provided as
-// a macro argument here. The name is only used in a DCHECK, to assure that
-// callers don't try to vary the name of the histogram (which would tend to be
-// ignored by the one-time initialization of the histogtram_pointer).
-#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name, \
- histogram_add_method_invocation, \
- histogram_factory_get_invocation) \
- do { \
- static base::subtle::AtomicWord atomic_histogram_pointer = 0; \
- base::HistogramBase* histogram_pointer( \
- reinterpret_cast<base::HistogramBase*>( \
- base::subtle::Acquire_Load(&atomic_histogram_pointer))); \
- if (!histogram_pointer) { \
- histogram_pointer = histogram_factory_get_invocation; \
- base::subtle::Release_Store(&atomic_histogram_pointer, \
- reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer)); \
- } \
- if (DCHECK_IS_ON) \
- histogram_pointer->CheckName(constant_histogram_name); \
- histogram_pointer->histogram_add_method_invocation; \
- } while (0)
-
-
-//------------------------------------------------------------------------------
-// Provide easy general purpose histogram in a macro, just like stats counters.
-// The first four macros use 50 buckets.
-
-#define LOCAL_HISTOGRAM_TIMES(name, sample) LOCAL_HISTOGRAM_CUSTOM_TIMES( \
- name, sample, base::TimeDelta::FromMilliseconds(1), \
- base::TimeDelta::FromSeconds(10), 50)
-
-// For folks that need real specific times, use this to select a precise range
-// of times you want plotted, and the number of buckets you want used.
-#define LOCAL_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, AddTime(sample), \
- base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
- base::HistogramBase::kNoFlags))
-
-#define LOCAL_HISTOGRAM_COUNTS(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1, 1000000, 50)
-
-#define LOCAL_HISTOGRAM_COUNTS_100(name, sample) \
- LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
-
-#define LOCAL_HISTOGRAM_COUNTS_10000(name, sample) \
- LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 10000, 50)
-
-#define LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
- base::Histogram::FactoryGet(name, min, max, bucket_count, \
- base::HistogramBase::kNoFlags))
-
-// This is a helper macro used by other macros and shouldn't be used directly.
-#define HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
- base::LinearHistogram::FactoryGet(name, 1, boundary, boundary + 1, \
- flag))
-
-#define LOCAL_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
- LOCAL_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
-
-#define LOCAL_HISTOGRAM_BOOLEAN(name, sample) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
- base::BooleanHistogram::FactoryGet(name, base::Histogram::kNoFlags))
-
-// Support histograming of an enumerated value. The samples should always be
-// strictly less than |boundary_value| -- this prevents you from running into
-// problems down the line if you add additional buckets to the histogram. Note
-// also that, despite explicitly setting the minimum bucket value to |1| below,
-// it is fine for enumerated histograms to be 0-indexed -- this is because
-// enumerated histograms should never have underflow.
-#define LOCAL_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
- base::LinearHistogram::FactoryGet(name, 1, boundary_value, \
- boundary_value + 1, base::HistogramBase::kNoFlags))
-
-// Support histograming of an enumerated value. Samples should be one of the
-// std::vector<int> list provided via |custom_ranges|. See comments above
-// CustomRanges::FactoryGet about the requirement of |custom_ranges|.
-// You can use the helper function CustomHistogram::ArrayToCustomRanges to
-// transform a C-style array of valid sample values to a std::vector<int>.
-#define LOCAL_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
- base::CustomHistogram::FactoryGet(name, custom_ranges, \
- base::HistogramBase::kNoFlags))
-
-#define LOCAL_HISTOGRAM_MEMORY_KB(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1000, 500000, 50)
-
-//------------------------------------------------------------------------------
-// The following macros provide typical usage scenarios for callers that wish
-// to record histogram data, and have the data submitted/uploaded via UMA.
-// Not all systems support such UMA, but if they do, the following macros
-// should work with the service.
-
-#define UMA_HISTOGRAM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
- name, sample, base::TimeDelta::FromMilliseconds(1), \
- base::TimeDelta::FromSeconds(10), 50)
-
-#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
- name, sample, base::TimeDelta::FromMilliseconds(10), \
- base::TimeDelta::FromMinutes(3), 50)
-
-// Use this macro when times can routinely be much longer than 10 seconds.
-#define UMA_HISTOGRAM_LONG_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
- name, sample, base::TimeDelta::FromMilliseconds(1), \
- base::TimeDelta::FromHours(1), 50)
-
-// Use this macro when times can routinely be much longer than 10 seconds and
-// you want 100 buckets.
-#define UMA_HISTOGRAM_LONG_TIMES_100(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
- name, sample, base::TimeDelta::FromMilliseconds(1), \
- base::TimeDelta::FromHours(1), 100)
-
-#define UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, AddTime(sample), \
- base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
- base::HistogramBase::kUmaTargetedHistogramFlag))
-
-#define UMA_HISTOGRAM_COUNTS(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1, 1000000, 50)
-
-#define UMA_HISTOGRAM_COUNTS_100(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1, 100, 50)
-
-#define UMA_HISTOGRAM_COUNTS_10000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1, 10000, 50)
-
-#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
- base::Histogram::FactoryGet(name, min, max, bucket_count, \
- base::HistogramBase::kUmaTargetedHistogramFlag))
-
-#define UMA_HISTOGRAM_MEMORY_KB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1000, 500000, 50)
-
-#define UMA_HISTOGRAM_MEMORY_MB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
- name, sample, 1, 1000, 50)
-
-#define UMA_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
- UMA_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
-
-#define UMA_HISTOGRAM_BOOLEAN(name, sample) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
- base::BooleanHistogram::FactoryGet(name, \
- base::HistogramBase::kUmaTargetedHistogramFlag))
-
-// The samples should always be strictly less than |boundary_value|. For more
-// details, see the comment for the |LOCAL_HISTOGRAM_ENUMERATION| macro, above.
-#define UMA_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
- HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary_value, \
- base::HistogramBase::kUmaTargetedHistogramFlag)
-
-// Similar to UMA_HISTOGRAM_ENUMERATION, but used for recording stability
-// histograms. Use this if recording a histogram that should be part of the
-// initial stability log.
-#define UMA_STABILITY_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
- HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary_value, \
- base::HistogramBase::kUmaStabilityHistogramFlag)
-
-#define UMA_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
- STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
- base::CustomHistogram::FactoryGet(name, custom_ranges, \
- base::HistogramBase::kUmaTargetedHistogramFlag))
-
-//------------------------------------------------------------------------------
-
-class BucketRanges;
-class SampleVector;
-
class BooleanHistogram;
class CustomHistogram;
class Histogram;
class LinearHistogram;
+class SampleVector;
class BASE_EXPORT Histogram : public HistogramBase {
public:
diff --git a/metrics/histogram_macros.h b/metrics/histogram_macros.h
new file mode 100644
index 0000000..a9996d1
--- /dev/null
+++ b/metrics/histogram_macros.h
@@ -0,0 +1,232 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_MACROS_H_
+#define BASE_METRICS_HISTOGRAM_MACROS_H_
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/time/time.h"
+
+//------------------------------------------------------------------------------
+// Histograms are often put in areas where they are called many many times, and
+// performance is critical. As a result, they are designed to have a very low
+// recurring cost of executing (adding additional samples). Toward that end,
+// the macros declare a static pointer to the histogram in question, and only
+// take a "slow path" to construct (or find) the histogram on the first run
+// through the macro. We leak the histograms at shutdown time so that we don't
+// have to validate using the pointers at any time during the running of the
+// process.
+
+// The following code is generally what a thread-safe static pointer
+// initialization looks like for a histogram (after a macro is expanded). This
+// sample is an expansion (with comments) of the code for
+// LOCAL_HISTOGRAM_CUSTOM_COUNTS().
+
+/*
+ do {
+ // The pointer's presence indicates the initialization is complete.
+ // Initialization is idempotent, so it can safely be atomically repeated.
+ static base::subtle::AtomicWord atomic_histogram_pointer = 0;
+
+ // Acquire_Load() ensures that we acquire visibility to the pointed-to data
+ // in the histogram.
+ base::Histogram* histogram_pointer(reinterpret_cast<base::Histogram*>(
+ base::subtle::Acquire_Load(&atomic_histogram_pointer)));
+
+ if (!histogram_pointer) {
+ // This is the slow path, which will construct OR find the matching
+ // histogram. FactoryGet includes locks on a global histogram name map
+ // and is completely thread safe.
+ histogram_pointer = base::Histogram::FactoryGet(
+ name, min, max, bucket_count, base::HistogramBase::kNoFlags);
+
+ // Use Release_Store to ensure that the histogram data is made available
+ // globally before we make the pointer visible.
+ // Several threads may perform this store, but the same value will be
+ // stored in all cases (for a given named/spec'ed histogram).
+ // We could do this without any barrier, since FactoryGet entered and
+ // exited a lock after construction, but this barrier makes things clear.
+ base::subtle::Release_Store(&atomic_histogram_pointer,
+ reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));
+ }
+
+ // Ensure calling contract is upheld, and the name does NOT vary.
+ DCHECK(histogram_pointer->histogram_name() == constant_histogram_name);
+
+ histogram_pointer->Add(sample);
+ } while (0);
+*/
+
+// The above pattern is repeated in several macros. The only elements that
+// vary are the invocation of the Add(sample) vs AddTime(sample), and the choice
+// of which FactoryGet method to use. The different FactoryGet methods have
+// various argument lists, so the function with its argument list is provided as
+// a macro argument here. The name is only used in a DCHECK, to assure that
+// callers don't try to vary the name of the histogram (which would tend to be
+// ignored by the one-time initialization of the histogram_pointer).
+#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name, \
+ histogram_add_method_invocation, \
+ histogram_factory_get_invocation) \
+ do { \
+ static base::subtle::AtomicWord atomic_histogram_pointer = 0; \
+ base::HistogramBase* histogram_pointer( \
+ reinterpret_cast<base::HistogramBase*>( \
+ base::subtle::Acquire_Load(&atomic_histogram_pointer))); \
+ if (!histogram_pointer) { \
+ histogram_pointer = histogram_factory_get_invocation; \
+ base::subtle::Release_Store(&atomic_histogram_pointer, \
+ reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer)); \
+ } \
+ if (DCHECK_IS_ON) \
+ histogram_pointer->CheckName(constant_histogram_name); \
+ histogram_pointer->histogram_add_method_invocation; \
+ } while (0)
+
+
+//------------------------------------------------------------------------------
+// Provide easy general purpose histogram in a macro, just like stats counters.
+// The first four macros use 50 buckets.
+
+#define LOCAL_HISTOGRAM_TIMES(name, sample) LOCAL_HISTOGRAM_CUSTOM_TIMES( \
+ name, sample, base::TimeDelta::FromMilliseconds(1), \
+ base::TimeDelta::FromSeconds(10), 50)
+
+// For folks that need real specific times, use this to select a precise range
+// of times you want plotted, and the number of buckets you want used.
+#define LOCAL_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, AddTime(sample), \
+ base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+ base::HistogramBase::kNoFlags))
+
+#define LOCAL_HISTOGRAM_COUNTS(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 1000000, 50)
+
+#define LOCAL_HISTOGRAM_COUNTS_100(name, sample) \
+ LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
+
+#define LOCAL_HISTOGRAM_COUNTS_10000(name, sample) \
+ LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 10000, 50)
+
+#define LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+ base::Histogram::FactoryGet(name, min, max, bucket_count, \
+ base::HistogramBase::kNoFlags))
+
+// This is a helper macro used by other macros and shouldn't be used directly.
+#define HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+ base::LinearHistogram::FactoryGet(name, 1, boundary, boundary + 1, \
+ flag))
+
+#define LOCAL_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
+ LOCAL_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
+
+#define LOCAL_HISTOGRAM_BOOLEAN(name, sample) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
+ base::BooleanHistogram::FactoryGet(name, base::Histogram::kNoFlags))
+
+// Support histograming of an enumerated value. The samples should always be
+// strictly less than |boundary_value| -- this prevents you from running into
+// problems down the line if you add additional buckets to the histogram. Note
+// also that, despite explicitly setting the minimum bucket value to |1| below,
+// it is fine for enumerated histograms to be 0-indexed -- this is because
+// enumerated histograms should never have underflow.
+#define LOCAL_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+ base::LinearHistogram::FactoryGet(name, 1, boundary_value, \
+ boundary_value + 1, base::HistogramBase::kNoFlags))
+
+// Support histograming of an enumerated value. Samples should be one of the
+// std::vector<int> list provided via |custom_ranges|. See comments above
+// CustomRanges::FactoryGet about the requirement of |custom_ranges|.
+// You can use the helper function CustomHistogram::ArrayToCustomRanges to
+// transform a C-style array of valid sample values to a std::vector<int>.
+#define LOCAL_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+ base::CustomHistogram::FactoryGet(name, custom_ranges, \
+ base::HistogramBase::kNoFlags))
+
+#define LOCAL_HISTOGRAM_MEMORY_KB(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1000, 500000, 50)
+
+//------------------------------------------------------------------------------
+// The following macros provide typical usage scenarios for callers that wish
+// to record histogram data, and have the data submitted/uploaded via UMA.
+// Not all systems support such UMA, but if they do, the following macros
+// should work with the service.
+
+#define UMA_HISTOGRAM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+ name, sample, base::TimeDelta::FromMilliseconds(1), \
+ base::TimeDelta::FromSeconds(10), 50)
+
+#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+ name, sample, base::TimeDelta::FromMilliseconds(10), \
+ base::TimeDelta::FromMinutes(3), 50)
+
+// Use this macro when times can routinely be much longer than 10 seconds.
+#define UMA_HISTOGRAM_LONG_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+ name, sample, base::TimeDelta::FromMilliseconds(1), \
+ base::TimeDelta::FromHours(1), 50)
+
+// Use this macro when times can routinely be much longer than 10 seconds and
+// you want 100 buckets.
+#define UMA_HISTOGRAM_LONG_TIMES_100(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+ name, sample, base::TimeDelta::FromMilliseconds(1), \
+ base::TimeDelta::FromHours(1), 100)
+
+#define UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, AddTime(sample), \
+ base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+ base::HistogramBase::kUmaTargetedHistogramFlag))
+
+#define UMA_HISTOGRAM_COUNTS(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 1000000, 50)
+
+#define UMA_HISTOGRAM_COUNTS_100(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 100, 50)
+
+#define UMA_HISTOGRAM_COUNTS_10000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 10000, 50)
+
+#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+ base::Histogram::FactoryGet(name, min, max, bucket_count, \
+ base::HistogramBase::kUmaTargetedHistogramFlag))
+
+#define UMA_HISTOGRAM_MEMORY_KB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1000, 500000, 50)
+
+#define UMA_HISTOGRAM_MEMORY_MB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 1000, 50)
+
+#define UMA_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
+ UMA_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
+
+#define UMA_HISTOGRAM_BOOLEAN(name, sample) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
+ base::BooleanHistogram::FactoryGet(name, \
+ base::HistogramBase::kUmaTargetedHistogramFlag))
+
+// The samples should always be strictly less than |boundary_value|. For more
+// details, see the comment for the |LOCAL_HISTOGRAM_ENUMERATION| macro, above.
+#define UMA_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
+ HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary_value, \
+ base::HistogramBase::kUmaTargetedHistogramFlag)
+
+// Similar to UMA_HISTOGRAM_ENUMERATION, but used for recording stability
+// histograms. Use this if recording a histogram that should be part of the
+// initial stability log.
+#define UMA_STABILITY_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
+ HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary_value, \
+ base::HistogramBase::kUmaStabilityHistogramFlag)
+
+#define UMA_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
+ STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+ base::CustomHistogram::FactoryGet(name, custom_ranges, \
+ base::HistogramBase::kUmaTargetedHistogramFlag))
+
+#endif // BASE_METRICS_HISTOGRAM_MACROS_H_
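
Once callers migrate off the transitional include in histogram.h, metrics-only files can include the new header directly. A small usage sketch (hypothetical histogram names and enum, shown only to illustrate the macros moved above):

  #include "base/metrics/histogram_macros.h"

  // Hypothetical enum; samples must stay strictly below the boundary value.
  enum DialogAction {
    DIALOG_OPENED = 0,
    DIALOG_ACCEPTED = 1,
    DIALOG_DISMISSED = 2,
    DIALOG_ACTION_COUNT  // Boundary; never recorded as a sample.
  };

  void RecordDialogMetrics(DialogAction action, base::TimeDelta open_time) {
    UMA_HISTOGRAM_ENUMERATION("MyFeature.DialogAction", action,
                              DIALOG_ACTION_COUNT);
    UMA_HISTOGRAM_TIMES("MyFeature.DialogOpenTime", open_time);
  }
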
diff --git a/sys_info_android.cc b/sys_info_android.cc
index 0d885ee..829053f 100644
--- a/sys_info_android.cc
+++ b/sys_info_android.cc
@@ -59,8 +59,8 @@
// cannot be acquired. Use the latest Android release with a higher bug fix
// version to avoid unnecessary comparison errors with the latest release.
// This should be manually kept up-to-date on each Android release.
-const int kDefaultAndroidMajorVersion = 4;
-const int kDefaultAndroidMinorVersion = 4;
+const int kDefaultAndroidMajorVersion = 5;
+const int kDefaultAndroidMinorVersion = 0;
const int kDefaultAndroidBugfixVersion = 99;
// Parse out the OS version numbers from the system properties.
diff --git a/test/multiprocess_test_android.cc b/test/multiprocess_test_android.cc
index 4cfae5e..8f54b82 100644
--- a/test/multiprocess_test_android.cc
+++ b/test/multiprocess_test_android.cc
@@ -2,13 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/posix/global_descriptors.h"
#include "base/test/multiprocess_test.h"
#include <unistd.h>
+#include "base/base_switches.h"
+#include "base/command_line.h"
#include "base/containers/hash_tables.h"
#include "base/logging.h"
+#include "base/posix/global_descriptors.h"
#include "testing/multiprocess_func_list.h"
namespace base {
@@ -16,7 +18,6 @@
// A very basic implementation for Android. On Android tests can run in an APK
// and we don't have an executable to exec*. This implementation does the bare
// minimum to execute the method specified by procname (in the child process).
-// - |base_command_line| is ignored.
// - All options except |fds_to_remap| are ignored.
ProcessHandle SpawnMultiProcessTestChild(const std::string& procname,
const CommandLine& base_command_line,
@@ -60,6 +61,13 @@
}
close(old_fd);
}
+ CommandLine::Reset();
+ CommandLine::Init(0, nullptr);
+ CommandLine* command_line = CommandLine::ForCurrentProcess();
+ command_line->InitFromArgv(base_command_line.argv());
+ if (!command_line->HasSwitch(switches::kTestChildProcess))
+ command_line->AppendSwitchASCII(switches::kTestChildProcess, procname);
+
_exit(multi_process_function_list::InvokeChildProcessTest(procname));
return 0;
}
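
For reference, tests typically reach this code through the helpers in base/test/multiprocess_test.h rather than calling it directly; a hedged sketch of the caller side (assuming the usual MULTIPROCESS_TEST_MAIN registration macro and GetMultiProcessTestChildBaseCommandLine() helper):

  // Registered by name in the multiprocess function list; runs in the child.
  MULTIPROCESS_TEST_MAIN(EchoChildMain) {
    // With this change, the child's CommandLine is rebuilt from
    // |base_command_line| and carries switches::kTestChildProcess.
    return 0;
  }

  TEST(MultiProcessExampleTest, SpawnsChild) {
    base::CommandLine command_line =
        base::GetMultiProcessTestChildBaseCommandLine();
    base::ProcessHandle handle = base::SpawnMultiProcessTestChild(
        "EchoChildMain", command_line, base::LaunchOptions());
    EXPECT_NE(base::kNullProcessHandle, handle);
  }
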