Update to Chromium //base at Chromium commit 82baa9afc6620c68cb2168f20bb65e77e3e57f0a.
This brings //base up to Chromium as of September 14, 2015.
TBR=jamesr@chromium.org
Review URL: https://codereview.chromium.org/2045223003 .
diff --git a/BUILD.gn b/BUILD.gn
index 86db541..fae5384 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -64,8 +64,6 @@
sources = [
"allocator/allocator_extension.cc",
"allocator/allocator_extension.h",
- "allocator/type_profiler_control.cc",
- "allocator/type_profiler_control.h",
"android/animation_frame_time_histogram.cc",
"android/animation_frame_time_histogram.h",
"android/apk_assets.cc",
@@ -649,13 +647,15 @@
if (is_ios) {
sources -= [
+ "files/file_path_watcher.cc",
+ "files/file_path_watcher.h",
+ "files/file_path_watcher_fsevents.cc",
+ "files/file_path_watcher_fsevents.h",
+ "files/file_path_watcher_kqueue.cc",
+ "files/file_path_watcher_kqueue.h",
"message_loop/message_pump_libevent.cc",
"message_loop/message_pump_libevent.h",
]
-
- set_sources_assignment_filter([])
- sources += [ "files/file_path_watcher_mac.cc" ]
- set_sources_assignment_filter(sources_assignment_filter)
}
sources -= [
@@ -729,8 +729,6 @@
set_sources_assignment_filter(sources_assignment_filter)
sources -= [
- "allocator/type_profiler_control.cc",
- "allocator/type_profiler_control.h",
"async_socket_io_handler_posix.cc",
"cpu.cc",
"files/file_enumerator_posix.cc",
@@ -1434,6 +1432,7 @@
"win/registry_unittest.cc",
"win/scoped_bstr_unittest.cc",
"win/scoped_comptr_unittest.cc",
+ "win/scoped_handle_unittest.cc",
"win/scoped_process_information_unittest.cc",
"win/scoped_variant_unittest.cc",
"win/shortcut_unittest.cc",
@@ -1481,6 +1480,7 @@
if (is_ios) {
sources -= [
+ "files/file_path_watcher_unittest.cc",
"memory/discardable_shared_memory_unittest.cc",
"memory/shared_memory_unittest.cc",
"process/memory_unittest.cc",
diff --git a/allocator/BUILD.gn b/allocator/BUILD.gn
index b6b7fc2..c12431b 100644
--- a/allocator/BUILD.gn
+++ b/allocator/BUILD.gn
@@ -100,7 +100,7 @@
check_includes = false
sources = [
- # Generated for our configuration from tcmalloc"s build
+ # Generated for our configuration from tcmalloc's build
# and checked in.
"$tcmalloc_dir/src/config.h",
"$tcmalloc_dir/src/config_android.h",
@@ -151,8 +151,6 @@
# #included by debugallocation_shim.cc
#"$tcmalloc_dir/src/debugallocation.cc",
- "$tcmalloc_dir/src/deep-heap-profile.cc",
- "$tcmalloc_dir/src/deep-heap-profile.h",
"$tcmalloc_dir/src/free_list.cc",
"$tcmalloc_dir/src/free_list.h",
"$tcmalloc_dir/src/heap-profile-table.cc",
diff --git a/allocator/type_profiler.cc b/allocator/type_profiler.cc
deleted file mode 100644
index 635fbcf..0000000
--- a/allocator/type_profiler.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if defined(TYPE_PROFILING)
-
-#include "base/allocator/type_profiler.h"
-
-#include <assert.h>
-
-namespace {
-
-void* NopIntercept(void* ptr, size_t size, const std::type_info& type) {
- return ptr;
-}
-
-base::type_profiler::InterceptFunction* g_new_intercept = NopIntercept;
-base::type_profiler::InterceptFunction* g_delete_intercept = NopIntercept;
-
-}
-
-void* __op_new_intercept__(void* ptr,
- size_t size,
- const std::type_info& type) {
- return g_new_intercept(ptr, size, type);
-}
-
-void* __op_delete_intercept__(void* ptr,
- size_t size,
- const std::type_info& type) {
- return g_delete_intercept(ptr, size, type);
-}
-
-namespace base {
-namespace type_profiler {
-
-// static
-void InterceptFunctions::SetFunctions(InterceptFunction* new_intercept,
- InterceptFunction* delete_intercept) {
- // Don't use DCHECK, as this file is injected into targets
- // that do not and should not depend on base/base.gyp:base
- assert(g_new_intercept == NopIntercept);
- assert(g_delete_intercept == NopIntercept);
-
- g_new_intercept = new_intercept;
- g_delete_intercept = delete_intercept;
-}
-
-// static
-void InterceptFunctions::ResetFunctions() {
- g_new_intercept = NopIntercept;
- g_delete_intercept = NopIntercept;
-}
-
-// static
-bool InterceptFunctions::IsAvailable() {
- return g_new_intercept != NopIntercept || g_delete_intercept != NopIntercept;
-}
-
-} // namespace type_profiler
-} // namespace base
-
-#endif // defined(TYPE_PROFILING)
diff --git a/allocator/type_profiler.h b/allocator/type_profiler.h
deleted file mode 100644
index 86b5711..0000000
--- a/allocator/type_profiler.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_TYPE_PROFILER_H_
-#define BASE_ALLOCATOR_TYPE_PROFILER_H_
-
-#if defined(TYPE_PROFILING)
-
-#include <stddef.h> // for size_t
-#include <typeinfo> // for std::typeinfo
-
-namespace base {
-namespace type_profiler {
-
-typedef void* InterceptFunction(void*, size_t, const std::type_info&);
-
-class InterceptFunctions {
- public:
- // It must be called only once in a process while it is in single-thread.
- // For now, ContentMainRunnerImpl::Initialize is the only supposed caller
- // of this function except for single-threaded unit tests.
- static void SetFunctions(InterceptFunction* new_intercept,
- InterceptFunction* delete_intercept);
-
- private:
- friend class TypeProfilerTest;
-
- // These functions are not thread safe.
- // They must be used only from single-threaded unit tests.
- static void ResetFunctions();
- static bool IsAvailable();
-};
-
-} // namespace type_profiler
-} // namespace base
-
-#endif // defined(TYPE_PROFILING)
-
-#endif // BASE_ALLOCATOR_TYPE_PROFILER_H_
diff --git a/allocator/type_profiler_control.cc b/allocator/type_profiler_control.cc
deleted file mode 100644
index 6be7984..0000000
--- a/allocator/type_profiler_control.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/type_profiler_control.h"
-
-namespace base {
-namespace type_profiler {
-
-namespace {
-
-#if defined(TYPE_PROFILING)
-const bool kTypeProfilingEnabled = true;
-#else
-const bool kTypeProfilingEnabled = false;
-#endif
-
-bool g_enable_intercept = kTypeProfilingEnabled;
-
-} // namespace
-
-// static
-void Controller::Stop() {
- g_enable_intercept = false;
-}
-
-// static
-bool Controller::IsProfiling() {
- return kTypeProfilingEnabled && g_enable_intercept;
-}
-
-// static
-void Controller::Restart() {
- g_enable_intercept = kTypeProfilingEnabled;
-}
-
-} // namespace type_profiler
-} // namespace base
diff --git a/allocator/type_profiler_control.h b/allocator/type_profiler_control.h
deleted file mode 100644
index 17cf5b6..0000000
--- a/allocator/type_profiler_control.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_TYPE_PROFILER_CONTROL_H_
-#define BASE_ALLOCATOR_TYPE_PROFILER_CONTROL_H_
-
-#include "base/gtest_prod_util.h"
-
-namespace base {
-namespace type_profiler {
-
-class Controller {
- public:
- static void Stop();
- static bool IsProfiling();
-
- private:
- FRIEND_TEST_ALL_PREFIXES(TypeProfilerTest,
- TestProfileNewWithoutProfiledDelete);
-
- // It must be used only from allowed unit tests. The following is only
- // allowed for use in unit tests. Profiling should never be restarted in
- // regular use.
- static void Restart();
-};
-
-} // namespace type_profiler
-} // namespace base
-
-#endif // BASE_ALLOCATOR_TYPE_PROFILER_CONTROL_H_
diff --git a/allocator/type_profiler_map_unittest.cc b/allocator/type_profiler_map_unittest.cc
deleted file mode 100644
index 514ec16..0000000
--- a/allocator/type_profiler_map_unittest.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a unittest set for type_profiler_map in third_party/tcmalloc. It is
-// independent from other tests and executed manually like allocator_unittests
-// since type_profiler_map is a singleton (like TCMalloc's heap-profiler), and
-// it requires RTTI and different compiling/linking options from others.
-
-#if defined(TYPE_PROFILING)
-
-#include "base/memory/scoped_ptr.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/tcmalloc/chromium/src/gperftools/type_profiler_map.h"
-
-namespace base {
-namespace type_profiler {
-
-static const void* const g_const_null = static_cast<const void*>(NULL);
-
-TEST(TypeProfilerMapTest, NormalOperation) {
- // Allocate an object just to get a valid address.
- // This 'new' is not profiled by type_profiler.
- scoped_ptr<int> dummy(new int(48));
- const std::type_info* type;
-
- type = LookupType(dummy.get());
- EXPECT_EQ(g_const_null, type);
-
- InsertType(dummy.get(), 12, typeid(int));
- type = LookupType(dummy.get());
- ASSERT_NE(g_const_null, type);
- EXPECT_STREQ(typeid(int).name(), type->name());
-
- EraseType(dummy.get());
- type = LookupType(dummy.get());
- EXPECT_EQ(g_const_null, type);
-}
-
-TEST(TypeProfilerMapTest, EraseWithoutInsert) {
- scoped_ptr<int> dummy(new int(48));
- const std::type_info* type;
-
- for (int i = 0; i < 10; ++i) {
- EraseType(dummy.get());
- type = LookupType(dummy.get());
- EXPECT_EQ(g_const_null, type);
- }
-}
-
-TEST(TypeProfilerMapTest, InsertThenMultipleErase) {
- scoped_ptr<int> dummy(new int(48));
- const std::type_info* type;
-
- InsertType(dummy.get(), 12, typeid(int));
- type = LookupType(dummy.get());
- ASSERT_NE(g_const_null, type);
- EXPECT_STREQ(typeid(int).name(), type->name());
-
- for (int i = 0; i < 10; ++i) {
- EraseType(dummy.get());
- type = LookupType(dummy.get());
- EXPECT_EQ(g_const_null, type);
- }
-}
-
-TEST(TypeProfilerMapTest, MultipleInsertWithoutErase) {
- scoped_ptr<int> dummy(new int(48));
- const std::type_info* type;
-
- InsertType(dummy.get(), 12, typeid(int));
- type = LookupType(dummy.get());
- ASSERT_NE(g_const_null, type);
- EXPECT_STREQ(typeid(int).name(), type->name());
-
- InsertType(dummy.get(), 5, typeid(char));
- type = LookupType(dummy.get());
- ASSERT_NE(g_const_null, type);
- EXPECT_STREQ(typeid(char).name(), type->name());
-
- InsertType(dummy.get(), 129, typeid(long));
- type = LookupType(dummy.get());
- ASSERT_NE(g_const_null, type);
- EXPECT_STREQ(typeid(long).name(), type->name());
-
- EraseType(dummy.get());
- type = LookupType(dummy.get());
- EXPECT_EQ(g_const_null, type);
-}
-
-} // namespace type_profiler
-} // namespace base
-
-#endif // defined(TYPE_PROFILING)
-
-int main(int argc, char** argv) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
diff --git a/allocator/type_profiler_tcmalloc.cc b/allocator/type_profiler_tcmalloc.cc
deleted file mode 100644
index e5e10e0..0000000
--- a/allocator/type_profiler_tcmalloc.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if defined(TYPE_PROFILING)
-
-#include "base/allocator/type_profiler_tcmalloc.h"
-
-#include "base/allocator/type_profiler_control.h"
-#include "third_party/tcmalloc/chromium/src/gperftools/heap-profiler.h"
-#include "third_party/tcmalloc/chromium/src/gperftools/type_profiler_map.h"
-
-namespace base {
-namespace type_profiler {
-
-void* NewInterceptForTCMalloc(void* ptr,
- size_t size,
- const std::type_info& type) {
- if (Controller::IsProfiling())
- InsertType(ptr, size, type);
-
- return ptr;
-}
-
-void* DeleteInterceptForTCMalloc(void* ptr,
- size_t size,
- const std::type_info& type) {
- if (Controller::IsProfiling())
- EraseType(ptr);
-
- return ptr;
-}
-
-} // namespace type_profiler
-} // namespace base
-
-#endif // defined(TYPE_PROFILING)
diff --git a/allocator/type_profiler_tcmalloc.h b/allocator/type_profiler_tcmalloc.h
deleted file mode 100644
index ac55995..0000000
--- a/allocator/type_profiler_tcmalloc.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_TYPE_PROFILER_TCMALLOC_H_
-#define BASE_ALLOCATOR_TYPE_PROFILER_TCMALLOC_H_
-
-#if defined(TYPE_PROFILING)
-
-#include <cstddef> // for size_t
-#include <typeinfo> // for std::type_info
-
-namespace base {
-namespace type_profiler {
-
-void* NewInterceptForTCMalloc(void* ptr,
- size_t size,
- const std::type_info& type);
-
-void* DeleteInterceptForTCMalloc(void* ptr,
- size_t size,
- const std::type_info& type);
-
-} // namespace type_profiler
-} // namespace base
-
-#endif // defined(TYPE_PROFILING)
-
-#endif // BASE_ALLOCATOR_TYPE_PROFILER_TCMALLOC_H_
diff --git a/allocator/type_profiler_unittest.cc b/allocator/type_profiler_unittest.cc
deleted file mode 100644
index 3d7369c38..0000000
--- a/allocator/type_profiler_unittest.cc
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a unittest set for type_profiler. It is independent from other
-// tests and executed manually like allocator_unittests since type_profiler_map
-// used in type_profiler is a singleton (like TCMalloc's heap-profiler), and
-// it requires RTTI and different compiling/linking options from others
-//
-// It tests that the profiler doesn't fail in suspicous cases. For example,
-// 'new' is not profiled, but 'delete' for the created object is profiled.
-
-#if defined(TYPE_PROFILING)
-
-#include "base/allocator/type_profiler.h"
-#include "base/allocator/type_profiler_control.h"
-#include "base/allocator/type_profiler_tcmalloc.h"
-#include "base/basictypes.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/tcmalloc/chromium/src/gperftools/type_profiler_map.h"
-
-namespace base {
-namespace type_profiler {
-
-class TypeProfilerTest : public testing::Test {
- public:
- TypeProfilerTest() {}
-
- void SetInterceptFunctions() {
- InterceptFunctions::SetFunctions(NewInterceptForTCMalloc,
- DeleteInterceptForTCMalloc);
- }
-
- void ResetInterceptFunctions() {
- InterceptFunctions::ResetFunctions();
- }
-
- void SetUp() {
- SetInterceptFunctions();
- }
-
- void TearDown() {
- ResetInterceptFunctions();
- }
-
- protected:
- static const size_t kDummyArraySize;
- static const void* const kConstNull;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TypeProfilerTest);
-};
-
-const size_t TypeProfilerTest::kDummyArraySize = 10;
-const void* const TypeProfilerTest::kConstNull = static_cast<const void*>(NULL);
-
-TEST_F(TypeProfilerTest, TestNormalProfiling) {
- int* dummy = new int(48);
- const std::type_info* type;
-
- type = LookupType(dummy);
- ASSERT_NE(kConstNull, type);
- EXPECT_STREQ(typeid(int).name(), type->name());
- delete dummy;
-
- type = LookupType(dummy);
- EXPECT_EQ(kConstNull, type);
-}
-
-TEST_F(TypeProfilerTest, TestNormalArrayProfiling) {
- int* dummy = new int[kDummyArraySize];
- const std::type_info* type;
-
- type = LookupType(dummy);
- ASSERT_NE(kConstNull, type);
- // For an array, the profiler remembers its base type.
- EXPECT_STREQ(typeid(int).name(), type->name());
- delete[] dummy;
-
- type = LookupType(dummy);
- EXPECT_EQ(kConstNull, type);
-}
-
-TEST_F(TypeProfilerTest, TestRepeatedNewAndDelete) {
- int *dummy[kDummyArraySize];
- const std::type_info* type;
- for (int i = 0; i < kDummyArraySize; ++i)
- dummy[i] = new int(i);
-
- for (int i = 0; i < kDummyArraySize; ++i) {
- type = LookupType(dummy[i]);
- ASSERT_NE(kConstNull, type);
- EXPECT_STREQ(typeid(int).name(), type->name());
- }
-
- for (int i = 0; i < kDummyArraySize; ++i) {
- delete dummy[i];
- type = LookupType(dummy[i]);
- ASSERT_EQ(kConstNull, type);
- }
-}
-
-TEST_F(TypeProfilerTest, TestMultipleNewWithDroppingDelete) {
- static const size_t large_size = 256 * 1024;
-
- char* dummy_char = new char[large_size / sizeof(*dummy_char)];
- const std::type_info* type;
-
- type = LookupType(dummy_char);
- ASSERT_NE(kConstNull, type);
- EXPECT_STREQ(typeid(char).name(), type->name());
-
- // Call "::operator delete" directly to drop __op_delete_intercept__.
- ::operator delete[](dummy_char);
-
- type = LookupType(dummy_char);
- ASSERT_NE(kConstNull, type);
- EXPECT_STREQ(typeid(char).name(), type->name());
-
- // Allocates a little different size.
- int* dummy_int = new int[large_size / sizeof(*dummy_int) - 1];
-
- // We expect that tcmalloc returns the same address for these large (over 32k)
- // allocation calls. It usually happens, but maybe probablistic.
- ASSERT_EQ(static_cast<void*>(dummy_char), static_cast<void*>(dummy_int)) <<
- "two new (malloc) calls didn't return the same address; retry it.";
-
- type = LookupType(dummy_int);
- ASSERT_NE(kConstNull, type);
- EXPECT_STREQ(typeid(int).name(), type->name());
-
- delete[] dummy_int;
-
- type = LookupType(dummy_int);
- EXPECT_EQ(kConstNull, type);
-}
-
-TEST_F(TypeProfilerTest, TestProfileDeleteWithoutProfiledNew) {
- // 'dummy' should be new'ed in this test before intercept functions are set.
- ResetInterceptFunctions();
-
- int* dummy = new int(48);
- const std::type_info* type;
-
- // Set intercept functions again after 'dummy' is new'ed.
- SetInterceptFunctions();
-
- delete dummy;
-
- type = LookupType(dummy);
- EXPECT_EQ(kConstNull, type);
-
- ResetInterceptFunctions();
-}
-
-TEST_F(TypeProfilerTest, TestProfileNewWithoutProfiledDelete) {
- int* dummy = new int(48);
- const std::type_info* type;
-
- EXPECT_TRUE(Controller::IsProfiling());
-
- // Stop profiling before deleting 'dummy'.
- Controller::Stop();
- EXPECT_FALSE(Controller::IsProfiling());
-
- delete dummy;
-
- // NOTE: We accept that a profile entry remains when a profiled object is
- // deleted after Controller::Stop().
- type = LookupType(dummy);
- ASSERT_NE(kConstNull, type);
- EXPECT_STREQ(typeid(int).name(), type->name());
-
- Controller::Restart();
- EXPECT_TRUE(Controller::IsProfiling());
-
- // Remove manually since 'dummy' is not removed from type_profiler_map.
- EraseType(dummy);
-}
-
-} // namespace type_profiler
-} // namespace base
-
-#endif // defined(TYPE_PROFILING)
-
-int main(int argc, char** argv) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
diff --git a/android/java/src/org/chromium/base/PathUtils.java b/android/java/src/org/chromium/base/PathUtils.java
index 6f29732..c80124f 100644
--- a/android/java/src/org/chromium/base/PathUtils.java
+++ b/android/java/src/org/chromium/base/PathUtils.java
@@ -8,6 +8,7 @@
import android.content.pm.ApplicationInfo;
import android.os.AsyncTask;
import android.os.Environment;
+import android.os.StrictMode;
import org.chromium.base.annotations.CalledByNative;
@@ -115,8 +116,14 @@
@SuppressWarnings("unused")
@CalledByNative
private static String getDownloadsDirectory(Context appContext) {
- return Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DOWNLOADS)
- .getPath();
+ // Temporarily allowing disk access while fixing. TODO: http://crbug.com/508615
+ StrictMode.ThreadPolicy oldPolicy = StrictMode.allowThreadDiskReads();
+ try {
+ return Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DOWNLOADS)
+ .getPath();
+ } finally {
+ StrictMode.setThreadPolicy(oldPolicy);
+ }
}
/**
diff --git a/android/scoped_java_ref.h b/android/scoped_java_ref.h
index 6274969..94f94f5 100644
--- a/android/scoped_java_ref.h
+++ b/android/scoped_java_ref.h
@@ -193,6 +193,13 @@
// This class is only good for use on the thread it was created on so
// it's safe to cache the non-threadsafe JNIEnv* inside this object.
JNIEnv* env_;
+
+ // Prevent ScopedJavaLocalRef(JNIEnv*, T obj) from being used to take
+ // ownership of a JavaParamRef's underlying object - parameters are not
+ // allowed to be deleted and so should not be owned by ScopedJavaLocalRef.
+ // TODO(torne): this can be removed once JavaParamRef no longer has an
+ // implicit conversion back to T.
+ ScopedJavaLocalRef(JNIEnv* env, const JavaParamRef<T>& other);
};
// Holds a global reference to a Java object. The global reference is scoped
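For context, the declaration added above uses the classic pre-C++11 idiom for disabling an overload: declare it private and never define it, so any caller that resolves to it fails to compile. A minimal sketch of the idiom (names invented for illustration):

class Ref {
 public:
  explicit Ref(int* ptr) : ptr_(ptr) {}

 private:
  // Declared but never defined: any use outside the class fails the access
  // check at compile time (and a use inside it would fail at link time).
  Ref(const double* ptr);

  int* ptr_;
};

// Ref r(static_cast<const double*>(nullptr));  // error: constructor is private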
diff --git a/basictypes.h b/basictypes.h
index d71abd9..e167466 100644
--- a/basictypes.h
+++ b/basictypes.h
@@ -28,7 +28,8 @@
typedef int64_t int64;
typedef uint64_t uint64;
-// DEPRECATED: Please use std::numeric_limits (from <limits>) instead.
+// DEPRECATED: Please use std::numeric_limits (from <limits>) or
+// (U)INT{8,16,32,64}_{MIN,MAX} in case of globals (and include <stdint.h>).
const uint8 kuint8max = 0xFF;
const uint16 kuint16max = 0xFFFF;
const uint32 kuint32max = 0xFFFFFFFF;
diff --git a/containers/scoped_ptr_map.h b/containers/scoped_ptr_map.h
index a4605e3..622a39d 100644
--- a/containers/scoped_ptr_map.h
+++ b/containers/scoped_ptr_map.h
@@ -42,9 +42,9 @@
ScopedPtrMap() {}
~ScopedPtrMap() { clear(); }
- ScopedPtrMap(ScopedPtrMap<Key, ScopedPtr>&& other) { swap(other); }
+ ScopedPtrMap(ScopedPtrMap&& other) { swap(other); }
- ScopedPtrMap& operator=(ScopedPtrMap<Key, ScopedPtr>&& rhs) {
+ ScopedPtrMap& operator=(ScopedPtrMap&& rhs) {
swap(rhs);
return *this;
}
@@ -61,7 +61,7 @@
const_iterator begin() const { return data_.begin(); }
const_iterator end() const { return data_.end(); }
- void swap(ScopedPtrMap<Key, ScopedPtr>& other) { data_.swap(other.data_); }
+ void swap(ScopedPtrMap& other) { data_.swap(other.data_); }
void clear() { STLDeleteValues(&data_); }
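An aside on why this matters beyond brevity: inside the template, the injected class name ScopedPtrMap denotes the current specialization including its Compare parameter, whereas ScopedPtrMap<Key, ScopedPtr> named only the default-Compare specialization. With the shorter spelling, move construction/assignment and swap apply to maps with custom comparators, which the new tests in the next hunk exercise. A sketch (assuming the base:: namespace and Chromium's Pass() move convention):

#include <functional>

#include "base/containers/scoped_ptr_map.h"
#include "base/memory/scoped_ptr.h"

void MoveWithCustomComparator() {
  base::ScopedPtrMap<int, scoped_ptr<int>, std::greater<int>> a;
  a.insert(3, make_scoped_ptr(new int(3)));
  // Previously the move constructor's parameter was spelled
  // ScopedPtrMap<Key, ScopedPtr>&&, i.e. the default-Compare specialization,
  // so this move would not have resolved for a std::greater<int> map:
  base::ScopedPtrMap<int, scoped_ptr<int>, std::greater<int>> b(a.Pass());
}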
diff --git a/containers/scoped_ptr_map_unittest.cc b/containers/scoped_ptr_map_unittest.cc
index 46843b3..706b2ed 100644
--- a/containers/scoped_ptr_map_unittest.cc
+++ b/containers/scoped_ptr_map_unittest.cc
@@ -147,15 +147,30 @@
TEST(ScopedPtrMapTest, Compare) {
// Construct a ScopedPtrMap with a custom comparison function.
- bool destroyed = false;
- ScopedPtrMap<int, scoped_ptr<ScopedDestroyer>, std::greater<int>> scoped_map;
- scoped_map.insert(0, make_scoped_ptr(new ScopedDestroyer(&destroyed)));
- scoped_map.insert(1, make_scoped_ptr(new ScopedDestroyer(&destroyed)));
+ ScopedPtrMap<int, scoped_ptr<int>, std::greater<int>> scoped_map1;
+ scoped_map1.insert(0, make_scoped_ptr(new int(0)));
+ scoped_map1.insert(1, make_scoped_ptr(new int(0)));
- auto it = scoped_map.begin();
+ auto it = scoped_map1.begin();
EXPECT_EQ(1, it->first);
++it;
EXPECT_EQ(0, it->first);
+
+ // Test the move constructor.
+ ScopedPtrMap<int, scoped_ptr<int>, std::greater<int>> scoped_map2(
+ scoped_map1.Pass());
+ EXPECT_EQ(2u, scoped_map2.size());
+ EXPECT_TRUE(scoped_map1.empty());
+
+ // Test move assignment.
+ scoped_map1 = scoped_map2.Pass();
+ EXPECT_EQ(2u, scoped_map1.size());
+ EXPECT_TRUE(scoped_map2.empty());
+
+ // Test swap.
+ scoped_map2.swap(scoped_map1);
+ EXPECT_EQ(2u, scoped_map2.size());
+ EXPECT_TRUE(scoped_map1.empty());
}
TEST(ScopedPtrMapTest, Scope) {
diff --git a/linux_util.cc b/linux_util.cc
index d6cd504..d94588f 100644
--- a/linux_util.cc
+++ b/linux_util.cc
@@ -37,7 +37,7 @@
public:
// Retrieves the Singleton.
static LinuxDistroHelper* GetInstance() {
- return Singleton<LinuxDistroHelper>::get();
+ return base::Singleton<LinuxDistroHelper>::get();
}
// The simple state machine goes from:
@@ -106,7 +106,7 @@
argv.push_back("lsb_release");
argv.push_back("-d");
std::string output;
- base::GetAppOutput(CommandLine(argv), &output);
+ GetAppOutput(CommandLine(argv), &output);
if (output.length() > 0) {
// lsb_release -d should return: Description:<tab>Distro Info
const char field[] = "Description:\t";
@@ -124,8 +124,8 @@
void SetLinuxDistro(const std::string& distro) {
std::string trimmed_distro;
- base::TrimWhitespaceASCII(distro, base::TRIM_ALL, &trimmed_distro);
- base::strlcpy(g_linux_distro, trimmed_distro.c_str(), kDistroSize);
+ TrimWhitespaceASCII(distro, TRIM_ALL, &trimmed_distro);
+ strlcpy(g_linux_distro, trimmed_distro.c_str(), kDistroSize);
}
pid_t FindThreadIDWithSyscall(pid_t pid, const std::string& expected_data,
diff --git a/logging_win.cc b/logging_win.cc
index 53cc37c..319ae8a 100644
--- a/logging_win.cc
+++ b/logging_win.cc
@@ -18,8 +18,8 @@
}
LogEventProvider* LogEventProvider::GetInstance() {
- return Singleton<LogEventProvider,
- StaticMemorySingletonTraits<LogEventProvider> >::get();
+ return base::Singleton<LogEventProvider, base::StaticMemorySingletonTraits<
+ LogEventProvider>>::get();
}
bool LogEventProvider::LogMessage(logging::LogSeverity severity,
diff --git a/logging_win.h b/logging_win.h
index aa48e22..de34a64 100644
--- a/logging_win.h
+++ b/logging_win.h
@@ -12,8 +12,10 @@
#include "base/win/event_trace_provider.h"
#include "base/logging.h"
+namespace base {
template <typename Type>
struct StaticMemorySingletonTraits;
+} // namespace base
namespace logging {
@@ -71,7 +73,7 @@
// restored in OnEventsDisabled.
logging::LogSeverity old_log_level_;
- friend struct StaticMemorySingletonTraits<LogEventProvider>;
+ friend struct base::StaticMemorySingletonTraits<LogEventProvider>;
DISALLOW_COPY_AND_ASSIGN(LogEventProvider);
};
diff --git a/memory/singleton.h b/memory/singleton.h
index e50bdc0..7319699 100644
--- a/memory/singleton.h
+++ b/memory/singleton.h
@@ -36,10 +36,10 @@
// we can implement the more complicated pieces out of line in the .cc file.
BASE_EXPORT subtle::AtomicWord WaitForInstance(subtle::AtomicWord* instance);
-} // namespace internal
-} // namespace base
+class DeleteTraceLogForTesting;
-// TODO(joth): Move more of this file into namespace base
+} // namespace internal
+
// Default traits for Singleton<Type>. Calls operator new and operator delete on
// the object. Registers automatic deletion at process exit.
@@ -110,7 +110,7 @@
// this is traits for returning NULL.
static Type* New() {
// Only constructs once and returns pointer; otherwise returns NULL.
- if (base::subtle::NoBarrier_AtomicExchange(&dead_, 1))
+ if (subtle::NoBarrier_AtomicExchange(&dead_, 1))
return NULL;
return new(buffer_.void_data()) Type();
@@ -125,20 +125,19 @@
static const bool kAllowedToAccessOnNonjoinableThread = true;
// Exposed for unittesting.
- static void Resurrect() {
- base::subtle::NoBarrier_Store(&dead_, 0);
- }
+ static void Resurrect() { subtle::NoBarrier_Store(&dead_, 0); }
private:
- static base::AlignedMemory<sizeof(Type), ALIGNOF(Type)> buffer_;
+ static AlignedMemory<sizeof(Type), ALIGNOF(Type)> buffer_;
// Signal the object was already deleted, so it is not revived.
- static base::subtle::Atomic32 dead_;
+ static subtle::Atomic32 dead_;
};
-template <typename Type> base::AlignedMemory<sizeof(Type), ALIGNOF(Type)>
+template <typename Type>
+AlignedMemory<sizeof(Type), ALIGNOF(Type)>
StaticMemorySingletonTraits<Type>::buffer_;
-template <typename Type> base::subtle::Atomic32
- StaticMemorySingletonTraits<Type>::dead_ = 0;
+template <typename Type>
+subtle::Atomic32 StaticMemorySingletonTraits<Type>::dead_ = 0;
// The Singleton<Type, Traits, DifferentiatingType> class manages a single
// instance of Type which will be created on first use and will be destroyed at
@@ -190,7 +189,7 @@
// RAE = kRegisterAtExit
//
// On every platform, if Traits::RAE is true, the singleton will be destroyed at
-// process exit. More precisely it uses base::AtExitManager which requires an
+// process exit. More precisely it uses AtExitManager which requires an
// object of this type to be instantiated. AtExitManager mimics the semantics
// of atexit() such as LIFO order but under Windows is safer to call. For more
// information see at_exit.h.
@@ -209,6 +208,7 @@
// (b) Your factory function must never throw an exception. This class is not
// exception-safe.
//
+
template <typename Type,
typename Traits = DefaultSingletonTraits<Type>,
typename DifferentiatingType = Type>
@@ -219,7 +219,7 @@
friend Type* Type::GetInstance();
// Allow TraceLog tests to test tracing after OnExit.
- friend class DeleteTraceLogForTesting;
+ friend class internal::DeleteTraceLogForTesting;
// This class is safe to be constructed and copy-constructed since it has no
// member.
@@ -229,36 +229,36 @@
#ifndef NDEBUG
// Avoid making TLS lookup on release builds.
if (!Traits::kAllowedToAccessOnNonjoinableThread)
- base::ThreadRestrictions::AssertSingletonAllowed();
+ ThreadRestrictions::AssertSingletonAllowed();
#endif
// The load has acquire memory ordering as the thread which reads the
// instance_ pointer must acquire visibility over the singleton data.
- base::subtle::AtomicWord value = base::subtle::Acquire_Load(&instance_);
- if (value != 0 && value != base::internal::kBeingCreatedMarker) {
+ subtle::AtomicWord value = subtle::Acquire_Load(&instance_);
+ if (value != 0 && value != internal::kBeingCreatedMarker) {
return reinterpret_cast<Type*>(value);
}
// Object isn't created yet, maybe we will get to create it, let's try...
- if (base::subtle::Acquire_CompareAndSwap(
- &instance_, 0, base::internal::kBeingCreatedMarker) == 0) {
+ if (subtle::Acquire_CompareAndSwap(&instance_, 0,
+ internal::kBeingCreatedMarker) == 0) {
// instance_ was NULL and is now kBeingCreatedMarker. Only one thread
// will ever get here. Threads might be spinning on us, and they will
// stop right after we do this store.
Type* newval = Traits::New();
// Releases the visibility over instance_ to the readers.
- base::subtle::Release_Store(
- &instance_, reinterpret_cast<base::subtle::AtomicWord>(newval));
+ subtle::Release_Store(&instance_,
+ reinterpret_cast<subtle::AtomicWord>(newval));
if (newval != NULL && Traits::kRegisterAtExit)
- base::AtExitManager::RegisterCallback(OnExit, NULL);
+ AtExitManager::RegisterCallback(OnExit, NULL);
return newval;
}
// We hit a race. Wait for the other thread to complete it.
- value = base::internal::WaitForInstance(&instance_);
+ value = internal::WaitForInstance(&instance_);
return reinterpret_cast<Type*>(value);
}
@@ -269,15 +269,15 @@
static void OnExit(void* /*unused*/) {
  // AtExit should only ever be registered after the singleton instance was
// created. We should only ever get here with a valid instance_ pointer.
- Traits::Delete(
- reinterpret_cast<Type*>(base::subtle::NoBarrier_Load(&instance_)));
+ Traits::Delete(reinterpret_cast<Type*>(subtle::NoBarrier_Load(&instance_)));
instance_ = 0;
}
- static base::subtle::AtomicWord instance_;
+ static subtle::AtomicWord instance_;
};
template <typename Type, typename Traits, typename DifferentiatingType>
-base::subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>::
- instance_ = 0;
+subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>::instance_ = 0;
+
+} // namespace base
#endif // BASE_MEMORY_SINGLETON_H_
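With the whole header now inside namespace base, a caller-side sketch of the intended usage (illustrative class name; the friend declarations match what the header expects):

class MyService {
 public:
  static MyService* GetInstance() {
    // Singleton<> befriends Type::GetInstance(), so this is the one
    // sanctioned accessor.
    return base::Singleton<MyService>::get();
  }

 private:
  MyService() {}
  ~MyService() {}

  // DefaultSingletonTraits calls the private constructor and destructor.
  friend struct base::DefaultSingletonTraits<MyService>;
};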
diff --git a/memory/singleton_unittest.cc b/memory/singleton_unittest.cc
index dbff007..e8788ba 100644
--- a/memory/singleton_unittest.cc
+++ b/memory/singleton_unittest.cc
@@ -6,6 +6,7 @@
#include "base/memory/singleton.h"
#include "testing/gtest/include/gtest/gtest.h"
+namespace base {
namespace {
COMPILE_ASSERT(DefaultSingletonTraits<int>::kRegisterAtExit == true, a);
@@ -115,7 +116,7 @@
~AlignedTestSingleton() {}
static AlignedTestSingleton* GetInstance() {
return Singleton<AlignedTestSingleton,
- StaticMemorySingletonTraits<AlignedTestSingleton> >::get();
+ StaticMemorySingletonTraits<AlignedTestSingleton>>::get();
}
Type type_;
@@ -147,7 +148,6 @@
return &CallbackSingletonWithStaticTrait::GetInstance()->callback_;
}
-} // namespace
class SingletonTest : public testing::Test {
public:
@@ -207,7 +207,7 @@
CallbackFunc* static_singleton;
{
- base::ShadowingAtExitManager sem;
+ ShadowingAtExitManager sem;
{
singleton_int = SingletonInt();
}
@@ -241,7 +241,7 @@
EXPECT_EQ(NULL, GetStaticSingleton());
{
- base::ShadowingAtExitManager sem;
+ ShadowingAtExitManager sem;
// Verify that the variables were reset.
{
singleton_int = SingletonInt();
@@ -285,3 +285,6 @@
EXPECT_ALIGNED(align128, 128);
EXPECT_ALIGNED(align4096, 4096);
}
+
+} // namespace
+} // namespace base
diff --git a/numerics/safe_conversions_impl.h b/numerics/safe_conversions_impl.h
index 4157067..f4bc916 100644
--- a/numerics/safe_conversions_impl.h
+++ b/numerics/safe_conversions_impl.h
@@ -108,6 +108,55 @@
(is_in_lower_bound ? 0 : RANGE_UNDERFLOW));
}
+// The following helper template addresses a corner case in range checks for
+// conversion from a floating-point type to an integral type of smaller range
+// but larger precision (e.g. float -> unsigned). The problem is as follows:
+// 1. Integral maximum is always one less than a power of two, so it must be
+// truncated to fit the mantissa of the floating point. The direction of
+// rounding is implementation defined, but by default it's always IEEE
+// floats, which round to nearest and thus result in a value of larger
+// magnitude than the integral value.
+// Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
+// // is 4294967295u.
+// 2. If the floating point value is equal to the promoted integral maximum
+// value, a range check will erroneously pass.
+// Example: (4294967296f <= 4294967295u) // This is true due to a precision
+// // loss in rounding up to float.
+// 3. When the floating point value is then converted to an integral, the
+// resulting value is out of range for the target integral type and
+// thus is implementation defined.
+// Example: unsigned u = (float)INT_MAX; // u will typically overflow to 0.
+// To fix this bug we manually truncate the maximum value when the destination
+// type is an integral of larger precision than the source floating-point type,
+// such that the resulting maximum is represented exactly as a floating point.
+template <typename Dst, typename Src>
+struct NarrowingRange {
+ typedef typename std::numeric_limits<Src> SrcLimits;
+ typedef typename std::numeric_limits<Dst> DstLimits;
+
+ static Dst max() {
+ // The following logic avoids warnings where the max function is
+ // instantiated with invalid values for a bit shift (even though
+ // such a function can never be called).
+ static const int shift =
+ (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
+ SrcLimits::digits < DstLimits::digits && SrcLimits::is_iec559 &&
+ DstLimits::is_integer)
+ ? (DstLimits::digits - SrcLimits::digits)
+ : 0;
+
+ // We use UINTMAX_C below to avoid compiler warnings about shifting floating
+ // points. Since it's a compile time calculation, it shouldn't have any
+ // performance impact.
+ return DstLimits::max() - static_cast<Dst>((UINTMAX_C(1) << shift) - 1);
+ }
+
+ static Dst min() {
+ return std::numeric_limits<Dst>::is_iec559 ? -DstLimits::max()
+ : DstLimits::min();
+ }
+};
+
template <
typename Dst,
typename Src,
@@ -147,11 +196,8 @@
INTEGER_REPRESENTATION_SIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
static RangeConstraint Check(Src value) {
- return std::numeric_limits<Dst>::is_iec559
- ? GetRangeConstraint((value < std::numeric_limits<Dst>::max()),
- (value > -std::numeric_limits<Dst>::max()))
- : GetRangeConstraint((value < std::numeric_limits<Dst>::max()),
- (value > std::numeric_limits<Dst>::min()));
+ return GetRangeConstraint((value <= NarrowingRange<Dst, Src>::max()),
+ (value >= NarrowingRange<Dst, Src>::min()));
}
};
@@ -163,7 +209,7 @@
INTEGER_REPRESENTATION_UNSIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
static RangeConstraint Check(Src value) {
- return GetRangeConstraint(value < std::numeric_limits<Dst>::max(), true);
+ return GetRangeConstraint(value <= NarrowingRange<Dst, Src>::max(), true);
}
};
@@ -178,7 +224,7 @@
return sizeof(Dst) > sizeof(Src)
? RANGE_VALID
: GetRangeConstraint(
- value < static_cast<Src>(std::numeric_limits<Dst>::max()),
+ value <= static_cast<Src>(NarrowingRange<Dst, Src>::max()),
true);
}
};
@@ -195,7 +241,7 @@
return (MaxExponent<Dst>::value >= MaxExponent<Src>::value)
? GetRangeConstraint(true, value >= static_cast<Src>(0))
: GetRangeConstraint(
- value < static_cast<Src>(std::numeric_limits<Dst>::max()),
+ value <= static_cast<Src>(NarrowingRange<Dst, Src>::max()),
value >= static_cast<Src>(0));
}
};
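A standalone repro (illustrative, matching the scenario described in the NarrowingRange comment above) of the corner case the new code guards against:

#include <limits.h>
#include <stdio.h>

int main() {
  // UINT_MAX (2^32 - 1) does not fit in a float's 24-bit mantissa; with
  // round-to-nearest it rounds up to exactly 2^32.
  float f = static_cast<float>(UINT_MAX);  // 4294967296.0f

  // The naive range check promotes UINT_MAX to float on the right-hand side,
  // where it rounds to the same 4294967296.0f, so the check passes even
  // though f is out of range for unsigned.
  printf("naive check passes: %d\n", f <= UINT_MAX);  // prints 1

  // Actually converting f to unsigned would be out of range and therefore
  // not well defined; the comment above notes it typically overflows to 0.
  // unsigned u = static_cast<unsigned>(f);
  return 0;
}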
diff --git a/numerics/safe_numerics_unittest.cc b/numerics/safe_numerics_unittest.cc
index 6f9a966..bad0f57 100644
--- a/numerics/safe_numerics_unittest.cc
+++ b/numerics/safe_numerics_unittest.cc
@@ -18,6 +18,7 @@
using std::numeric_limits;
using base::CheckedNumeric;
using base::checked_cast;
+using base::IsValueInRangeForNumericType;
using base::SizeT;
using base::StrictNumeric;
using base::saturated_cast;
@@ -36,6 +37,26 @@
#pragma warning(disable:4756)
#endif
+// This is a helper function for finding the maximum value in Src that can be
+// wholly represented as the destination floating-point type.
+template <typename Dst, typename Src>
+Dst GetMaxConvertibleToFloat() {
+ typedef numeric_limits<Dst> DstLimits;
+ typedef numeric_limits<Src> SrcLimits;
+ static_assert(SrcLimits::is_specialized, "Source must be numeric.");
+ static_assert(DstLimits::is_specialized, "Destination must be numeric.");
+ CHECK(DstLimits::is_iec559);
+
+ if (SrcLimits::digits <= DstLimits::digits &&
+ MaxExponent<Src>::value <= MaxExponent<Dst>::value)
+ return SrcLimits::max();
+ Src max = SrcLimits::max() / 2 + (SrcLimits::is_integer ? 1 : 0);
+ while (max != static_cast<Src>(static_cast<Dst>(max))) {
+ max /= 2;
+ }
+ return static_cast<Dst>(max);
+}
+
// Helper macros to wrap displaying the conversion types and line numbers.
#define TEST_EXPECTED_VALIDITY(expected, actual) \
EXPECT_EQ(expected, CheckedNumeric<Dst>(actual).validity()) \
@@ -370,6 +391,18 @@
TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::infinity());
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::infinity() * -1);
TEST_EXPECTED_RANGE(RANGE_INVALID, SrcLimits::quiet_NaN());
+ if (DstLimits::is_integer) {
+ if (SrcLimits::digits < DstLimits::digits) {
+ TEST_EXPECTED_RANGE(RANGE_OVERFLOW,
+ static_cast<Src>(DstLimits::max()));
+ } else {
+ TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::max()));
+ }
+ TEST_EXPECTED_RANGE(
+ RANGE_VALID,
+ static_cast<Src>(GetMaxConvertibleToFloat<Src, Dst>()));
+ TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::min()));
+ }
} else if (SrcLimits::is_signed) {
TEST_EXPECTED_VALUE(-1, checked_dst - static_cast<Src>(1));
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
@@ -428,6 +461,18 @@
TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::infinity());
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::infinity() * -1);
TEST_EXPECTED_RANGE(RANGE_INVALID, SrcLimits::quiet_NaN());
+ if (DstLimits::is_integer) {
+ if (SrcLimits::digits < DstLimits::digits) {
+ TEST_EXPECTED_RANGE(RANGE_OVERFLOW,
+ static_cast<Src>(DstLimits::max()));
+ } else {
+ TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::max()));
+ }
+ TEST_EXPECTED_RANGE(
+ RANGE_VALID,
+ static_cast<Src>(GetMaxConvertibleToFloat<Src, Dst>()));
+ TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::min()));
+ }
} else {
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
}
@@ -600,3 +645,77 @@
EXPECT_EQ(numeric_limits<int>::max(), saturated_cast<int>(double_large_int));
}
+TEST(SafeNumerics, IsValueInRangeForNumericType) {
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(0));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(2));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(-1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(0xffffffffu));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0xffffffff)));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0x100000000)));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0x100000001)));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(
+ std::numeric_limits<int32_t>::min()));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(
+ std::numeric_limits<int64_t>::min()));
+
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(2));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(-1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0x7fffffff));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0x7fffffffu));
+ EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(0x80000000u));
+ EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(0xffffffffu));
+ EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0x80000000)));
+ EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0xffffffff)));
+ EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0x100000000)));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(
+ std::numeric_limits<int32_t>::min()));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(
+ implicit_cast<int64_t>(std::numeric_limits<int32_t>::min())));
+ EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(
+ implicit_cast<int64_t>(std::numeric_limits<int32_t>::min()) - 1));
+ EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(
+ std::numeric_limits<int64_t>::min()));
+
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(0));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(2));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(-1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(0xffffffffu));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0xffffffff)));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0x100000000)));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0x100000001)));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(
+ std::numeric_limits<int32_t>::min()));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(INT64_C(-1)));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(
+ std::numeric_limits<int64_t>::min()));
+
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(2));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(-1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0x7fffffff));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0x7fffffffu));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0x80000000u));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0xffffffffu));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(INT64_C(0x80000000)));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(INT64_C(0xffffffff)));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(INT64_C(0x100000000)));
+ EXPECT_TRUE(
+ IsValueInRangeForNumericType<int64_t>(INT64_C(0x7fffffffffffffff)));
+ EXPECT_TRUE(
+ IsValueInRangeForNumericType<int64_t>(UINT64_C(0x7fffffffffffffff)));
+ EXPECT_FALSE(
+ IsValueInRangeForNumericType<int64_t>(UINT64_C(0x8000000000000000)));
+ EXPECT_FALSE(
+ IsValueInRangeForNumericType<int64_t>(UINT64_C(0xffffffffffffffff)));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
+ std::numeric_limits<int32_t>::min()));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
+ implicit_cast<int64_t>(std::numeric_limits<int32_t>::min())));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
+ std::numeric_limits<int64_t>::min()));
+}
diff --git a/process/launch_posix.cc b/process/launch_posix.cc
index 96685cb..6c2f779 100644
--- a/process/launch_posix.cc
+++ b/process/launch_posix.cc
@@ -22,7 +22,6 @@
#include <limits>
#include <set>
-#include "base/allocator/type_profiler_control.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/debug/debugger.h"
@@ -392,11 +391,6 @@
}
}
- // Stop type-profiler.
- // The profiler should be stopped between fork and exec since it inserts
- // locks at new/delete expressions. See http://crbug.com/36678.
- base::type_profiler::Controller::Stop();
-
if (options.maximize_rlimits) {
// Some resource limits need to be maximal in this child.
for (size_t i = 0; i < options.maximize_rlimits->size(); ++i) {
@@ -593,11 +587,6 @@
if (dev_null < 0)
_exit(127);
- // Stop type-profiler.
- // The profiler should be stopped between fork and exec since it inserts
- // locks at new/delete expressions. See http://crbug.com/36678.
- base::type_profiler::Controller::Stop();
-
fd_shuffle1.push_back(InjectionArc(pipe_fd[1], STDOUT_FILENO, true));
fd_shuffle1.push_back(InjectionArc(
include_stderr ? pipe_fd[1] : dev_null, STDERR_FILENO, true));
diff --git a/sync_socket.h b/sync_socket.h
index 36d6bc1..201fb1c 100644
--- a/sync_socket.h
+++ b/sync_socket.h
@@ -83,10 +83,8 @@
TimeDelta timeout);
// Returns the number of bytes available. If non-zero, Receive() will not
- // block when called. NOTE: Some implementations cannot reliably
- // determine the number of bytes available so avoid using the returned
- // size as a promise and simply test against zero.
- size_t Peek();
+ // block when called.
+ virtual size_t Peek();
// Extracts the contained handle. Used for transferring between
// processes.
diff --git a/synchronization/condition_variable.h b/synchronization/condition_variable.h
index 5d8507d..91e4d13 100644
--- a/synchronization/condition_variable.h
+++ b/synchronization/condition_variable.h
@@ -73,6 +73,7 @@
#include "base/base_export.h"
#include "base/basictypes.h"
+#include "base/logging.h"
#include "base/synchronization/lock.h"
namespace base {
@@ -104,7 +105,7 @@
#elif defined(OS_POSIX)
pthread_cond_t condition_;
pthread_mutex_t* user_mutex_;
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
base::Lock* user_lock_; // Needed to adjust shadow lock state on wait.
#endif
diff --git a/synchronization/condition_variable_posix.cc b/synchronization/condition_variable_posix.cc
index c9a2ec4..03f8184 100644
--- a/synchronization/condition_variable_posix.cc
+++ b/synchronization/condition_variable_posix.cc
@@ -7,7 +7,6 @@
#include <errno.h>
#include <sys/time.h>
-#include "base/logging.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
@@ -16,8 +15,9 @@
ConditionVariable::ConditionVariable(Lock* user_lock)
: user_mutex_(user_lock->lock_.native_handle())
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
- , user_lock_(user_lock)
+#if DCHECK_IS_ON()
+ ,
+ user_lock_(user_lock)
#endif
{
int rv = 0;
@@ -62,12 +62,12 @@
void ConditionVariable::Wait() {
base::ThreadRestrictions::AssertWaitAllowed();
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
user_lock_->CheckHeldAndUnmark();
#endif
int rv = pthread_cond_wait(&condition_, user_mutex_);
DCHECK_EQ(0, rv);
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
user_lock_->CheckUnheldAndMark();
#endif
}
@@ -80,7 +80,7 @@
relative_time.tv_nsec =
(usecs % Time::kMicrosecondsPerSecond) * Time::kNanosecondsPerMicrosecond;
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
user_lock_->CheckHeldAndUnmark();
#endif
@@ -118,7 +118,7 @@
#endif // OS_MACOSX
DCHECK(rv == 0 || rv == ETIMEDOUT);
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
user_lock_->CheckUnheldAndMark();
#endif
}
diff --git a/synchronization/condition_variable_win.cc b/synchronization/condition_variable_win.cc
index 470e564..4256ac8 100644
--- a/synchronization/condition_variable_win.cc
+++ b/synchronization/condition_variable_win.cc
@@ -8,7 +8,6 @@
#include <stack>
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
@@ -99,7 +98,7 @@
DWORD timeout = static_cast<DWORD>(max_time.InMilliseconds());
CRITICAL_SECTION* cs = user_lock_.lock_.native_handle();
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
user_lock_.CheckHeldAndUnmark();
#endif
diff --git a/synchronization/lock.cc b/synchronization/lock.cc
index b1576c5..03297ad 100644
--- a/synchronization/lock.cc
+++ b/synchronization/lock.cc
@@ -6,10 +6,9 @@
// is functionally a wrapper around the LockImpl class, so the only
// real intelligence in the class is in the debugging logic.
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
-
#include "base/synchronization/lock.h"
-#include "base/logging.h"
+
+#if DCHECK_IS_ON()
namespace base {
@@ -36,4 +35,4 @@
} // namespace base
-#endif // !NDEBUG || DCHECK_ALWAYS_ON
+#endif // DCHECK_IS_ON()
diff --git a/synchronization/lock.h b/synchronization/lock.h
index f384e41..b6afecb 100644
--- a/synchronization/lock.h
+++ b/synchronization/lock.h
@@ -6,6 +6,7 @@
#define BASE_SYNCHRONIZATION_LOCK_H_
#include "base/base_export.h"
+#include "base/logging.h"
#include "base/synchronization/lock_impl.h"
#include "base/threading/platform_thread.h"
@@ -16,8 +17,8 @@
// AssertAcquired() method.
class BASE_EXPORT Lock {
public:
-#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
- // Optimized wrapper implementation
+#if !DCHECK_IS_ON()
+ // Optimized wrapper implementation
Lock() : lock_() {}
~Lock() {}
void Acquire() { lock_.Lock(); }
@@ -56,7 +57,7 @@
}
void AssertAcquired() const;
-#endif // NDEBUG && !DCHECK_ALWAYS_ON
+#endif // !DCHECK_IS_ON()
#if defined(OS_POSIX)
// The posix implementation of ConditionVariable needs to be able
@@ -70,7 +71,7 @@
#endif
private:
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
// Members and routines taking care of locks assertions.
// Note that this checks for recursive locks and allows them
// if the variable is set. This is allowed by the underlying implementation
@@ -82,7 +83,7 @@
// All private data is implicitly protected by lock_.
// Be VERY careful to only access members under that lock.
base::PlatformThreadRef owning_thread_ref_;
-#endif // !NDEBUG || DCHECK_ALWAYS_ON
+#endif // DCHECK_IS_ON()
// Platform specific underlying lock implementation.
internal::LockImpl lock_;
diff --git a/test/test_support_android.cc b/test/test_support_android.cc
index 11a0871..5a02ef0 100644
--- a/test/test_support_android.cc
+++ b/test/test_support_android.cc
@@ -39,27 +39,21 @@
// when there are no pending messages.
class Waitable {
public:
- static Waitable* GetInstance() {
- return Singleton<Waitable>::get();
- }
+ static Waitable* GetInstance() { return base::Singleton<Waitable>::get(); }
- // Signals that there are more work to do.
- void Signal() {
- waitable_event_.Signal();
- }
+ // Signals that there is more work to do.
+ void Signal() { waitable_event_.Signal(); }
- // Blocks until more work is scheduled.
- void Block() {
- waitable_event_.Wait();
- }
+ // Blocks until more work is scheduled.
+ void Block() { waitable_event_.Wait(); }
- void Quit() {
- g_state->should_quit = true;
- Signal();
+ void Quit() {
+ g_state->should_quit = true;
+ Signal();
}
private:
- friend struct DefaultSingletonTraits<Waitable>;
+ friend struct base::DefaultSingletonTraits<Waitable>;
Waitable()
: waitable_event_(false, false) {
diff --git a/threading/thread_id_name_manager.h b/threading/thread_id_name_manager.h
index 927d25f..1ba7e13 100644
--- a/threading/thread_id_name_manager.h
+++ b/threading/thread_id_name_manager.h
@@ -13,10 +13,11 @@
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"
-template <typename T> struct DefaultSingletonTraits;
-
namespace base {
+template <typename T>
+struct DefaultSingletonTraits;
+
class BASE_EXPORT ThreadIdNameManager {
public:
static ThreadIdNameManager* GetInstance();
diff --git a/threading/thread_restrictions.cc b/threading/thread_restrictions.cc
index 871f2dc..00306c5 100644
--- a/threading/thread_restrictions.cc
+++ b/threading/thread_restrictions.cc
@@ -23,7 +23,7 @@
LazyInstance<ThreadLocalBoolean>::Leaky
g_wait_disallowed = LAZY_INSTANCE_INITIALIZER;
-} // anonymous namespace
+} // namespace
// static
bool ThreadRestrictions::SetIOAllowed(bool allowed) {
@@ -69,7 +69,7 @@
// static
void ThreadRestrictions::AssertWaitAllowed() {
if (g_wait_disallowed.Get().Get()) {
- LOG(FATAL) << "Waiting is not allowed to be used on this thread to prevent"
+ LOG(FATAL) << "Waiting is not allowed to be used on this thread to prevent "
<< "jank and deadlock.";
}
}
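The underlying bug, for the record: streaming two string fragments back to back inserts no separator, so without the trailing space the old message rendered as "...to preventjank and deadlock.". Illustration:

#include <iostream>

int main() {
  // Missing trailing space: the fragments run together in the output.
  std::cout << "Waiting is not allowed to be used on this thread to prevent"
            << "jank and deadlock.";  // prints "...preventjank and deadlock."
  return 0;
}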
diff --git a/trace_event/BUILD.gn b/trace_event/BUILD.gn
index 09c9380..a4e1e43 100644
--- a/trace_event/BUILD.gn
+++ b/trace_event/BUILD.gn
@@ -105,6 +105,7 @@
"process_memory_dump_unittest.cc",
"process_memory_maps_dump_provider_unittest.cc",
"process_memory_totals_dump_provider_unittest.cc",
+ "trace_config_memory_test_util.h",
"trace_config_unittest.cc",
"trace_event_argument_unittest.cc",
"trace_event_memory_unittest.cc",
diff --git a/trace_event/java_heap_dump_provider_android_unittest.cc b/trace_event/java_heap_dump_provider_android_unittest.cc
index 557ac45..35f3f17 100644
--- a/trace_event/java_heap_dump_provider_android_unittest.cc
+++ b/trace_event/java_heap_dump_provider_android_unittest.cc
@@ -13,7 +13,7 @@
TEST(JavaHeapDumpProviderTest, JavaHeapDump) {
auto jhdp = JavaHeapDumpProvider::GetInstance();
scoped_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
- MemoryDumpArgs dump_args = {MemoryDumpArgs::LevelOfDetail::HIGH};
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
jhdp->OnMemoryDump(dump_args, pmd.get());
}
diff --git a/trace_event/malloc_dump_provider.cc b/trace_event/malloc_dump_provider.cc
index 98b53da..7cdc34a 100644
--- a/trace_event/malloc_dump_provider.cc
+++ b/trace_event/malloc_dump_provider.cc
@@ -39,7 +39,7 @@
// dlmalloc the total is given by |arena| + |hblkhd|.
// For more details see link: http://goo.gl/fMR8lF.
MemoryAllocatorDump* outer_dump = pmd->CreateAllocatorDump("malloc");
- outer_dump->AddScalar("heap_virtual_size", MemoryAllocatorDump::kUnitsBytes,
+ outer_dump->AddScalar("virtual_size", MemoryAllocatorDump::kUnitsBytes,
info.arena + info.hblkhd);
// Total allocated space is given by |uordblks|.
diff --git a/trace_event/memory_allocator_dump.cc b/trace_event/memory_allocator_dump.cc
index 4037f94..76d53eb 100644
--- a/trace_event/memory_allocator_dump.cc
+++ b/trace_event/memory_allocator_dump.cc
@@ -16,7 +16,7 @@
namespace trace_event {
const char MemoryAllocatorDump::kNameSize[] = "size";
-const char MemoryAllocatorDump::kNameObjectsCount[] = "objects_count";
+const char MemoryAllocatorDump::kNameObjectCount[] = "object_count";
const char MemoryAllocatorDump::kTypeScalar[] = "scalar";
const char MemoryAllocatorDump::kTypeString[] = "string";
const char MemoryAllocatorDump::kUnitsBytes[] = "bytes";
diff --git a/trace_event/memory_allocator_dump.h b/trace_event/memory_allocator_dump.h
index 2ded173..6ad3d64 100644
--- a/trace_event/memory_allocator_dump.h
+++ b/trace_event/memory_allocator_dump.h
@@ -34,7 +34,7 @@
// Standard attribute |name|s for the AddScalar and AddString() methods.
static const char kNameSize[]; // To represent allocated space.
- static const char kNameObjectsCount[]; // To represent number of objects.
+ static const char kNameObjectCount[]; // To represent number of objects.
// Standard attribute |unit|s for the AddScalar and AddString() methods.
static const char kUnitsBytes[]; // Unit name to represent bytes.
diff --git a/trace_event/memory_allocator_dump_unittest.cc b/trace_event/memory_allocator_dump_unittest.cc
index 8a41513..f787e64 100644
--- a/trace_event/memory_allocator_dump_unittest.cc
+++ b/trace_event/memory_allocator_dump_unittest.cc
@@ -28,7 +28,7 @@
root_heap->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, 4096);
- root_heap->AddScalar(MemoryAllocatorDump::kNameObjectsCount,
+ root_heap->AddScalar(MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects, 42);
root_heap->AddScalar("attr1", "units1", 1234);
root_heap->AddString("attr2", "units2", "string_value");
@@ -38,7 +38,7 @@
pmd->CreateAllocatorDump("foobar_allocator/sub_heap");
sub_heap->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, 1);
- sub_heap->AddScalar(MemoryAllocatorDump::kNameObjectsCount,
+ sub_heap->AddScalar(MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects, 3);
pmd->CreateAllocatorDump("foobar_allocator/sub_heap/empty");
@@ -126,7 +126,7 @@
TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
FakeMemoryAllocatorDumpProvider fmadp;
ProcessMemoryDump pmd(make_scoped_refptr(new MemoryDumpSessionState()));
- MemoryDumpArgs dump_args = {MemoryDumpArgs::LevelOfDetail::HIGH};
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
fmadp.OnMemoryDump(dump_args, &pmd);
@@ -138,7 +138,7 @@
EXPECT_EQ("foobar_allocator", root_heap->absolute_name());
CheckScalar(root_heap, MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, 4096);
- CheckScalar(root_heap, MemoryAllocatorDump::kNameObjectsCount,
+ CheckScalar(root_heap, MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects, 42);
CheckScalar(root_heap, "attr1", "units1", 1234);
CheckString(root_heap, "attr2", MemoryAllocatorDump::kTypeString, "units2",
@@ -151,7 +151,7 @@
EXPECT_EQ("foobar_allocator/sub_heap", sub_heap->absolute_name());
CheckScalar(sub_heap, MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, 1);
- CheckScalar(sub_heap, MemoryAllocatorDump::kNameObjectsCount,
+ CheckScalar(sub_heap, MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects, 3);
const MemoryAllocatorDump* empty_sub_heap =
pmd.GetAllocatorDump("foobar_allocator/sub_heap/empty");
@@ -161,7 +161,7 @@
DictionaryValue* attrs = nullptr;
ASSERT_TRUE(raw_attrs->GetAsDictionary(&attrs));
ASSERT_FALSE(attrs->HasKey(MemoryAllocatorDump::kNameSize));
- ASSERT_FALSE(attrs->HasKey(MemoryAllocatorDump::kNameObjectsCount));
+ ASSERT_FALSE(attrs->HasKey(MemoryAllocatorDump::kNameObjectCount));
// Check that the AsValueInfo doesn't hit any DCHECK.
scoped_refptr<TracedValue> traced_value(new TracedValue());
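
For illustration, a minimal sketch of a dump provider emitting the renamed attribute; the provider name and values are hypothetical, and the usual base/trace_event includes are assumed:

bool MyDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
                                  ProcessMemoryDump* pmd) {
  MemoryAllocatorDump* dump = pmd->CreateAllocatorDump("my_allocator");
  dump->AddScalar(MemoryAllocatorDump::kNameSize,
                  MemoryAllocatorDump::kUnitsBytes, 4096);
  // Renamed from kNameObjectsCount ("objects_count") in this patch.
  dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
                  MemoryAllocatorDump::kUnitsObjects, 42);
  return true;
}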
diff --git a/trace_event/memory_dump_manager.cc b/trace_event/memory_dump_manager.cc
index 71a9dae..1c10d1c 100644
--- a/trace_event/memory_dump_manager.cc
+++ b/trace_event/memory_dump_manager.cc
@@ -48,21 +48,20 @@
MemoryDumpManager* g_instance_for_testing = nullptr;
void RequestPeriodicGlobalDump() {
- MemoryDumpArgs::LevelOfDetail dump_level_of_detail;
+ MemoryDumpLevelOfDetail level_of_detail;
if (g_heavy_dumps_rate == 0) {
- dump_level_of_detail = MemoryDumpArgs::LevelOfDetail::LOW;
+ level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
} else {
- dump_level_of_detail = g_periodic_dumps_count == 0
- ? MemoryDumpArgs::LevelOfDetail::HIGH
- : MemoryDumpArgs::LevelOfDetail::LOW;
+ level_of_detail = g_periodic_dumps_count == 0
+ ? MemoryDumpLevelOfDetail::DETAILED
+ : MemoryDumpLevelOfDetail::LIGHT;
if (++g_periodic_dumps_count == g_heavy_dumps_rate)
g_periodic_dumps_count = 0;
}
- MemoryDumpArgs dump_args = {dump_level_of_detail};
MemoryDumpManager::GetInstance()->RequestGlobalDump(
- MemoryDumpType::PERIODIC_INTERVAL, dump_args);
+ MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
}
} // namespace
@@ -78,6 +77,16 @@
const uint64 MemoryDumpManager::kInvalidTracingProcessId = 0;
// static
+const char* const MemoryDumpManager::kSystemAllocatorPoolName =
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+ MallocDumpProvider::kAllocatedObjects;
+#elif defined(OS_WIN)
+ WinHeapDumpProvider::kAllocatedObjects;
+#else
+ nullptr;
+#endif
+
+// static
MemoryDumpManager* MemoryDumpManager::GetInstance() {
if (g_instance_for_testing)
return g_instance_for_testing;
@@ -95,50 +104,54 @@
MemoryDumpManager::MemoryDumpManager()
: delegate_(nullptr),
+ is_coordinator_(false),
memory_tracing_enabled_(0),
tracing_process_id_(kInvalidTracingProcessId),
- system_allocator_pool_name_(nullptr),
- skip_core_dumpers_auto_registration_for_testing_(false),
- disable_periodic_dumps_for_testing_(false) {
+ skip_core_dumpers_auto_registration_for_testing_(false) {
g_next_guid.GetNext(); // Make sure that first guid is not zero.
}
MemoryDumpManager::~MemoryDumpManager() {
- base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
+ TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
}
-void MemoryDumpManager::Initialize() {
- TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list.
- trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(this);
-
- if (skip_core_dumpers_auto_registration_for_testing_)
- return;
+void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
+ bool is_coordinator) {
+ {
+ AutoLock lock(lock_);
+ DCHECK(delegate);
+ DCHECK(!delegate_);
+ delegate_ = delegate;
+ is_coordinator_ = is_coordinator;
+ }
// Enable the core dump providers.
+ if (!skip_core_dumpers_auto_registration_for_testing_) {
#if !defined(OS_NACL)
- RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance());
+ RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance());
#endif
#if (defined(OS_LINUX) && !defined(FNL_MUSL)) || defined(OS_ANDROID)
- RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance());
- RegisterDumpProvider(MallocDumpProvider::GetInstance());
- system_allocator_pool_name_ = MallocDumpProvider::kAllocatedObjects;
+ RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance());
+ RegisterDumpProvider(MallocDumpProvider::GetInstance());
#endif
#if defined(OS_ANDROID)
- RegisterDumpProvider(JavaHeapDumpProvider::GetInstance());
+ RegisterDumpProvider(JavaHeapDumpProvider::GetInstance());
#endif
#if defined(OS_WIN)
- RegisterDumpProvider(WinHeapDumpProvider::GetInstance());
- system_allocator_pool_name_ = WinHeapDumpProvider::kAllocatedObjects;
+ RegisterDumpProvider(WinHeapDumpProvider::GetInstance());
#endif
-}
+ } // !skip_core_dumpers_auto_registration_for_testing_
-void MemoryDumpManager::SetDelegate(MemoryDumpManagerDelegate* delegate) {
- AutoLock lock(lock_);
- DCHECK_EQ(static_cast<MemoryDumpManagerDelegate*>(nullptr), delegate_);
- delegate_ = delegate;
+ // If tracing was enabled before initializing MemoryDumpManager, we missed the
+ // OnTraceLogEnabled() event. Synthesize it so we can late-join the party.
+ bool is_tracing_already_enabled = TraceLog::GetInstance()->IsEnabled();
+ TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list.
+ TraceLog::GetInstance()->AddEnabledStateObserver(this);
+ if (is_tracing_already_enabled)
+ OnTraceLogEnabled();
}
void MemoryDumpManager::RegisterDumpProvider(
@@ -189,9 +202,10 @@
mdp_iter->unregistered = true;
}
-void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type,
- const MemoryDumpArgs& dump_args,
- const MemoryDumpCallback& callback) {
+void MemoryDumpManager::RequestGlobalDump(
+ MemoryDumpType dump_type,
+ MemoryDumpLevelOfDetail level_of_detail,
+ const MemoryDumpCallback& callback) {
// Bail out immediately if tracing is not enabled at all.
if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) {
if (!callback.is_null())
@@ -202,27 +216,27 @@
const uint64 guid =
TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext());
- // The delegate_ is supposed to be thread safe, immutable and long lived.
- // No need to keep the lock after we ensure that a delegate has been set.
+ // Technically there is no need to grab the |lock_| here as the delegate is
+ // long-lived and can only be set by Initialize(), which is locked and
+ // necessarily happens before memory_tracing_enabled_ == true.
+ // Not taking the |lock_|, though, is likely to make TSan barf and, at this
+ // point (memory-infra is enabled), we're not in the fast-path anymore.
MemoryDumpManagerDelegate* delegate;
{
AutoLock lock(lock_);
delegate = delegate_;
}
- if (delegate) {
- // The delegate is in charge to coordinate the request among all the
- // processes and call the CreateLocalDumpPoint on the local process.
- MemoryDumpRequestArgs args = {guid, dump_type, dump_args};
- delegate->RequestGlobalMemoryDump(args, callback);
- } else if (!callback.is_null()) {
- callback.Run(guid, false /* success */);
- }
+ // The delegate will coordinate the IPC broadcast and at some point invoke
+ // CreateProcessDump() to get a dump for the current process.
+ MemoryDumpRequestArgs args = {guid, dump_type, level_of_detail};
+ delegate->RequestGlobalMemoryDump(args, callback);
}
-void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type,
- const MemoryDumpArgs& dump_args) {
- RequestGlobalDump(dump_type, dump_args, MemoryDumpCallback());
+void MemoryDumpManager::RequestGlobalDump(
+ MemoryDumpType dump_type,
+ MemoryDumpLevelOfDetail level_of_detail) {
+ RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback());
}
void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
@@ -305,8 +319,9 @@
bool dump_successful = false;
if (!skip_dump) {
- dump_successful = mdp->OnMemoryDump(pmd_async_state->req_args.dump_args,
- &pmd_async_state->process_memory_dump);
+ MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail};
+ dump_successful =
+ mdp->OnMemoryDump(args, &pmd_async_state->process_memory_dump);
}
{
@@ -386,11 +401,10 @@
}
void MemoryDumpManager::OnTraceLogEnabled() {
- // TODO(primiano): at this point we query TraceLog::GetCurrentCategoryFilter
- // to figure out (and cache) which dumpers should be enabled or not.
- // For the moment piggy back everything on the generic "memory" category.
bool enabled;
TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
+ if (!enabled)
+ return;
// Initialize the TraceLog for the current thread. This is to avoid that the
// TraceLog memory dump provider is registered lazily in the PostTask() below
@@ -399,13 +413,7 @@
AutoLock lock(lock_);
- // There is no point starting the tracing without a delegate.
- if (!enabled || !delegate_) {
- // Disable all the providers.
- for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it)
- it->disabled = true;
- return;
- }
+ DCHECK(delegate_); // At this point we must have a delegate.
session_state_ = new MemoryDumpSessionState();
for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) {
@@ -417,12 +425,10 @@
// TODO(primiano): This is a temporary hack to disable periodic memory dumps
// when running memory benchmarks until telemetry uses TraceConfig to
- // enable/disable periodic dumps.
- // The same mechanism should be used to disable periodic dumps in tests.
- if (!delegate_->IsCoordinatorProcess() ||
+ // enable/disable periodic dumps. See crbug.com/529184 .
+ if (!is_coordinator_ ||
CommandLine::ForCurrentProcess()->HasSwitch(
- "enable-memory-benchmarking") ||
- disable_periodic_dumps_for_testing_) {
+ "enable-memory-benchmarking")) {
return;
}
@@ -443,7 +449,7 @@
DCHECK_LE(config_list.size(), 2u);
for (const TraceConfig::MemoryDumpTriggerConfig& config : config_list) {
DCHECK(config.periodic_interval_ms);
- if (config.level_of_detail == MemoryDumpArgs::LevelOfDetail::HIGH)
+ if (config.level_of_detail == MemoryDumpLevelOfDetail::DETAILED)
heavy_dump_period_ms = config.periodic_interval_ms;
min_timer_period_ms =
std::min(min_timer_period_ms, config.periodic_interval_ms);
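
The periodic cadence implied by RequestPeriodicGlobalDump() above can be checked in isolation: with a heavy-dump rate of 5 the sequence is one DETAILED dump followed by four LIGHT ones, repeating. A self-contained sketch of the same counter logic (variable names mirror the globals; the standalone program itself is hypothetical):

#include <iostream>

int main() {
  const int heavy_dumps_rate = 5;  // Mirrors g_heavy_dumps_rate.
  int periodic_dumps_count = 0;    // Mirrors g_periodic_dumps_count.
  for (int i = 0; i < 10; ++i) {
    const bool detailed = (periodic_dumps_count == 0);
    if (++periodic_dumps_count == heavy_dumps_rate)
      periodic_dumps_count = 0;
    std::cout << (detailed ? "DETAILED" : "LIGHT") << '\n';
  }
  return 0;  // Prints: DETAILED, LIGHT x4, DETAILED, LIGHT x4.
}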
diff --git a/trace_event/memory_dump_manager.h b/trace_event/memory_dump_manager.h
index 305ec59..b445cdb 100644
--- a/trace_event/memory_dump_manager.h
+++ b/trace_event/memory_dump_manager.h
@@ -40,12 +40,19 @@
static MemoryDumpManager* GetInstance();
- // Invoked once per process to register the TraceLog observer.
- void Initialize();
-
- // See the lifetime and thread-safety requirements on the delegate below in
- // the |MemoryDumpManagerDelegate| docstring.
- void SetDelegate(MemoryDumpManagerDelegate* delegate);
+ // Invoked once per process to listen to trace begin / end events.
+ // Initialization can happen after (Un)RegisterMemoryDumpProvider() calls
+ // and the MemoryDumpManager guarantees to support this.
+ // On the other hand, the MemoryDumpManager will not be fully operational
+ // (i.e. will NACK any RequestGlobalMemoryDump()) until initialized.
+ // Arguments:
+ // is_coordinator: if true, this MemoryDumpManager instance will act as a
+ // coordinator and schedule periodic dumps (if enabled via TraceConfig);
+ // false when the MemoryDumpManager is initialized in a slave process.
+ // delegate: inversion-of-control interface for embedder-specific behaviors
+ // (multiprocess handshaking). See the lifetime and thread-safety
+ // requirements in the |MemoryDumpManagerDelegate| docstring.
+ void Initialize(MemoryDumpManagerDelegate* delegate, bool is_coordinator);
// MemoryDumpManager does NOT take memory ownership of |mdp|, which is
// expected to either be a singleton or unregister itself.
@@ -60,18 +67,17 @@
// Requests a memory dump. The dump might happen or not depending on the
// filters and categories specified when enabling tracing.
- // The |dump_args| is used to specify the dump's level of detail.
// The optional |callback| is executed asynchronously, on an arbitrary thread,
// to notify about the completion of the global dump (i.e. after all the
// processes have dumped) and its success (true iff all the dumps were
// successful).
void RequestGlobalDump(MemoryDumpType dump_type,
- const MemoryDumpArgs& dump_args,
+ MemoryDumpLevelOfDetail level_of_detail,
const MemoryDumpCallback& callback);
// Same as above (still asynchronous), but without callback.
void RequestGlobalDump(MemoryDumpType dump_type,
- const MemoryDumpArgs& dump_args);
+ MemoryDumpLevelOfDetail level_of_detail);
// TraceLog::EnabledStateObserver implementation.
void OnTraceLogEnabled() override;
@@ -92,24 +98,17 @@
// Returns the name for the allocated_objects dump. Use this to declare
// suballocator dumps from other dump providers.
- // It should not return nullptr after the manager has been initialized.
+ // It will return nullptr if no dump provider for the system allocator is
+ // registered (which is currently the case for Mac OS).
const char* system_allocator_pool_name() const {
- return system_allocator_pool_name_;
+ return kSystemAllocatorPoolName;
};
- // Tells the initialization phase to skip scheduling periodic memory dumps.
- void DisablePeriodicDumpsForTesting() {
- disable_periodic_dumps_for_testing_ = true;
- }
-
private:
friend struct DefaultDeleter<MemoryDumpManager>; // For the testing instance.
friend struct DefaultSingletonTraits<MemoryDumpManager>;
friend class MemoryDumpManagerDelegate;
friend class MemoryDumpManagerTest;
- FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest, DisableFailingDumpers);
- FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest,
- UnregisterDumperFromThreadWhileDumping);
// Descriptor struct used to hold information about registered MDPs. It is
// deliberately copyable, in order to allow it to be used as std::set value.
@@ -174,6 +173,7 @@
};
static const int kMaxConsecutiveFailuresCount;
+ static const char* const kSystemAllocatorPoolName;
MemoryDumpManager();
~MemoryDumpManager() override;
@@ -206,6 +206,9 @@
MemoryDumpManagerDelegate* delegate_; // Not owned.
+ // When true, this instance is in charge of coordinating periodic dumps.
+ bool is_coordinator_;
+
// Protects from concurrent accesses to the |dump_providers_*| and |delegate_|
// to guard against disabling logging while dumping on another thread.
Lock lock_;
@@ -221,17 +224,9 @@
// expected to be valid only when tracing is enabled.
uint64 tracing_process_id_;
- // Name of the allocated_objects dump.
- const char* system_allocator_pool_name_;
-
// Skips the auto-registration of the core dumpers during Initialize().
bool skip_core_dumpers_auto_registration_for_testing_;
- // When true, the initialization phase does not start the periodic memory
- // dumps.
- // TODO(primiano): This should go into TraceConfig. https://goo.gl/5Hj3o0.
- bool disable_periodic_dumps_for_testing_;
-
DISALLOW_COPY_AND_ASSIGN(MemoryDumpManager);
};
@@ -242,10 +237,6 @@
virtual void RequestGlobalMemoryDump(const MemoryDumpRequestArgs& args,
const MemoryDumpCallback& callback) = 0;
- // Determines whether the MemoryDumpManager instance should be the master
- // (the ones which initiates and coordinates the multiprocess dumps) or not.
- virtual bool IsCoordinatorProcess() const = 0;
-
// Returns tracing process id of the current process. This is used by
// MemoryDumpManager::GetTracingProcessId.
virtual uint64 GetTracingProcessId() const = 0;
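
A sketch of how an embedder might wire this up after the change; the delegate class and its single-process short-circuit are hypothetical, while the Initialize() and CreateProcessDump() calls match the interfaces in this header:

class InProcessDumpDelegate
    : public base::trace_event::MemoryDumpManagerDelegate {
 public:
  void RequestGlobalMemoryDump(
      const base::trace_event::MemoryDumpRequestArgs& args,
      const base::trace_event::MemoryDumpCallback& callback) override {
    // Single process: nothing to broadcast, dump locally right away.
    CreateProcessDump(args, callback);
  }
  uint64 GetTracingProcessId() const override {
    return base::trace_event::MemoryDumpManager::kInvalidTracingProcessId;
  }
};

// At startup, once per process (the delegate must outlive tracing):
//   base::trace_event::MemoryDumpManager::GetInstance()->Initialize(
//       delegate, true /* is_coordinator */);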
diff --git a/trace_event/memory_dump_manager_unittest.cc b/trace_event/memory_dump_manager_unittest.cc
index 6200c2d..f409a4c 100644
--- a/trace_event/memory_dump_manager_unittest.cc
+++ b/trace_event/memory_dump_manager_unittest.cc
@@ -8,15 +8,18 @@
#include "base/memory/scoped_vector.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/strings/stringprintf.h"
#include "base/test/test_io_thread.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_config_memory_test_util.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
using testing::_;
+using testing::AnyNumber;
using testing::AtMost;
using testing::Between;
using testing::Invoke;
@@ -24,34 +27,40 @@
namespace base {
namespace trace_event {
-namespace {
-MemoryDumpArgs g_high_detail_args = {MemoryDumpArgs::LevelOfDetail::HIGH};
-MemoryDumpArgs g_low_detail_args = {MemoryDumpArgs::LevelOfDetail::LOW};
+
+// GTest matchers for MemoryDumpRequestArgs arguments.
+MATCHER(IsDetailedDump, "") {
+ return arg.level_of_detail == MemoryDumpLevelOfDetail::DETAILED;
}
-// Testing MemoryDumpManagerDelegate which short-circuits dump requests locally
-// instead of performing IPC dances.
+MATCHER(IsLightDump, "") {
+ return arg.level_of_detail == MemoryDumpLevelOfDetail::LIGHT;
+}
+
+// Testing MemoryDumpManagerDelegate which, by default, short-circuits dump
+// requests locally to the MemoryDumpManager instead of performing IPC dances.
class MemoryDumpManagerDelegateForTesting : public MemoryDumpManagerDelegate {
public:
- void RequestGlobalMemoryDump(const MemoryDumpRequestArgs& args,
- const MemoryDumpCallback& callback) override {
- CreateProcessDump(args, callback);
+ MemoryDumpManagerDelegateForTesting() {
+ ON_CALL(*this, RequestGlobalMemoryDump(_, _))
+ .WillByDefault(Invoke(
+ this, &MemoryDumpManagerDelegateForTesting::CreateProcessDump));
}
- bool IsCoordinatorProcess() const override { return false; }
- uint64 GetTracingProcessId() const override {
- return MemoryDumpManager::kInvalidTracingProcessId;
- }
-};
-
-class MemoryDumpManagerDelegateForPeriodicDumpTest
- : public MemoryDumpManagerDelegateForTesting {
- public:
MOCK_METHOD2(RequestGlobalMemoryDump,
void(const MemoryDumpRequestArgs& args,
const MemoryDumpCallback& callback));
- virtual bool IsCoordinatorProcess() const { return true; }
+ uint64 GetTracingProcessId() const {
+ NOTREACHED();
+ return MemoryDumpManager::kInvalidTracingProcessId;
+ }
+};
+
+class MockMemoryDumpProvider : public MemoryDumpProvider {
+ public:
+ MOCK_METHOD2(OnMemoryDump,
+ bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
};
class MemoryDumpManagerTest : public testing::Test {
@@ -62,12 +71,13 @@
mdm_.reset(new MemoryDumpManager());
MemoryDumpManager::SetInstanceForTesting(mdm_.get());
ASSERT_EQ(mdm_, MemoryDumpManager::GetInstance());
- MemoryDumpManager::GetInstance()->Initialize();
+ delegate_.reset(new MemoryDumpManagerDelegateForTesting);
}
void TearDown() override {
MemoryDumpManager::SetInstanceForTesting(nullptr);
mdm_.reset();
+ delegate_.reset();
message_loop_.reset();
TraceLog::DeleteForTesting();
}
@@ -81,32 +91,33 @@
}
protected:
- void SetDelegate(scoped_ptr<MemoryDumpManagerDelegateForTesting> delegate) {
- delegate_ = delegate.Pass();
- MemoryDumpManager::GetInstance()->SetDelegate(delegate_.get());
+ void InitializeMemoryDumpManager(bool is_coordinator) {
+ mdm_->Initialize(delegate_.get(), is_coordinator);
}
- // This enalbes tracing using the legacy category filter string.
- void EnableTracing(const char* category) {
- if (!delegate_) {
- delegate_.reset(new MemoryDumpManagerDelegateForTesting());
- MemoryDumpManager::GetInstance()->SetDelegate(delegate_.get());
- }
- TraceLog::GetInstance()->SetEnabled(
- TraceConfig(category, ""), TraceLog::RECORDING_MODE);
+ void EnableTracingWithLegacyCategories(const char* category) {
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(category, ""),
+ TraceLog::RECORDING_MODE);
}
- void EnableTracingWithTraceConfig(const char* trace_config) {
- DCHECK(delegate_);
- TraceConfig tc(trace_config);
- TraceLog::GetInstance()->SetEnabled(tc, TraceLog::RECORDING_MODE);
+ void EnableTracingWithTraceConfig(const std::string& trace_config) {
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(trace_config),
+ TraceLog::RECORDING_MODE);
}
void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); }
+ bool IsPeriodicDumpingEnabled() const {
+ return mdm_->periodic_dump_timer_.IsRunning();
+ }
+
+ int GetMaxConsecutiveFailuresCount() const {
+ return MemoryDumpManager::kMaxConsecutiveFailuresCount;
+ }
+
scoped_ptr<MemoryDumpManager> mdm_;
- bool last_callback_success_;
scoped_ptr<MemoryDumpManagerDelegateForTesting> delegate_;
+ bool last_callback_success_;
private:
scoped_ptr<MessageLoop> message_loop_;
@@ -115,220 +126,166 @@
ShadowingAtExitManager at_exit_manager_;
};
-class MockDumpProvider : public MemoryDumpProvider {
- public:
- MockDumpProvider()
- : dump_provider_to_register_or_unregister(nullptr),
- last_session_state_(nullptr),
- level_of_detail_(MemoryDumpArgs::LevelOfDetail::HIGH) {}
-
- // Ctor used by the RespectTaskRunnerAffinity test.
- explicit MockDumpProvider(
- const scoped_refptr<SingleThreadTaskRunner>& task_runner)
- : last_session_state_(nullptr),
- task_runner_(task_runner),
- level_of_detail_(MemoryDumpArgs::LevelOfDetail::HIGH) {}
-
- // Ctor used by CheckMemoryDumpArgs test.
- explicit MockDumpProvider(const MemoryDumpArgs::LevelOfDetail level_of_detail)
- : last_session_state_(nullptr), level_of_detail_(level_of_detail) {}
-
- virtual ~MockDumpProvider() {}
-
- MOCK_METHOD2(OnMemoryDump,
- bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
-
- // OnMemoryDump() override for the RespectTaskRunnerAffinity test.
- bool OnMemoryDump_CheckTaskRunner(const MemoryDumpArgs& args,
- ProcessMemoryDump* pmd) {
- EXPECT_TRUE(task_runner_->RunsTasksOnCurrentThread());
- return true;
- }
-
- // OnMemoryDump() override for the SharedSessionState test.
- bool OnMemoryDump_CheckSessionState(const MemoryDumpArgs& args,
- ProcessMemoryDump* pmd) {
- MemoryDumpSessionState* cur_session_state = pmd->session_state().get();
- if (last_session_state_)
- EXPECT_EQ(last_session_state_, cur_session_state);
- last_session_state_ = cur_session_state;
- return true;
- }
-
- // OnMemoryDump() override for the RegisterDumperWhileDumping test.
- bool OnMemoryDump_RegisterExtraDumpProvider(const MemoryDumpArgs& args,
- ProcessMemoryDump* pmd) {
- MemoryDumpManager::GetInstance()->RegisterDumpProvider(
- dump_provider_to_register_or_unregister);
- return true;
- }
-
- // OnMemoryDump() override for the UnegisterDumperWhileDumping test.
- bool OnMemoryDump_UnregisterDumpProvider(const MemoryDumpArgs& args,
- ProcessMemoryDump* pmd) {
- MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
- dump_provider_to_register_or_unregister);
- return true;
- }
-
- // OnMemoryDump() override for the CheckMemoryDumpArgs test.
- bool OnMemoryDump_CheckMemoryDumpArgs(const MemoryDumpArgs& args,
- ProcessMemoryDump* pmd) {
- EXPECT_EQ(level_of_detail_, args.level_of_detail);
- return true;
- }
-
- // Used by OnMemoryDump_(Un)RegisterExtraDumpProvider.
- MemoryDumpProvider* dump_provider_to_register_or_unregister;
-
- private:
- MemoryDumpSessionState* last_session_state_;
- scoped_refptr<SingleThreadTaskRunner> task_runner_;
- const MemoryDumpArgs::LevelOfDetail level_of_detail_;
-};
-
+// Basic sanity checks. Registers a memory dump provider and checks that it is
+// called, but only when memory-infra is enabled.
TEST_F(MemoryDumpManagerTest, SingleDumper) {
- MockDumpProvider mdp;
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp;
mdm_->RegisterDumpProvider(&mdp);
// Check that the dumper is not called if the memory category is not enabled.
- EnableTracing("foo-and-bar-but-not-memory");
+ EnableTracingWithLegacyCategories("foobar-but-not-memory");
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
// Now repeat enabling the memory category and check that the dumper is
// invoked this time.
- EnableTracing(MemoryDumpManager::kTraceCategory);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(3);
EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(3).WillRepeatedly(Return(true));
for (int i = 0; i < 3; ++i)
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
mdm_->UnregisterDumpProvider(&mdp);
- // Finally check the unregister logic (no calls to the mdp after unregister).
- EnableTracing(MemoryDumpManager::kTraceCategory);
+ // Finally check the unregister logic: the delegate will be invoked but not
+ // the dump provider, as it has been unregistered.
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
TraceLog::GetInstance()->SetDisabled();
}
+// Checks that requesting dumps with a high level of detail actually propagates
+// the level of detail properly to the OnMemoryDump() call on dump providers.
TEST_F(MemoryDumpManagerTest, CheckMemoryDumpArgs) {
- // Check that requesting dumps with high level of detail actually propagates
- // to OnMemoryDump() call on dump providers.
- MockDumpProvider mdp_high_detail(MemoryDumpArgs::LevelOfDetail::HIGH);
- mdm_->RegisterDumpProvider(&mdp_high_detail);
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp;
- EnableTracing(MemoryDumpManager::kTraceCategory);
- EXPECT_CALL(mdp_high_detail, OnMemoryDump(_, _))
- .Times(1)
- .WillRepeatedly(
- Invoke(&mdp_high_detail,
- &MockDumpProvider::OnMemoryDump_CheckMemoryDumpArgs));
+ mdm_->RegisterDumpProvider(&mdp);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(IsDetailedDump(), _)).WillOnce(Return(true));
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
- mdm_->UnregisterDumpProvider(&mdp_high_detail);
+ mdm_->UnregisterDumpProvider(&mdp);
// Check that requesting dumps with low level of detail actually propagates to
// OnMemoryDump() call on dump providers.
- MockDumpProvider mdp_low_detail(MemoryDumpArgs::LevelOfDetail::LOW);
- mdm_->RegisterDumpProvider(&mdp_low_detail);
-
- EnableTracing(MemoryDumpManager::kTraceCategory);
- EXPECT_CALL(mdp_low_detail, OnMemoryDump(_, _))
- .Times(1)
- .WillRepeatedly(
- Invoke(&mdp_low_detail,
- &MockDumpProvider::OnMemoryDump_CheckMemoryDumpArgs));
+ mdm_->RegisterDumpProvider(&mdp);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(IsLightDump(), _)).WillOnce(Return(true));
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_low_detail_args);
+ MemoryDumpLevelOfDetail::LIGHT);
DisableTracing();
- mdm_->UnregisterDumpProvider(&mdp_low_detail);
+ mdm_->UnregisterDumpProvider(&mdp);
}
+// Checks that the SharedSessionState object is actually shared over time.
TEST_F(MemoryDumpManagerTest, SharedSessionState) {
- MockDumpProvider mdp1;
- MockDumpProvider mdp2;
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
mdm_->RegisterDumpProvider(&mdp1);
mdm_->RegisterDumpProvider(&mdp2);
- EnableTracing(MemoryDumpManager::kTraceCategory);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ const MemoryDumpSessionState* session_state = mdm_->session_state().get();
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
EXPECT_CALL(mdp1, OnMemoryDump(_, _))
.Times(2)
- .WillRepeatedly(
- Invoke(&mdp1, &MockDumpProvider::OnMemoryDump_CheckSessionState));
+ .WillRepeatedly(Invoke([session_state](const MemoryDumpArgs&,
+ ProcessMemoryDump* pmd) -> bool {
+ EXPECT_EQ(session_state, pmd->session_state().get());
+ return true;
+ }));
EXPECT_CALL(mdp2, OnMemoryDump(_, _))
.Times(2)
- .WillRepeatedly(
- Invoke(&mdp2, &MockDumpProvider::OnMemoryDump_CheckSessionState));
+ .WillRepeatedly(Invoke([session_state](const MemoryDumpArgs&,
+ ProcessMemoryDump* pmd) -> bool {
+ EXPECT_EQ(session_state, pmd->session_state().get());
+ return true;
+ }));
for (int i = 0; i < 2; ++i)
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
}
+// Checks that the (Un)RegisterDumpProvider logic behaves sanely.
TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
- MockDumpProvider mdp1;
- MockDumpProvider mdp2;
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
// Enable only mdp1.
mdm_->RegisterDumpProvider(&mdp1);
- EnableTracing(MemoryDumpManager::kTraceCategory);
- EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(0);
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
// Invert: enable mdp1 and disable mdp2.
mdm_->UnregisterDumpProvider(&mdp1);
mdm_->RegisterDumpProvider(&mdp2);
- EnableTracing(MemoryDumpManager::kTraceCategory);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
- EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _)).WillOnce(Return(true));
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
// Enable both mdp1 and mdp2.
mdm_->RegisterDumpProvider(&mdp1);
- EnableTracing(MemoryDumpManager::kTraceCategory);
- EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
- EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _)).WillOnce(Return(true));
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
}
-// Verify that whether OnMemoryDump is called depends only on the current
+// Checks that the dump provider invocations depend only on the current
// registration state and not on previous registrations and dumps.
TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
- MockDumpProvider mdp;
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp;
mdm_->RegisterDumpProvider(&mdp);
{
- EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(1);
- EnableTracing(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).WillOnce(Return(true));
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
}
mdm_->UnregisterDumpProvider(&mdp);
{
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
- EnableTracing(MemoryDumpManager::kTraceCategory);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
}
@@ -336,10 +293,11 @@
mdm_->UnregisterDumpProvider(&mdp);
{
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
- EnableTracing(MemoryDumpManager::kTraceCategory);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
}
@@ -348,10 +306,11 @@
mdm_->RegisterDumpProvider(&mdp);
{
- EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(1);
- EnableTracing(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).WillOnce(Return(true));
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
}
}
@@ -361,27 +320,33 @@
// threads and registering a MemoryDumpProvider on each of them. At each
// iteration, one thread is removed, to check the live unregistration logic.
TEST_F(MemoryDumpManagerTest, RespectTaskRunnerAffinity) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
const uint32 kNumInitialThreads = 8;
ScopedVector<Thread> threads;
- ScopedVector<MockDumpProvider> mdps;
+ ScopedVector<MockMemoryDumpProvider> mdps;
// Create the threads and setup the expectations. Given that at each iteration
// we will pop out one thread/MemoryDumpProvider, each MDP is supposed to be
// invoked a number of times equal to its index.
for (uint32 i = kNumInitialThreads; i > 0; --i) {
- threads.push_back(new Thread("test thread"));
+ Thread* thread = new Thread("test thread");
+ threads.push_back(thread);
threads.back()->Start();
- mdps.push_back(new MockDumpProvider(threads.back()->task_runner()));
- MockDumpProvider* mdp = mdps.back();
- mdm_->RegisterDumpProvider(mdp, threads.back()->task_runner());
+ scoped_refptr<SingleThreadTaskRunner> task_runner = thread->task_runner();
+ MockMemoryDumpProvider* mdp = new MockMemoryDumpProvider();
+ mdps.push_back(mdp);
+ mdm_->RegisterDumpProvider(mdp, task_runner);
EXPECT_CALL(*mdp, OnMemoryDump(_, _))
.Times(i)
- .WillRepeatedly(
- Invoke(mdp, &MockDumpProvider::OnMemoryDump_CheckTaskRunner));
+ .WillRepeatedly(Invoke(
+ [task_runner](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
+ EXPECT_TRUE(task_runner->RunsTasksOnCurrentThread());
+ return true;
+ }));
}
- EnableTracing(MemoryDumpManager::kTraceCategory);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
while (!threads.empty()) {
last_callback_success_ = false;
@@ -390,10 +355,11 @@
MemoryDumpCallback callback =
Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args, callback);
- // This nested message loop (|run_loop|) will be quit if and only if
- // the RequestGlobalDump callback is invoked.
+ MemoryDumpLevelOfDetail::DETAILED, callback);
+ // This nested message loop (|run_loop|) will quit if and only if the
+ // |callback| passed to RequestGlobalDump() is invoked.
run_loop.Run();
}
EXPECT_TRUE(last_callback_success_);
@@ -418,48 +384,60 @@
DisableTracing();
}
-// Enable both dump providers, make sure that mdp gets disabled after 3 failures
-// and not disabled after 1.
+// Checks that providers get disabled after 3 consecutive failures, but not
+// otherwise (e.g., if interleaved).
TEST_F(MemoryDumpManagerTest, DisableFailingDumpers) {
- MockDumpProvider mdp1;
- MockDumpProvider mdp2;
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
mdm_->RegisterDumpProvider(&mdp1);
mdm_->RegisterDumpProvider(&mdp2);
- EnableTracing(MemoryDumpManager::kTraceCategory);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ const int kNumDumps = 2 * GetMaxConsecutiveFailuresCount();
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(kNumDumps);
EXPECT_CALL(mdp1, OnMemoryDump(_, _))
- .Times(MemoryDumpManager::kMaxConsecutiveFailuresCount)
+ .Times(GetMaxConsecutiveFailuresCount())
.WillRepeatedly(Return(false));
EXPECT_CALL(mdp2, OnMemoryDump(_, _))
- .Times(1 + MemoryDumpManager::kMaxConsecutiveFailuresCount)
.WillOnce(Return(false))
- .WillRepeatedly(Return(true));
- for (int i = 0; i < 1 + MemoryDumpManager::kMaxConsecutiveFailuresCount;
- i++) {
+ .WillOnce(Return(true))
+ .WillOnce(Return(false))
+ .WillOnce(Return(false))
+ .WillOnce(Return(true))
+ .WillOnce(Return(false));
+
+ for (int i = 0; i < kNumDumps; i++) {
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
}
DisableTracing();
}
-// Sneakily register an extra memory dump provider while an existing one is
+// Sneakily registers an extra memory dump provider while an existing one is
// dumping and expects it to take part in the already active tracing session.
TEST_F(MemoryDumpManagerTest, RegisterDumperWhileDumping) {
- MockDumpProvider mdp1;
- MockDumpProvider mdp2;
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
- mdp1.dump_provider_to_register_or_unregister = &mdp2;
mdm_->RegisterDumpProvider(&mdp1);
- EnableTracing(MemoryDumpManager::kTraceCategory);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);
EXPECT_CALL(mdp1, OnMemoryDump(_, _))
.Times(4)
.WillOnce(Return(true))
- .WillOnce(Invoke(
- &mdp1, &MockDumpProvider::OnMemoryDump_RegisterExtraDumpProvider))
+ .WillOnce(
+ Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
+ MemoryDumpManager::GetInstance()->RegisterDumpProvider(&mdp2);
+ return true;
+ }))
.WillRepeatedly(Return(true));
// Depending on the insertion order (before or after mdp1), mdp2 might be
@@ -470,52 +448,58 @@
for (int i = 0; i < 4; i++) {
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
}
DisableTracing();
}
-// Like the above, but suddenly unregister the dump provider.
+// Like RegisterDumperWhileDumping, but unregister the dump provider instead.
TEST_F(MemoryDumpManagerTest, UnregisterDumperWhileDumping) {
- MockDumpProvider mdp1;
- MockDumpProvider mdp2;
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
mdm_->RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get());
mdm_->RegisterDumpProvider(&mdp2, ThreadTaskRunnerHandle::Get());
- mdp1.dump_provider_to_register_or_unregister = &mdp2;
- EnableTracing(MemoryDumpManager::kTraceCategory);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);
EXPECT_CALL(mdp1, OnMemoryDump(_, _))
.Times(4)
.WillOnce(Return(true))
.WillOnce(
- Invoke(&mdp1, &MockDumpProvider::OnMemoryDump_UnregisterDumpProvider))
+ Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
+ MemoryDumpManager::GetInstance()->UnregisterDumpProvider(&mdp2);
+ return true;
+ }))
.WillRepeatedly(Return(true));
// Depending on the insertion order (before or after mdp1), mdp2 might have
- // been already called when OnMemoryDump_UnregisterDumpProvider happens.
+ // been already called when UnregisterDumpProvider happens.
EXPECT_CALL(mdp2, OnMemoryDump(_, _))
.Times(Between(1, 2))
.WillRepeatedly(Return(true));
for (int i = 0; i < 4; i++) {
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args);
+ MemoryDumpLevelOfDetail::DETAILED);
}
DisableTracing();
}
-// Verify that the dump does not abort when unregistering a provider while
+// Checks that the dump does not abort when unregistering a provider while
// dumping from a different thread than the dumping thread.
TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
ScopedVector<TestIOThread> threads;
- ScopedVector<MockDumpProvider> mdps;
+ ScopedVector<MockMemoryDumpProvider> mdps;
for (int i = 0; i < 2; i++) {
threads.push_back(new TestIOThread(TestIOThread::kAutoStart));
- mdps.push_back(new MockDumpProvider(threads.back()->task_runner()));
+ mdps.push_back(new MockMemoryDumpProvider());
mdm_->RegisterDumpProvider(mdps.back(), threads.back()->task_runner());
}
@@ -524,10 +508,10 @@
// When OnMemoryDump is called on either of the dump providers, it will
// unregister the other one.
- for (MockDumpProvider* mdp : mdps) {
+ for (MockMemoryDumpProvider* mdp : mdps) {
int other_idx = (mdps.front() == mdp);
TestIOThread* other_thread = threads[other_idx];
- MockDumpProvider* other_mdp = mdps[other_idx];
+ MockMemoryDumpProvider* other_mdp = mdps[other_idx];
auto on_dump = [this, other_thread, other_mdp, &on_memory_dump_call_count](
const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
other_thread->PostTaskAndWait(
@@ -549,10 +533,11 @@
Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
MessageLoop::current()->task_runner(), run_loop.QuitClosure());
- EnableTracing(MemoryDumpManager::kTraceCategory);
- MemoryDumpRequestArgs request_args = {0, MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args};
- mdm_->CreateProcessDump(request_args, callback);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED, callback);
run_loop.Run();
@@ -562,12 +547,14 @@
DisableTracing();
}
-// Ensures that a NACK callback is invoked if RequestGlobalDump is called when
+// Checks that a NACK callback is invoked if RequestGlobalDump() is called when
// tracing is not enabled.
TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {
- MockDumpProvider mdp1;
-
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
mdm_->RegisterDumpProvider(&mdp1);
+
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
last_callback_success_ = true;
@@ -577,64 +564,142 @@
Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
MessageLoop::current()->task_runner(), run_loop.QuitClosure());
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
- g_high_detail_args, callback);
+ MemoryDumpLevelOfDetail::DETAILED, callback);
run_loop.Run();
}
EXPECT_FALSE(last_callback_success_);
}
-MATCHER(IsHighDetail, "") {
- return arg.dump_args.level_of_detail == MemoryDumpArgs::LevelOfDetail::HIGH;
+// Checks that if the MemoryDumpManager is initialized after tracing has
+// already begun, it will still late-join the party (real use case: startup
+// tracing).
+TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
+ MockMemoryDumpProvider mdp;
+ mdm_->RegisterDumpProvider(&mdp);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ // First check that a RequestGlobalDump() issued before the MemoryDumpManager
+ // initialization gets NACK-ed cleanly.
+ {
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
+ RunLoop run_loop;
+ MemoryDumpCallback callback =
+ Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
+ MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED, callback);
+ run_loop.Run();
+ EXPECT_FALSE(last_callback_success_);
+ }
+
+ // Now late-initialize the MemoryDumpManager and check that the
+ // RequestGlobalDump completes successfully.
+ {
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ RunLoop run_loop;
+ MemoryDumpCallback callback =
+ Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
+ MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED, callback);
+ run_loop.Run();
+ EXPECT_TRUE(last_callback_success_);
+ }
+ DisableTracing();
}
-MATCHER(IsLowDetail, "") {
- return arg.dump_args.level_of_detail == MemoryDumpArgs::LevelOfDetail::LOW;
+// This test (and TraceConfigExpectationsWhenIsCoordinator below) crystallizes
+// expectations of the chrome://tracing UI and chrome telemetry w.r.t. periodic
+// dumps in memory-infra, handling gracefully the transition between the legacy
+// and the new-style (JSON-based) TraceConfig.
+TEST_F(MemoryDumpManagerTest, TraceConfigExpectations) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MemoryDumpManagerDelegateForTesting& delegate = *delegate_;
+
+ // Don't trigger the default behavior of the mock delegate in this test,
+ // which would short-circuit the dump request to the actual
+ // CreateProcessDump().
+ // We don't want to create any dump in this test, only check whether the dumps
+ // are requested or not.
+ ON_CALL(delegate, RequestGlobalMemoryDump(_, _)).WillByDefault(Return());
+
+ // Enabling memory-infra in a non-coordinator process should not trigger any
+ // periodic dumps.
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_FALSE(IsPeriodicDumpingEnabled());
+ DisableTracing();
+
+ // Enabling memory-infra with the new (JSON) TraceConfig in a non-coordinator
+ // process with a fully defined trigger config should NOT enable any periodic
+ // dumps.
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(1, 5));
+ EXPECT_FALSE(IsPeriodicDumpingEnabled());
+ DisableTracing();
}
-TEST_F(MemoryDumpManagerTest, SchedulePeriodicDumpsFromTraceConfig) {
- const char kMemoryDumpTraceConfigString[] =
- "{"
- "\"included_categories\":["
- "\"disabled-by-default-memory-infra\""
- "],"
- "\"memory_dump_config\":{"
- "\"triggers\":["
- "{"
- "\"mode\":\"light\","
- "\"periodic_interval_ms\":1"
- "},"
- "{"
- "\"mode\":\"detailed\","
- "\"periodic_interval_ms\":3"
- "}"
- "]"
- "}"
- "}";
+TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
+ InitializeMemoryDumpManager(true /* is_coordinator */);
+ MemoryDumpManagerDelegateForTesting& delegate = *delegate_;
+ ON_CALL(delegate, RequestGlobalMemoryDump(_, _)).WillByDefault(Return());
+ // Enabling memory-infra with the legacy TraceConfig (category filter) in
+ // a coordinator process should enable periodic dumps.
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_TRUE(IsPeriodicDumpingEnabled());
+ DisableTracing();
+
+ // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
+ // process without specifying any "memory_dump_config" section should enable
+ // periodic dumps. This is to preserve the behavior of the chrome://tracing
+ // UI, that is: ticking memory-infra should dump periodically with the default
+ // config.
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_NoTriggers());
+ EXPECT_TRUE(IsPeriodicDumpingEnabled());
+ DisableTracing();
+
+ // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
+ // process with an empty "memory_dump_config" should NOT enable periodic
+ // dumps. This is the way telemetry is supposed to use memory-infra with
+ // only explicitly triggered dumps.
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
+ EXPECT_FALSE(IsPeriodicDumpingEnabled());
+ DisableTracing();
+
+ // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
+ // process with a fully defined trigger config should cause periodic dumps to
+ // be performed in the correct order.
RunLoop run_loop;
- scoped_ptr<MemoryDumpManagerDelegateForPeriodicDumpTest> delegate(
- new MemoryDumpManagerDelegateForPeriodicDumpTest());
-
auto quit_closure = run_loop.QuitClosure();
+
+ const int kHeavyDumpRate = 5;
+ const int kLightDumpPeriodMs = 1;
+ const int kHeavyDumpPeriodMs = kHeavyDumpRate * kLightDumpPeriodMs;
+ // The expected sequence with light=1ms, heavy=5ms is H,L,L,L,L,H,...
testing::InSequence sequence;
- EXPECT_CALL(*delegate.get(), RequestGlobalMemoryDump(IsHighDetail(), _))
- .Times(1);
- EXPECT_CALL(*delegate.get(), RequestGlobalMemoryDump(IsLowDetail(), _))
- .Times(2);
- EXPECT_CALL(*delegate.get(), RequestGlobalMemoryDump(IsHighDetail(), _))
- .Times(1);
- EXPECT_CALL(*delegate.get(), RequestGlobalMemoryDump(IsLowDetail(), _))
- .Times(1)
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsDetailedDump(), _));
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
+ .Times(kHeavyDumpRate - 1);
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsDetailedDump(), _));
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
+ .Times(kHeavyDumpRate - 2);
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
.WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
const MemoryDumpCallback& callback) {
- TraceLog::GetInstance()->SetDisabled();
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
}));
- SetDelegate(delegate.Pass());
- EnableTracingWithTraceConfig(kMemoryDumpTraceConfigString);
+ // Swallow all the final spurious calls until tracing gets disabled.
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(_, _)).Times(AnyNumber());
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(
+ kLightDumpPeriodMs, kHeavyDumpPeriodMs));
run_loop.Run();
+ DisableTracing();
}
} // namespace trace_event
diff --git a/trace_event/memory_dump_provider.h b/trace_event/memory_dump_provider.h
index 6ab20aa..3b1f136 100644
--- a/trace_event/memory_dump_provider.h
+++ b/trace_event/memory_dump_provider.h
@@ -7,23 +7,17 @@
#include "base/base_export.h"
#include "base/macros.h"
+#include "base/trace_event/memory_dump_request_args.h"
namespace base {
namespace trace_event {
class ProcessMemoryDump;
-// Contains information about the type of memory dump the MemoryDumpProvider
-// should generate on dump request. This is to control the size of dumps
-// generated.
+// Args passed to OnMemoryDump(). This is to avoid rewriting all the subclasses
+// in the codebase when extending the MemoryDumpProvider API.
struct MemoryDumpArgs {
- enum class LevelOfDetail {
- LOW,
- HIGH,
- LAST = HIGH // For IPC Macros.
- };
-
- LevelOfDetail level_of_detail;
+ MemoryDumpLevelOfDetail level_of_detail;
};
// The contract interface that memory dump providers must implement.
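
A sketch of a provider consuming the slimmed-down MemoryDumpArgs, following the same pattern ProcessMemoryMapsDumpProvider uses later in this patch; the class and helper names are made up:

class MyCacheDumpProvider : public base::trace_event::MemoryDumpProvider {
 public:
  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override {
    // Cheap totals for LIGHT dumps; per-entry breakdown only when DETAILED.
    if (args.level_of_detail ==
        base::trace_event::MemoryDumpLevelOfDetail::LIGHT) {
      return DumpTotalsOnly(pmd);
    }
    return DumpPerEntryBreakdown(pmd);
  }

 private:
  bool DumpTotalsOnly(base::trace_event::ProcessMemoryDump* pmd);
  bool DumpPerEntryBreakdown(base::trace_event::ProcessMemoryDump* pmd);
};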
diff --git a/trace_event/memory_dump_request_args.cc b/trace_event/memory_dump_request_args.cc
index 79c2802..48b5ba6 100644
--- a/trace_event/memory_dump_request_args.cc
+++ b/trace_event/memory_dump_request_args.cc
@@ -13,16 +13,38 @@
const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
switch (dump_type) {
case MemoryDumpType::TASK_BEGIN:
- return "TASK_BEGIN";
+ return "task_begin";
case MemoryDumpType::TASK_END:
- return "TASK_END";
+ return "task_end";
case MemoryDumpType::PERIODIC_INTERVAL:
- return "PERIODIC_INTERVAL";
+ return "periodic_interval";
case MemoryDumpType::EXPLICITLY_TRIGGERED:
- return "EXPLICITLY_TRIGGERED";
+ return "explicitly_triggered";
}
NOTREACHED();
- return "UNKNOWN";
+ return "unknown";
+}
+
+const char* MemoryDumpLevelOfDetailToString(
+ const MemoryDumpLevelOfDetail& level_of_detail) {
+ switch (level_of_detail) {
+ case MemoryDumpLevelOfDetail::LIGHT:
+ return "light";
+ case MemoryDumpLevelOfDetail::DETAILED:
+ return "detailed";
+ }
+ NOTREACHED();
+ return "unknown";
+}
+
+MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
+ const std::string& str) {
+ if (str == "light")
+ return MemoryDumpLevelOfDetail::LIGHT;
+ if (str == "detailed")
+ return MemoryDumpLevelOfDetail::DETAILED;
+ NOTREACHED();
+ return MemoryDumpLevelOfDetail::LAST;
}
} // namespace trace_event
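
The two helpers are inverses over the valid values; a quick illustrative round-trip (function name hypothetical):

void LevelOfDetailRoundTrip() {
  using base::trace_event::MemoryDumpLevelOfDetail;
  MemoryDumpLevelOfDetail lod =
      base::trace_event::StringToMemoryDumpLevelOfDetail("detailed");
  // Returns "detailed" again; any string other than "light"/"detailed"
  // hits the NOTREACHED() above.
  const char* str = base::trace_event::MemoryDumpLevelOfDetailToString(lod);
  (void)str;
}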
diff --git a/trace_event/memory_dump_request_args.h b/trace_event/memory_dump_request_args.h
index 27ea451..ac38390 100644
--- a/trace_event/memory_dump_request_args.h
+++ b/trace_event/memory_dump_request_args.h
@@ -8,9 +8,10 @@
// This file defines the types and structs used to issue memory dump requests.
// These are also used in the IPCs for coordinating inter-process memory dumps.
+#include <string>
+
#include "base/base_export.h"
#include "base/callback.h"
-#include "base/trace_event/memory_dump_provider.h"
namespace base {
namespace trace_event {
@@ -25,11 +26,18 @@
LAST = EXPLICITLY_TRIGGERED // For IPC macros.
};
-// Returns the name in string for the dump type given.
-BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
+// Tells the MemoryDumpProvider(s) how detailed their dumps should be.
+// MemoryDumpProvider instances must guarantee that level of detail does not
+// affect the total size reported in the root node, but only the granularity of
+// the child MemoryAllocatorDump(s).
+enum class MemoryDumpLevelOfDetail {
+ LIGHT, // Few entries, typically a fixed number, per dump.
+ DETAILED, // Unrestricted amount of entries per dump.
+ LAST = DETAILED // For IPC Macros.
+};
-using MemoryDumpCallback = Callback<void(uint64 dump_guid, bool success)>;
-
+// Initial request arguments for a global memory dump. (see
+// MemoryDumpManager::RequestGlobalMemoryDump()).
struct BASE_EXPORT MemoryDumpRequestArgs {
// Globally unique identifier. In multi-process dumps, all processes issue a
// local dump with the same guid. This allows the trace importers to
@@ -37,10 +45,19 @@
uint64 dump_guid;
MemoryDumpType dump_type;
-
- MemoryDumpArgs dump_args;
+ MemoryDumpLevelOfDetail level_of_detail;
};
+using MemoryDumpCallback = Callback<void(uint64 dump_guid, bool success)>;
+
+BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
+
+BASE_EXPORT const char* MemoryDumpLevelOfDetailToString(
+ const MemoryDumpLevelOfDetail& level_of_detail);
+
+BASE_EXPORT MemoryDumpLevelOfDetail
+StringToMemoryDumpLevelOfDetail(const std::string& str);
+
} // namespace trace_event
} // namespace base
diff --git a/trace_event/process_memory_dump.h b/trace_event/process_memory_dump.h
index 3b71a2c..da18a14 100644
--- a/trace_event/process_memory_dump.h
+++ b/trace_event/process_memory_dump.h
@@ -25,12 +25,8 @@
class MemoryDumpManager;
class MemoryDumpSessionState;
-// ProcessMemoryDump is as a strongly typed container which enforces the data
-// model for each memory dump and holds the dumps produced by the
-// MemoryDumpProvider(s) for a specific process.
-// At trace generation time (i.e. when AsValue() is called), ProcessMemoryDump
-// will compose a key-value dictionary of the various dumps obtained at trace
-// dump point time.
+// ProcessMemoryDump is a strongly typed container which holds the dumps
+// produced by the MemoryDumpProvider(s) for a specific process.
class BASE_EXPORT ProcessMemoryDump {
public:
struct MemoryAllocatorDumpEdge {
diff --git a/trace_event/process_memory_maps_dump_provider.cc b/trace_event/process_memory_maps_dump_provider.cc
index 2dcdd37..38b2573 100644
--- a/trace_event/process_memory_maps_dump_provider.cc
+++ b/trace_event/process_memory_maps_dump_provider.cc
@@ -168,7 +168,7 @@
bool ProcessMemoryMapsDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
ProcessMemoryDump* pmd) {
// Snapshot of memory maps is not taken for light dump requests.
- if (args.level_of_detail == MemoryDumpArgs::LevelOfDetail::LOW)
+ if (args.level_of_detail == MemoryDumpLevelOfDetail::LIGHT)
return true;
uint32 res = 0;
diff --git a/trace_event/process_memory_maps_dump_provider_unittest.cc b/trace_event/process_memory_maps_dump_provider_unittest.cc
index f951a09..a73a21c 100644
--- a/trace_event/process_memory_maps_dump_provider_unittest.cc
+++ b/trace_event/process_memory_maps_dump_provider_unittest.cc
@@ -110,7 +110,7 @@
const uint32 kProtR = ProcessMemoryMaps::VMRegion::kProtectionFlagsRead;
const uint32 kProtW = ProcessMemoryMaps::VMRegion::kProtectionFlagsWrite;
const uint32 kProtX = ProcessMemoryMaps::VMRegion::kProtectionFlagsExec;
- const MemoryDumpArgs dump_args = {MemoryDumpArgs::LevelOfDetail::HIGH};
+ const MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
auto pmmdp = ProcessMemoryMapsDumpProvider::GetInstance();
diff --git a/trace_event/process_memory_totals_dump_provider_unittest.cc b/trace_event/process_memory_totals_dump_provider_unittest.cc
index 015cb2d..4ec37f5 100644
--- a/trace_event/process_memory_totals_dump_provider_unittest.cc
+++ b/trace_event/process_memory_totals_dump_provider_unittest.cc
@@ -12,7 +12,7 @@
namespace trace_event {
TEST(ProcessMemoryTotalsDumpProviderTest, DumpRSS) {
- const MemoryDumpArgs high_detail_args = {MemoryDumpArgs::LevelOfDetail::HIGH};
+ const MemoryDumpArgs high_detail_args = {MemoryDumpLevelOfDetail::DETAILED};
auto pmtdp = ProcessMemoryTotalsDumpProvider::GetInstance();
scoped_ptr<ProcessMemoryDump> pmd_before(new ProcessMemoryDump(nullptr));
scoped_ptr<ProcessMemoryDump> pmd_after(new ProcessMemoryDump(nullptr));
diff --git a/trace_event/trace_config.cc b/trace_event/trace_config.cc
index 8f0f1c4..cf3f3a6 100644
--- a/trace_event/trace_config.cc
+++ b/trace_event/trace_config.cc
@@ -11,6 +11,7 @@
#include "base/strings/string_tokenizer.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/trace_event.h"
namespace base {
@@ -44,16 +45,14 @@
const char kTriggersParam[] = "triggers";
const char kPeriodicIntervalParam[] = "periodic_interval_ms";
const char kModeParam[] = "mode";
-const char kDetailedParam[] = "detailed";
-const char kLightParam[] = "light";
// Default configuration of memory dumps.
const TraceConfig::MemoryDumpTriggerConfig kDefaultHeavyMemoryDumpTrigger = {
2000, // periodic_interval_ms
- MemoryDumpArgs::LevelOfDetail::HIGH};
+ MemoryDumpLevelOfDetail::DETAILED};
const TraceConfig::MemoryDumpTriggerConfig kDefaultLightMemoryDumpTrigger = {
250, // periodic_interval_ms
- MemoryDumpArgs::LevelOfDetail::LOW};
+ MemoryDumpLevelOfDetail::LIGHT};
} // namespace
@@ -465,7 +464,6 @@
continue;
MemoryDumpTriggerConfig dump_config;
- std::string dump_type;
int interval = 0;
if (!trigger->GetInteger(kPeriodicIntervalParam, &interval)) {
@@ -473,14 +471,10 @@
}
DCHECK_GT(interval, 0);
dump_config.periodic_interval_ms = static_cast<uint32>(interval);
- dump_config.level_of_detail = MemoryDumpArgs::LevelOfDetail::LOW;
-
- if (trigger->GetString(kModeParam, &dump_type)) {
- if (dump_type == kDetailedParam) {
- dump_config.level_of_detail = MemoryDumpArgs::LevelOfDetail::HIGH;
- }
- }
-
+ std::string level_of_detail_str;
+ trigger->GetString(kModeParam, &level_of_detail_str);
+ dump_config.level_of_detail =
+ StringToMemoryDumpLevelOfDetail(level_of_detail_str);
memory_dump_config_.push_back(dump_config);
}
}
@@ -541,17 +535,8 @@
new base::DictionaryValue());
trigger_dict->SetInteger(kPeriodicIntervalParam,
static_cast<int>(config.periodic_interval_ms));
-
- switch (config.level_of_detail) {
- case MemoryDumpArgs::LevelOfDetail::LOW:
- trigger_dict->SetString(kModeParam, kLightParam);
- break;
- case MemoryDumpArgs::LevelOfDetail::HIGH:
- trigger_dict->SetString(kModeParam, kDetailedParam);
- break;
- default:
- NOTREACHED();
- }
+ trigger_dict->SetString(
+ kModeParam, MemoryDumpLevelOfDetailToString(config.level_of_detail));
triggers_list->Append(trigger_dict.Pass());
}
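
With this change trace_config.cc delegates serialization of the level of detail to the shared helpers declared in memory_dump_request_args.h. Their definitions are not part of this section; a plausible sketch, assuming only the two enum values and the "light"/"detailed" mode strings visible in this diff:

    // Hypothetical implementations (the real ones would live in
    // memory_dump_request_args.cc, which is not shown here).
    const char* MemoryDumpLevelOfDetailToString(
        const MemoryDumpLevelOfDetail& level_of_detail) {
      switch (level_of_detail) {
        case MemoryDumpLevelOfDetail::LIGHT:
          return "light";
        case MemoryDumpLevelOfDetail::DETAILED:
          return "detailed";
      }
      NOTREACHED();
      return "detailed";
    }

    MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
        const std::string& str) {
      if (str == "light")
        return MemoryDumpLevelOfDetail::LIGHT;
      if (str == "detailed")
        return MemoryDumpLevelOfDetail::DETAILED;
      NOTREACHED();
      return MemoryDumpLevelOfDetail::LIGHT;  // Arbitrary fallback.
    }
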
diff --git a/trace_event/trace_config.h b/trace_event/trace_config.h
index 82ef9d7..44cf16d 100644
--- a/trace_event/trace_config.h
+++ b/trace_event/trace_config.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -10,7 +10,7 @@
#include "base/base_export.h"
#include "base/gtest_prod_util.h"
-#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_dump_request_args.h"
#include "base/values.h"
namespace base {
@@ -40,7 +40,7 @@
// "memory-infra" category is enabled.
struct MemoryDumpTriggerConfig {
uint32 periodic_interval_ms;
- MemoryDumpArgs::LevelOfDetail level_of_detail;
+ MemoryDumpLevelOfDetail level_of_detail;
};
typedef std::vector<MemoryDumpTriggerConfig> MemoryDumpConfig;
diff --git a/trace_event/trace_config_memory_test_util.h b/trace_event/trace_config_memory_test_util.h
new file mode 100644
index 0000000..5a64d28
--- /dev/null
+++ b/trace_event/trace_config_memory_test_util.h
@@ -0,0 +1,79 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_
+#define BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_
+
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_dump_manager.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceConfigMemoryTestUtil {
+ public:
+ static std::string GetTraceConfig_PeriodicTriggers(int light_period,
+ int heavy_period) {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_sampling\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"triggers\":["
+ "{"
+ "\"mode\":\"light\","
+ "\"periodic_interval_ms\":%d"
+ "},"
+ "{"
+ "\"mode\":\"detailed\","
+ "\"periodic_interval_ms\":%d"
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory, light_period, heavy_period);
+ }
+
+ static std::string GetTraceConfig_EmptyTriggers() {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_sampling\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"triggers\":["
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory);
+ }
+
+ static std::string GetTraceConfig_NoTriggers() {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_sampling\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory);
+ }
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_
diff --git a/trace_event/trace_config_unittest.cc b/trace_event/trace_config_unittest.cc
index 7d8881d..9622595 100644
--- a/trace_event/trace_config_unittest.cc
+++ b/trace_event/trace_config_unittest.cc
@@ -1,9 +1,10 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_config.h"
+#include "base/trace_event/trace_config_memory_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -20,44 +21,6 @@
"\"record_mode\":\"record-until-full\""
"}";
-const char kMemoryDumpTraceConfigString[] =
- "{"
- "\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
- "\"enable_systrace\":false,"
- "\"included_categories\":["
- "\"disabled-by-default-memory-infra\""
- "],"
- "\"memory_dump_config\":{"
- "\"triggers\":["
- "{"
- "\"mode\":\"light\","
- "\"periodic_interval_ms\":200"
- "},"
- "{"
- "\"mode\":\"detailed\","
- "\"periodic_interval_ms\":2000"
- "}"
- "]"
- "},"
- "\"record_mode\":\"record-until-full\""
- "}";
-
-const char kTraceConfigStringWithEmptyTriggers[] =
- "{"
- "\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
- "\"enable_systrace\":false,"
- "\"included_categories\":["
- "\"disabled-by-default-memory-infra\""
- "],"
- "\"memory_dump_config\":{"
- "\"triggers\":["
- "]"
- "},"
- "\"record_mode\":\"record-until-full\""
- "}";
-
} // namespace
TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
@@ -528,24 +491,27 @@
}
TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
- TraceConfig tc(kMemoryDumpTraceConfigString);
- EXPECT_STREQ(kMemoryDumpTraceConfigString, tc.ToString().c_str());
+ std::string tc_str =
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(200, 2000);
+ TraceConfig tc(tc_str);
+ EXPECT_EQ(tc_str, tc.ToString());
EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
EXPECT_EQ(2u, tc.memory_dump_config_.size());
EXPECT_EQ(200u, tc.memory_dump_config_[0].periodic_interval_ms);
- EXPECT_EQ(MemoryDumpArgs::LevelOfDetail::LOW,
+ EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
tc.memory_dump_config_[0].level_of_detail);
EXPECT_EQ(2000u, tc.memory_dump_config_[1].periodic_interval_ms);
- EXPECT_EQ(MemoryDumpArgs::LevelOfDetail::HIGH,
+ EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
tc.memory_dump_config_[1].level_of_detail);
}
TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
// Empty trigger list should also be specified when converting back to string.
- TraceConfig tc(kTraceConfigStringWithEmptyTriggers);
- EXPECT_STREQ(kTraceConfigStringWithEmptyTriggers, tc.ToString().c_str());
+ TraceConfig tc(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
+ EXPECT_EQ(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers(),
+ tc.ToString());
EXPECT_EQ(0u, tc.memory_dump_config_.size());
}
diff --git a/trace_event/trace_event_etw_export_win.cc b/trace_event/trace_event_etw_export_win.cc
index 47935a4..c07c861 100644
--- a/trace_event/trace_event_etw_export_win.cc
+++ b/trace_event/trace_event_etw_export_win.cc
@@ -133,19 +133,18 @@
// This object will be created by each process. It's a background (low-priority)
// thread that will monitor the ETW keyword for any changes.
class TraceEventETWExport::ETWKeywordUpdateThread
- : public base::PlatformThread::Delegate {
+ : public PlatformThread::Delegate {
public:
ETWKeywordUpdateThread() {}
~ETWKeywordUpdateThread() override {}
// Implementation of PlatformThread::Delegate:
void ThreadMain() override {
- base::PlatformThread::SetName("ETW Keyword Update Thread");
- base::TimeDelta sleep_time =
- base::TimeDelta::FromMilliseconds(kUpdateTimerDelayMs);
+ PlatformThread::SetName("ETW Keyword Update Thread");
+ TimeDelta sleep_time = TimeDelta::FromMilliseconds(kUpdateTimerDelayMs);
while (1) {
- base::PlatformThread::Sleep(sleep_time);
- base::trace_event::TraceEventETWExport::UpdateETWKeyword();
+ PlatformThread::Sleep(sleep_time);
+ trace_event::TraceEventETWExport::UpdateETWKeyword();
}
}
diff --git a/trace_event/trace_event_etw_export_win.h b/trace_event/trace_event_etw_export_win.h
index c1eafff..7a1c029 100644
--- a/trace_event/trace_event_etw_export_win.h
+++ b/trace_event/trace_event_etw_export_win.h
@@ -12,11 +12,11 @@
#include "base/strings/string_piece.h"
#include "base/trace_event/trace_event_impl.h"
-// Fwd.
+namespace base {
+
template <typename Type>
struct StaticMemorySingletonTraits;
-namespace base {
namespace trace_event {
class BASE_EXPORT TraceEventETWExport {
@@ -86,7 +86,7 @@
bool etw_export_enabled_;
// Maps category names to their status (enabled/disabled).
- std::map<base::StringPiece, bool> categories_status_;
+ std::map<StringPiece, bool> categories_status_;
// Local copy of the ETW keyword.
uint64 etw_match_any_keyword_;
diff --git a/trace_event/trace_event_memory_overhead.cc b/trace_event/trace_event_memory_overhead.cc
index bf957b0..3fa6e42 100644
--- a/trace_event/trace_event_memory_overhead.cc
+++ b/trace_event/trace_event_memory_overhead.cc
@@ -144,7 +144,7 @@
it.second.allocated_size_in_bytes);
mad->AddScalar("resident_size", MemoryAllocatorDump::kUnitsBytes,
it.second.resident_size_in_bytes);
- mad->AddScalar(MemoryAllocatorDump::kNameObjectsCount,
+ mad->AddScalar(MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects, it.second.count);
}
}
diff --git a/trace_event/trace_event_synthetic_delay.cc b/trace_event/trace_event_synthetic_delay.cc
index bad79cc..cd0c364 100644
--- a/trace_event/trace_event_synthetic_delay.cc
+++ b/trace_event/trace_event_synthetic_delay.cc
@@ -24,7 +24,7 @@
void ResetAllDelays();
// TraceEventSyntheticDelayClock implementation.
- base::TimeTicks Now() override;
+ TimeTicks Now() override;
private:
TraceEventSyntheticDelayRegistry();
@@ -34,7 +34,7 @@
Lock lock_;
TraceEventSyntheticDelay delays_[kMaxSyntheticDelays];
TraceEventSyntheticDelay dummy_delay_;
- base::subtle::Atomic32 delay_count_;
+ subtle::Atomic32 delay_count_;
DISALLOW_COPY_AND_ASSIGN(TraceEventSyntheticDelayRegistry);
};
@@ -57,8 +57,7 @@
clock_ = clock;
}
-void TraceEventSyntheticDelay::SetTargetDuration(
- base::TimeDelta target_duration) {
+void TraceEventSyntheticDelay::SetTargetDuration(TimeDelta target_duration) {
AutoLock lock(lock_);
target_duration_ = target_duration;
trigger_count_ = 0;
@@ -85,7 +84,7 @@
if (!target_duration_.ToInternalValue())
return;
- base::TimeTicks start_time = clock_->Now();
+ TimeTicks start_time = clock_->Now();
{
AutoLock lock(lock_);
if (++begin_count_ != 1)
@@ -94,15 +93,15 @@
}
}
-void TraceEventSyntheticDelay::BeginParallel(base::TimeTicks* out_end_time) {
+void TraceEventSyntheticDelay::BeginParallel(TimeTicks* out_end_time) {
// See note in Begin().
ANNOTATE_BENIGN_RACE(&target_duration_, "Synthetic delay duration");
if (!target_duration_.ToInternalValue()) {
- *out_end_time = base::TimeTicks();
+ *out_end_time = TimeTicks();
return;
}
- base::TimeTicks start_time = clock_->Now();
+ TimeTicks start_time = clock_->Now();
{
AutoLock lock(lock_);
*out_end_time = CalculateEndTimeLocked(start_time);
@@ -115,7 +114,7 @@
if (!target_duration_.ToInternalValue())
return;
- base::TimeTicks end_time;
+ TimeTicks end_time;
{
AutoLock lock(lock_);
if (!begin_count_ || --begin_count_ != 0)
@@ -126,21 +125,21 @@
ApplyDelay(end_time);
}
-void TraceEventSyntheticDelay::EndParallel(base::TimeTicks end_time) {
+void TraceEventSyntheticDelay::EndParallel(TimeTicks end_time) {
if (!end_time.is_null())
ApplyDelay(end_time);
}
-base::TimeTicks TraceEventSyntheticDelay::CalculateEndTimeLocked(
- base::TimeTicks start_time) {
+TimeTicks TraceEventSyntheticDelay::CalculateEndTimeLocked(
+ TimeTicks start_time) {
if (mode_ == ONE_SHOT && trigger_count_++)
- return base::TimeTicks();
+ return TimeTicks();
else if (mode_ == ALTERNATING && trigger_count_++ % 2)
- return base::TimeTicks();
+ return TimeTicks();
return start_time + target_duration_;
}
-void TraceEventSyntheticDelay::ApplyDelay(base::TimeTicks end_time) {
+void TraceEventSyntheticDelay::ApplyDelay(TimeTicks end_time) {
TRACE_EVENT0("synthetic_delay", name_.c_str());
while (clock_->Now() < end_time) {
// Busy loop.
@@ -161,14 +160,14 @@
const char* name) {
// Try to find an existing delay first without locking to make the common case
// fast.
- int delay_count = base::subtle::Acquire_Load(&delay_count_);
+ int delay_count = subtle::Acquire_Load(&delay_count_);
for (int i = 0; i < delay_count; ++i) {
if (!strcmp(name, delays_[i].name_.c_str()))
return &delays_[i];
}
AutoLock lock(lock_);
- delay_count = base::subtle::Acquire_Load(&delay_count_);
+ delay_count = subtle::Acquire_Load(&delay_count_);
for (int i = 0; i < delay_count; ++i) {
if (!strcmp(name, delays_[i].name_.c_str()))
return &delays_[i];
@@ -180,19 +179,19 @@
return &dummy_delay_;
delays_[delay_count].Initialize(std::string(name), this);
- base::subtle::Release_Store(&delay_count_, delay_count + 1);
+ subtle::Release_Store(&delay_count_, delay_count + 1);
return &delays_[delay_count];
}
-base::TimeTicks TraceEventSyntheticDelayRegistry::Now() {
- return base::TimeTicks::Now();
+TimeTicks TraceEventSyntheticDelayRegistry::Now() {
+ return TimeTicks::Now();
}
void TraceEventSyntheticDelayRegistry::ResetAllDelays() {
AutoLock lock(lock_);
- int delay_count = base::subtle::Acquire_Load(&delay_count_);
+ int delay_count = subtle::Acquire_Load(&delay_count_);
for (int i = 0; i < delay_count; ++i) {
- delays_[i].SetTargetDuration(base::TimeDelta());
+ delays_[i].SetTargetDuration(TimeDelta());
delays_[i].SetClock(this);
}
}
diff --git a/trace_event/trace_event_win.h b/trace_event/trace_event_win.h
index 4161361..e64be4d 100644
--- a/trace_event/trace_event_win.h
+++ b/trace_event/trace_event_win.h
@@ -12,11 +12,11 @@
#include "base/trace_event/trace_event.h"
#include "base/win/event_trace_provider.h"
-// Fwd.
+namespace base {
+
template <typename Type>
struct StaticMemorySingletonTraits;
-namespace base {
namespace trace_event {
// This EtwTraceProvider subclass implements ETW logging
diff --git a/trace_event/trace_log.cc b/trace_event/trace_log.cc
index 2822c37..519bb2b 100644
--- a/trace_event/trace_log.cc
+++ b/trace_event/trace_log.cc
@@ -39,18 +39,22 @@
#include "base/trace_event/trace_event_win.h"
#endif
-class DeleteTraceLogForTesting {
- public:
- static void Delete() {
- Singleton<base::trace_event::TraceLog,
- LeakySingletonTraits<base::trace_event::TraceLog>>::OnExit(0);
- }
-};
-
// The thread buckets for the sampling profiler.
BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
namespace base {
+namespace internal {
+
+class DeleteTraceLogForTesting {
+ public:
+ static void Delete() {
+ Singleton<trace_event::TraceLog,
+ LeakySingletonTraits<trace_event::TraceLog>>::OnExit(0);
+ }
+};
+
+} // namespace internal
+
namespace trace_event {
namespace {
@@ -1539,7 +1543,7 @@
}
void TraceLog::DeleteForTesting() {
- DeleteTraceLogForTesting::Delete();
+ internal::DeleteTraceLogForTesting::Delete();
}
TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
diff --git a/trace_event/trace_log.h b/trace_event/trace_log.h
index 727e1cd..29f6aa0 100644
--- a/trace_event/trace_log.h
+++ b/trace_event/trace_log.h
@@ -25,11 +25,10 @@
TRACE_EVENT_PHASE_INSTANT, name, reinterpret_cast<const void*>(id), \
extra)
-template <typename Type>
-struct DefaultSingletonTraits;
-
namespace base {
+template <typename Type>
+struct DefaultSingletonTraits;
class RefCountedString;
namespace trace_event {
diff --git a/trace_event/winheap_dump_provider_win.cc b/trace_event/winheap_dump_provider_win.cc
index 4390376..11f9dc4 100644
--- a/trace_event/winheap_dump_provider_win.cc
+++ b/trace_event/winheap_dump_provider_win.cc
@@ -35,7 +35,7 @@
inner_dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes,
heap_info.allocated_size);
- inner_dump->AddScalar(MemoryAllocatorDump::kNameObjectsCount,
+ inner_dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects,
heap_info.block_count);
}
diff --git a/trace_event/winheap_dump_provider_win_unittest.cc b/trace_event/winheap_dump_provider_win_unittest.cc
index 93ae299..2a072aa 100644
--- a/trace_event/winheap_dump_provider_win_unittest.cc
+++ b/trace_event/winheap_dump_provider_win_unittest.cc
@@ -15,7 +15,7 @@
TEST(WinHeapDumpProviderTest, OnMemoryDump) {
ProcessMemoryDump pmd(make_scoped_refptr(new MemoryDumpSessionState()));
- MemoryDumpArgs dump_args = {MemoryDumpArgs::LevelOfDetail::HIGH};
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
WinHeapDumpProvider* winheap_dump_provider =
WinHeapDumpProvider::GetInstance();
diff --git a/values.cc b/values.cc
index 5374d6c..aa8de36 100644
--- a/values.cc
+++ b/values.cc
@@ -351,6 +351,16 @@
///////////////////// DictionaryValue ////////////////////
+// static
+scoped_ptr<DictionaryValue> DictionaryValue::From(scoped_ptr<Value> value) {
+ DictionaryValue* out;
+ if (value && value->GetAsDictionary(&out)) {
+ ignore_result(value.release());
+ return make_scoped_ptr(out);
+ }
+ return nullptr;
+}
+
DictionaryValue::DictionaryValue()
: Value(TYPE_DICTIONARY) {
}
@@ -869,6 +879,16 @@
///////////////////// ListValue ////////////////////
+// static
+scoped_ptr<ListValue> ListValue::From(scoped_ptr<Value> value) {
+ ListValue* out;
+ if (value && value->GetAsList(&out)) {
+ ignore_result(value.release());
+ return make_scoped_ptr(out);
+ }
+ return nullptr;
+}
+
ListValue::ListValue() : Value(TYPE_LIST) {
}
diff --git a/values.h b/values.h
index 0ff6217..5a3e220 100644
--- a/values.h
+++ b/values.h
@@ -209,6 +209,9 @@
// are |std::string|s and should be UTF-8 encoded.
class BASE_EXPORT DictionaryValue : public Value {
public:
+ // Returns |value| if it is a dictionary, nullptr otherwise.
+ static scoped_ptr<DictionaryValue> From(scoped_ptr<Value> value);
+
DictionaryValue();
~DictionaryValue() override;
@@ -386,6 +389,9 @@
typedef ValueVector::iterator iterator;
typedef ValueVector::const_iterator const_iterator;
+ // Returns |value| if it is a list, nullptr otherwise.
+ static scoped_ptr<ListValue> From(scoped_ptr<Value> value);
+
ListValue();
~ListValue() override;
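
The new From() helpers make the common parse-then-downcast sequence safe. A usage sketch (the JSONReader calls are an assumption about the caller, not part of this change):

    #include "base/json/json_reader.h"
    #include "base/logging.h"
    #include "base/values.h"

    void FromExample() {
      // Downcast succeeds: the parsed value is a dictionary.
      scoped_ptr<base::DictionaryValue> dict = base::DictionaryValue::From(
          base::JSONReader::Read("{\"mode\":\"light\"}"));
      DCHECK(dict);

      // Downcast fails cleanly: a dictionary is not a list, so From()
      // returns nullptr instead of an unchecked cast.
      scoped_ptr<base::ListValue> list =
          base::ListValue::From(base::JSONReader::Read("{}"));
      DCHECK(!list);
    }
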
diff --git a/win/scoped_handle.h b/win/scoped_handle.h
index 97fd7a5..d1eb1d6 100644
--- a/win/scoped_handle.h
+++ b/win/scoped_handle.h
@@ -27,9 +27,13 @@
// Generic wrapper for raw handles that takes care of closing handles
// automatically. The class interface follows the style of
-// the ScopedFILE class with one addition:
+// the ScopedFILE class with two additions:
// - IsValid() method can tolerate multiple invalid handle values such as NULL
// and INVALID_HANDLE_VALUE (-1) for Win32 handles.
+// - Set() (and the constructors and assignment operators that call it)
+// preserves the Windows LastError code. This ensures that GetLastError() can
+// be called after stashing a handle in a GenericScopedHandle object. Doing
+// this explicitly is necessary because of bug 528394 and VC++ 2015.
template <class Traits, class Verifier>
class GenericScopedHandle {
MOVE_ONLY_TYPE_FOR_CPP_03(GenericScopedHandle, RValue)
@@ -66,6 +70,8 @@
void Set(Handle handle) {
if (handle_ != handle) {
+ // Preserve old LastError to avoid bug 528394.
+ auto last_error = ::GetLastError();
Close();
if (Traits::IsHandleValid(handle)) {
@@ -73,6 +79,7 @@
Verifier::StartTracking(handle, this, BASE_WIN_GET_CALLER,
tracked_objects::GetProgramCounter());
}
+ ::SetLastError(last_error);
}
}
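
The sequence this protects is error reporting immediately after a failed handle creation. A sketch of the caller-side pattern (illustrative, not from this change):

    #include <windows.h>

    #include "base/logging.h"
    #include "base/win/scoped_handle.h"

    void MutexExample() {
      // CreateMutex sets LastError on failure (and sometimes on success,
      // e.g. ERROR_ALREADY_EXISTS).
      base::win::ScopedHandle mutex(::CreateMutex(nullptr, FALSE, nullptr));
      if (!mutex.IsValid()) {
        // PLOG reads ::GetLastError(); this is only meaningful because the
        // ScopedHandle constructor (via Set()) preserves the error code.
        PLOG(ERROR) << "CreateMutex failed";
      }
    }
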
diff --git a/win/scoped_handle_unittest.cc b/win/scoped_handle_unittest.cc
new file mode 100644
index 0000000..70431ac
--- /dev/null
+++ b/win/scoped_handle_unittest.cc
@@ -0,0 +1,32 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_handle.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(ScopedHandleTest, ScopedHandle) {
+ // Any illegal error code will do. We just need to test that it is preserved
+ // by ScopedHandle to avoid bug 528394.
+ const DWORD magic_error = 0x12345678;
+
+ HANDLE handle = ::CreateMutex(nullptr, FALSE, nullptr);
+ // Call SetLastError after creating the handle.
+ ::SetLastError(magic_error);
+ base::win::ScopedHandle handle_holder(handle);
+ EXPECT_EQ(magic_error, ::GetLastError());
+
+ // Create a new handle and then set LastError again.
+ handle = ::CreateMutex(nullptr, FALSE, nullptr);
+ ::SetLastError(magic_error);
+ handle_holder.Set(handle);
+ EXPECT_EQ(magic_error, ::GetLastError());
+
+ // Create a new handle and then set LastError again.
+ handle = ::CreateMutex(nullptr, FALSE, nullptr);
+ base::win::ScopedHandle handle_source(handle);
+ ::SetLastError(magic_error);
+  handle_holder = handle_source.Pass();
+ EXPECT_EQ(magic_error, ::GetLastError());
+}