Revved to chromium 4dfb55c9cf0950b8bac8b10070c9b8f3e7de66c2 refs/remotes/origin/HEAD
diff --git a/BUILD.gn b/BUILD.gn
index a190ea3..18f2b01 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -78,7 +78,7 @@
"atomicops.h",
"atomicops_internals_gcc.h",
"atomicops_internals_mac.h",
- "atomicops_internals_tsan.h",
+ "atomicops_internals_portable.h",
"atomicops_internals_x86_gcc.cc",
"atomicops_internals_x86_gcc.h",
"atomicops_internals_x86_msvc.h",
@@ -325,8 +325,6 @@
"memory/discardable_memory_emulated.h",
"memory/discardable_memory_linux.cc",
"memory/discardable_memory_mac.cc",
- "memory/discardable_memory_malloc.cc",
- "memory/discardable_memory_malloc.h",
"memory/discardable_memory_manager.cc",
"memory/discardable_memory_manager.h",
"memory/discardable_memory_win.cc",
@@ -865,6 +863,10 @@
# Mac.
if (is_mac) {
+ sources += [
+ "memory/discardable_memory_mach.cc",
+ "memory/discardable_memory_mach.h",
+ ]
sources -= [
"base_paths_posix.cc",
"native_library_posix.cc",
diff --git a/android/java/src/org/chromium/base/ApplicationStatus.java b/android/java/src/org/chromium/base/ApplicationStatus.java
index 53134b5..c706dfd 100644
--- a/android/java/src/org/chromium/base/ApplicationStatus.java
+++ b/android/java/src/org/chromium/base/ApplicationStatus.java
@@ -396,7 +396,7 @@
*/
@CalledByNative
private static void registerThreadSafeNativeApplicationStateListener() {
- ThreadUtils.runOnUiThread(new Runnable () {
+ ThreadUtils.runOnUiThread(new Runnable() {
@Override
public void run() {
if (sNativeApplicationStateListener != null) return;
diff --git a/android/java/src/org/chromium/base/library_loader/LibraryLoader.java b/android/java/src/org/chromium/base/library_loader/LibraryLoader.java
index 91e5367..86bca43 100644
--- a/android/java/src/org/chromium/base/library_loader/LibraryLoader.java
+++ b/android/java/src/org/chromium/base/library_loader/LibraryLoader.java
@@ -10,7 +10,6 @@
import org.chromium.base.CommandLine;
import org.chromium.base.JNINamespace;
-import org.chromium.base.SysUtils;
import org.chromium.base.TraceEvent;
/**
@@ -46,6 +45,15 @@
// library_loader_hooks.cc).
private static boolean sInitialized = false;
+ // One-way switches recording attempts to use Relro sharing in the browser.
+ // The flags are used to report UMA stats later.
+ private static boolean sIsUsingBrowserSharedRelros = false;
+ private static boolean sLoadAtFixedAddressFailed = false;
+
+ // One-way switch recording whether the device supports memory mapping
+ // APK files with executable permissions. Only used in the browser.
+ private static boolean sLibraryLoadFromApkSupported = false;
+
// One-way switch becomes true if the system library loading failed,
// and the right native library was found and loaded by the hack.
// The flag is used to report UMA stats later.
@@ -156,10 +164,17 @@
// Load libraries using the Chromium linker.
Linker.prepareLibraryLoad();
+ // Check if the device supports loading a library directly from the APK file.
+ String apkfile = context.getApplicationInfo().sourceDir;
+ if (Linker.isInBrowserProcess()) {
+ sLibraryLoadFromApkSupported = Linker.checkLibraryLoadFromApkSupport(
+ apkfile);
+ }
+
for (String library : NativeLibraries.LIBRARIES) {
String zipfile = null;
if (Linker.isInZipFile()) {
- zipfile = context.getApplicationInfo().sourceDir;
+ zipfile = apkfile;
Log.i(TAG, "Loading " + library + " from within " + zipfile);
} else {
Log.i(TAG, "Loading: " + library);
@@ -167,6 +182,7 @@
boolean isLoaded = false;
if (Linker.isUsingBrowserSharedRelros()) {
+ sIsUsingBrowserSharedRelros = true;
try {
if (zipfile != null) {
Linker.loadLibraryInZipFile(zipfile, library);
@@ -178,6 +194,7 @@
Log.w(TAG, "Failed to load native library with shared RELRO, " +
"retrying without");
Linker.disableSharedRelros();
+ sLoadAtFixedAddressFailed = true;
}
}
if (!isLoaded) {
@@ -229,9 +246,9 @@
Log.i(TAG, String.format(
"Expected native library version number \"%s\"," +
"actual native library version number \"%s\"",
- NativeLibraries.VERSION_NUMBER,
+ NativeLibraries.sVersionNumber,
nativeGetVersionNumber()));
- if (!NativeLibraries.VERSION_NUMBER.equals(nativeGetVersionNumber())) {
+ if (!NativeLibraries.sVersionNumber.equals(nativeGetVersionNumber())) {
throw new ProcessInitException(LoaderErrors.LOADER_ERROR_NATIVE_LIBRARY_WRONG_VERSION);
}
}
@@ -291,15 +308,33 @@
// Called after all native initializations are complete.
public static void onNativeInitializationComplete() {
- // Record histogram for the Chromium linker.
- if (Linker.isUsed()) {
- nativeRecordChromiumAndroidLinkerHistogram(Linker.loadAtFixedAddressFailed(),
- SysUtils.isLowEndDevice());
- }
-
+ recordBrowserProcessHistogram();
nativeRecordNativeLibraryHack(sNativeLibraryHackWasUsed);
}
+ // Record Chromium linker histogram state for the main browser process. Called from
+ // onNativeInitializationComplete().
+ private static void recordBrowserProcessHistogram() {
+ if (Linker.isUsed()) {
+ assert Linker.isInBrowserProcess();
+ nativeRecordChromiumAndroidLinkerBrowserHistogram(sIsUsingBrowserSharedRelros,
+ sLoadAtFixedAddressFailed,
+ sLibraryLoadFromApkSupported);
+ }
+ }
+
+ // Register pending Chromium linker histogram state for renderer processes. This cannot be
+ // recorded as a histogram immediately because histograms and IPC are not ready at the
+ // time it is captured. This function stores a pending value, so that a later call to
+ // RecordChromiumAndroidLinkerRendererHistogram() will record it correctly.
+ public static void registerRendererProcessHistogram(boolean requestedSharedRelro,
+ boolean loadAtFixedAddressFailed) {
+ if (Linker.isUsed()) {
+ nativeRegisterChromiumAndroidLinkerRendererHistogram(requestedSharedRelro,
+ loadAtFixedAddressFailed);
+ }
+ }
+
private static native void nativeInitCommandLine(String[] initCommandLine);
// Only methods needed before or during normal JNI registration are during System.OnLoad.
@@ -310,12 +345,21 @@
// Return true on success and false on failure.
private static native boolean nativeLibraryLoaded();
- // Method called to record statistics about the Chromium linker operation,
- // i.e. whether the library failed to be loaded at a fixed address, and
- // whether the device is 'low-memory'.
- private static native void nativeRecordChromiumAndroidLinkerHistogram(
- boolean loadedAtFixedAddressFailed,
- boolean isLowMemoryDevice);
+ // Method called to record statistics about the Chromium linker operation for the main
+ // browser process. Indicates whether the linker attempted relro sharing for the browser,
+ // and if it did, whether the library failed to load at a fixed address. Also records
+ // support for memory mapping APK files with executable permissions.
+ private static native void nativeRecordChromiumAndroidLinkerBrowserHistogram(
+ boolean isUsingBrowserSharedRelros,
+ boolean loadAtFixedAddressFailed,
+ boolean apkMemoryMappingSupported);
+
+ // Method called to register (for later recording) statistics about the Chromium linker
+ // operation for a renderer process. Indicates whether the linker attempted relro sharing,
+ // and if it did, whether the library failed to load at a fixed address.
+ private static native void nativeRegisterChromiumAndroidLinkerRendererHistogram(
+ boolean requestedSharedRelro,
+ boolean loadAtFixedAddressFailed);
// Get the version of the native library. This is needed so that we can check we
// have the right version before initializing the (rest of the) JNI.
diff --git a/android/java/src/org/chromium/base/library_loader/Linker.java b/android/java/src/org/chromium/base/library_loader/Linker.java
index 2a3006a..cc30af4 100644
--- a/android/java/src/org/chromium/base/library_loader/Linker.java
+++ b/android/java/src/org/chromium/base/library_loader/Linker.java
@@ -136,7 +136,7 @@
*
* This behaviour is altered by the BROWSER_SHARED_RELRO_CONFIG configuration
* variable below, which may force the browser to load the libraries at
- * fixed addresses to.
+ * fixed addresses too.
*
* - Once all libraries are loaded in the browser process, one can call
* getSharedRelros() which returns a Bundle instance containing a map that
@@ -215,9 +215,6 @@
// Current fixed-location load address for the next library called by loadLibrary().
private static long sCurrentLoadAddress = 0;
- // Becomes true if any library fails to load at a given, non-0, fixed address.
- private static boolean sLoadAtFixedAddressFailed = false;
-
// Becomes true once prepareLibraryLoad() has been called.
private static boolean sPrepareLibraryLoadCalled = false;
@@ -227,7 +224,7 @@
if (!sInitialized) {
sRelroSharingSupported = false;
- if (NativeLibraries.USE_LINKER) {
+ if (NativeLibraries.sUseLinker) {
if (DEBUG) Log.i(TAG, "Loading lib" + TAG + ".so");
try {
System.loadLibrary(TAG);
@@ -282,7 +279,7 @@
/**
* A public interface used to run runtime linker tests after loading
* libraries. Should only be used to implement the linker unit tests,
- * which is controlled by the value of NativeLibraries.ENABLE_LINKER_TESTS
+ * which is controlled by the value of NativeLibraries.sEnableLinkerTests
* configured at build time.
*/
public interface TestRunner {
@@ -306,7 +303,7 @@
public static void setTestRunnerClassName(String testRunnerClassName) {
if (DEBUG) Log.i(TAG, "setTestRunnerByClassName(" + testRunnerClassName + ") called");
- if (!NativeLibraries.ENABLE_LINKER_TESTS) {
+ if (!NativeLibraries.sEnableLinkerTests) {
// Ignore this in production code to prevent malvolent runtime injection.
return;
}
@@ -338,7 +335,7 @@
public static void setMemoryDeviceConfig(int memoryDeviceConfig) {
if (DEBUG) Log.i(TAG, "setMemoryDeviceConfig(" + memoryDeviceConfig + ") called");
// Sanity check. This method should only be called during tests.
- assert NativeLibraries.ENABLE_LINKER_TESTS;
+ assert NativeLibraries.sEnableLinkerTests;
synchronized (Linker.class) {
assert sMemoryDeviceConfig == MEMORY_DEVICE_CONFIG_INIT;
assert memoryDeviceConfig == MEMORY_DEVICE_CONFIG_LOW ||
@@ -361,8 +358,8 @@
public static boolean isUsed() {
// Only GYP targets that are APKs and have the 'use_chromium_linker' variable
// defined as 1 will use this linker. For all others (the default), the
- // auto-generated NativeLibraries.USE_LINKER variable will be false.
- if (!NativeLibraries.USE_LINKER)
+ // auto-generated NativeLibraries.sUseLinker variable will be false.
+ if (!NativeLibraries.sUseLinker)
return false;
synchronized (Linker.class) {
@@ -385,11 +382,24 @@
}
/**
+ * Call this method to determine if the linker is running in the browser
+ * process.
+ *
+ * @return true if the linker is running in the browser process.
+ */
+ public static boolean isInBrowserProcess() {
+ synchronized (Linker.class) {
+ ensureInitializedLocked();
+ return sInBrowserProcess;
+ }
+ }
+
+ /**
* Call this method to determine if the chromium project must load
* the library directly from the zip file.
*/
public static boolean isInZipFile() {
- return NativeLibraries.USE_LIBRARY_IN_ZIP_FILE;
+ return NativeLibraries.sUseLibraryInZipFile;
}
/**
@@ -458,7 +468,7 @@
}
}
- if (NativeLibraries.ENABLE_LINKER_TESTS && sTestRunnerClassName != null) {
+ if (NativeLibraries.sEnableLinkerTests && sTestRunnerClassName != null) {
// The TestRunner implementation must be instantiated _after_
// all libraries are loaded to ensure that its native methods
// are properly registered.
@@ -700,15 +710,6 @@
}
/**
- * Returns whether the linker was unable to load one library at a given fixed address.
- *
- * @return true if at least one library was not loaded at the expected fixed address.
- */
- public static boolean loadAtFixedAddressFailed() {
- return sLoadAtFixedAddressFailed;
- }
-
- /**
* Load a native shared library with the Chromium linker.
* The shared library is uncompressed and page aligned inside the zipfile.
* Note the crazy linker treats libraries and files as equivalent,
@@ -771,11 +772,9 @@
String sharedRelRoName = libName;
if (zipFile != null) {
- if (!nativeLoadLibraryInZipFile(
- zipFile, libName, loadAddress, libInfo)) {
- String errorMessage =
- "Unable to load library: " + libName + " in: " +
- zipFile;
+ if (!nativeLoadLibraryInZipFile(zipFile, libName, loadAddress, libInfo)) {
+ String errorMessage = "Unable to load library: " + libName +
+ ", in: " + zipFile;
Log.e(TAG, errorMessage);
throw new UnsatisfiedLinkError(errorMessage);
}
@@ -787,9 +786,6 @@
throw new UnsatisfiedLinkError(errorMessage);
}
}
- // Keep track whether the library has been loaded at the expected load address.
- if (loadAddress != 0 && loadAddress != libInfo.mLoadAddress)
- sLoadAtFixedAddressFailed = true;
// Print the load address to the logcat when testing the linker. The format
// of the string is expected by the Python test_runner script as one of:
@@ -797,7 +793,7 @@
// RENDERER_LIBRARY_ADDRESS: <library-name> <address>
// Where <library-name> is the library name, and <address> is the hexadecimal load
// address.
- if (NativeLibraries.ENABLE_LINKER_TESTS) {
+ if (NativeLibraries.sEnableLinkerTests) {
Log.i(TAG, String.format(
Locale.US,
"%s_LIBRARY_ADDRESS: %s %x",
@@ -837,6 +833,24 @@
}
/**
+ * Check whether the device supports loading a library directly from the APK file.
+ *
+ * @param apkFile Filename of the APK.
+ * @return true if supported.
+ */
+ public static boolean checkLibraryLoadFromApkSupport(String apkFile) {
+ synchronized (Linker.class) {
+ ensureInitializedLocked();
+
+ if (DEBUG) Log.i(TAG, "checkLibraryLoadFromApkSupported: " + apkFile);
+ boolean supported = nativeCheckLibraryLoadFromApkSupport(apkFile);
+ if (DEBUG) Log.i(TAG, "Loading a library directly from the APK file " +
+ (supported ? "" : "NOT ") + "supported");
+ return supported;
+ }
+ }
+
+ /**
* Move activity from the native thread to the main UI thread.
* Called from native code on its own thread. Posts a callback from
* the UI thread back to native code.
@@ -934,6 +948,16 @@
private static native long nativeGetRandomBaseLoadAddress(long sizeBytes);
/**
+ * Native method which checks whether the device supports loading a library
+ * directly from the APK file.
+ *
+ * @param apkFile Filename of the APK.
+ * @return true if supported.
+ *
+ */
+ private static native boolean nativeCheckLibraryLoadFromApkSupport(String apkFile);
+
+ /**
* Record information for a given library.
* IMPORTANT: Native code knows about this class's fields, so
* don't change them without modifying the corresponding C++ sources.
diff --git a/android/java/templates/NativeLibraries.template b/android/java/templates/NativeLibraries.template
index 165f468..f52acb4 100644
--- a/android/java/templates/NativeLibraries.template
+++ b/android/java/templates/NativeLibraries.template
@@ -49,21 +49,21 @@
// Set to true to enable the use of the Chromium Linker.
#if defined(ENABLE_CHROMIUM_LINKER)
- public static boolean USE_LINKER = true;
+ public static boolean sUseLinker = true;
#else
- public static boolean USE_LINKER = false;
+ public static boolean sUseLinker = false;
#endif
#if defined(ENABLE_CHROMIUM_LINKER_LIBRARY_IN_ZIP_FILE)
- public static boolean USE_LIBRARY_IN_ZIP_FILE = true;
+ public static boolean sUseLibraryInZipFile = true;
#else
- public static boolean USE_LIBRARY_IN_ZIP_FILE = false;
+ public static boolean sUseLibraryInZipFile = false;
#endif
#if defined(ENABLE_CHROMIUM_LINKER_TESTS)
- public static boolean ENABLE_LINKER_TESTS = true;
+ public static boolean sEnableLinkerTests = true;
#else
- public static boolean ENABLE_LINKER_TESTS = false;
+ public static boolean sEnableLinkerTests = false;
#endif
// This is the list of native libraries to be loaded (in the correct order)
@@ -83,7 +83,7 @@
// This is the expected version of the 'main' native library, which is the one that
// implements the initial set of base JNI functions including
// base::android::nativeGetVersionName()
- static String VERSION_NUMBER =
+ static String sVersionNumber =
#if defined(NATIVE_LIBRARIES_VERSION_NUMBER)
NATIVE_LIBRARIES_VERSION_NUMBER;
#else
diff --git a/android/jni_generator/java/src/org/chromium/example/jni_generator/SampleForTests.java b/android/jni_generator/java/src/org/chromium/example/jni_generator/SampleForTests.java
index df8b80f..c3a1df5 100644
--- a/android/jni_generator/java/src/org/chromium/example/jni_generator/SampleForTests.java
+++ b/android/jni_generator/java/src/org/chromium/example/jni_generator/SampleForTests.java
@@ -162,13 +162,13 @@
*/
// String constants that look like comments don't confuse the generator:
- private String arrgh = "*/*";
+ private String mArrgh = "*/*";
//------------------------------------------------------------------------------------------------
// Java fields which are accessed from C++ code only must be annotated with @AccessedByNative to
// prevent them being eliminated when unreferenced code is stripped.
@AccessedByNative
- private int javaField;
+ private int mJavaField;
//------------------------------------------------------------------------------------------------
// The following methods demonstrate declaring methods to call into C++ from Java.
diff --git a/android/library_loader/library_loader_hooks.cc b/android/library_loader/library_loader_hooks.cc
index 87f7910..819fe3d 100644
--- a/android/library_loader/library_loader_hooks.cc
+++ b/android/library_loader/library_loader_hooks.cc
@@ -19,8 +19,94 @@
const char* g_library_version_number = "";
LibraryLoadedHook* g_registration_callback = NULL;
+enum RendererHistogramCode {
+ // Renderer load at fixed address success, fail, or not attempted.
+ // Renderers do not attempt to load at a fixed address if on a
+ // low-memory device on which browser load at fixed address has already
+ // failed.
+ LFA_SUCCESS = 0,
+ LFA_BACKOFF_USED = 1,
+ LFA_NOT_ATTEMPTED = 2,
+
+ // End sentinel, also used as nothing-pending indicator.
+ MAX_RENDERER_HISTOGRAM_CODE = 3,
+ NO_PENDING_HISTOGRAM_CODE = MAX_RENDERER_HISTOGRAM_CODE
+};
+
+enum BrowserHistogramCode {
+ // Non-low-memory random address browser loads.
+ NORMAL_LRA_SUCCESS = 0,
+
+ // Low-memory browser loads at fixed address, success or fail.
+ LOW_MEMORY_LFA_SUCCESS = 1,
+ LOW_MEMORY_LFA_BACKOFF_USED = 2,
+
+ MAX_BROWSER_HISTOGRAM_CODE = 3,
+};
+
+RendererHistogramCode g_renderer_histogram_code = NO_PENDING_HISTOGRAM_CODE;
+
+enum LibraryLoadFromApkSupportCode {
+ // The device's support for loading a library directly from the APK file.
+ NOT_SUPPORTED = 0,
+ SUPPORTED = 1,
+
+ MAX_LIBRARY_LOAD_FROM_APK_SUPPORT_CODE = 2,
+};
+
} // namespace
+static void RegisterChromiumAndroidLinkerRendererHistogram(
+ JNIEnv* env,
+ jclass clazz,
+ jboolean requested_shared_relro,
+ jboolean load_at_fixed_address_failed) {
+ // Note a pending histogram value for later recording.
+ if (requested_shared_relro) {
+ g_renderer_histogram_code = load_at_fixed_address_failed
+ ? LFA_BACKOFF_USED : LFA_SUCCESS;
+ } else {
+ g_renderer_histogram_code = LFA_NOT_ATTEMPTED;
+ }
+}
+
+void RecordChromiumAndroidLinkerRendererHistogram() {
+ if (g_renderer_histogram_code == NO_PENDING_HISTOGRAM_CODE)
+ return;
+ // Record and release the pending histogram value.
+ UMA_HISTOGRAM_ENUMERATION("ChromiumAndroidLinker.RendererStates",
+ g_renderer_histogram_code,
+ MAX_RENDERER_HISTOGRAM_CODE);
+ g_renderer_histogram_code = NO_PENDING_HISTOGRAM_CODE;
+}
+
+static void RecordChromiumAndroidLinkerBrowserHistogram(
+ JNIEnv* env,
+ jclass clazz,
+ jboolean is_using_browser_shared_relros,
+ jboolean load_at_fixed_address_failed,
+ jboolean library_load_from_apk_supported) {
+ // For low-memory devices, record whether or not we successfully loaded the
+ // browser at a fixed address. Otherwise just record a normal invocation.
+ BrowserHistogramCode histogram_code;
+ if (is_using_browser_shared_relros) {
+ histogram_code = load_at_fixed_address_failed
+ ? LOW_MEMORY_LFA_BACKOFF_USED : LOW_MEMORY_LFA_SUCCESS;
+ } else {
+ histogram_code = NORMAL_LRA_SUCCESS;
+ }
+ UMA_HISTOGRAM_ENUMERATION("ChromiumAndroidLinker.BrowserStates",
+ histogram_code,
+ MAX_BROWSER_HISTOGRAM_CODE);
+
+ // Record whether the device supports loading a library directly from the APK
+ // file.
+ UMA_HISTOGRAM_ENUMERATION("ChromiumAndroidLinker.LibraryLoadFromApkSupported",
+ library_load_from_apk_supported ?
+ SUPPORTED : NOT_SUPPORTED,
+ MAX_LIBRARY_LOAD_FROM_APK_SUPPORT_CODE);
+}
+
void SetLibraryLoadedHook(LibraryLoadedHook* func) {
g_registration_callback = func;
}
@@ -31,23 +117,12 @@
}
static jboolean LibraryLoaded(JNIEnv* env, jclass clazz) {
- if(g_registration_callback == NULL) {
+ if (g_registration_callback == NULL) {
return true;
}
return g_registration_callback(env, clazz);
}
-static void RecordChromiumAndroidLinkerHistogram(
- JNIEnv* env,
- jclass clazz,
- jboolean loaded_at_fixed_address_failed,
- jboolean is_low_memory_device) {
- UMA_HISTOGRAM_BOOLEAN("ChromiumAndroidLinker.LoadedAtFixedAddressFailed",
- loaded_at_fixed_address_failed);
- UMA_HISTOGRAM_BOOLEAN("ChromiumAndroidLinker.IsLowMemoryDevice",
- is_low_memory_device);
-}
-
void LibraryLoaderExitHook() {
if (g_at_exit_manager) {
delete g_at_exit_manager;
diff --git a/android/library_loader/library_loader_hooks.h b/android/library_loader/library_loader_hooks.h
index 72935cf..78dc535 100644
--- a/android/library_loader/library_loader_hooks.h
+++ b/android/library_loader/library_loader_hooks.h
@@ -12,6 +12,10 @@
namespace base {
namespace android {
+// Record any pending renderer histogram value as a histogram. Pending values
+// are set by RegisterChromiumAndroidLinkerRendererHistogram.
+BASE_EXPORT void RecordChromiumAndroidLinkerRendererHistogram();
+
// Registers the callbacks that allows the entry point of the library to be
// exposed to the calling java code. This handles only registering the
// the callbacks needed by the loader. Any application specific JNI bindings
diff --git a/android/linker/linker_jni.cc b/android/linker/linker_jni.cc
index 79dd201..81c6647 100644
--- a/android/linker/linker_jni.cc
+++ b/android/linker/linker_jni.cc
@@ -15,7 +15,9 @@
#include <android/log.h>
#include <crazy_linker.h>
+#include <fcntl.h>
#include <jni.h>
+#include <limits.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>
@@ -574,6 +576,44 @@
return static_cast<jlong>(reinterpret_cast<uintptr_t>(address));
}
+// Check whether the device supports loading a library directly from the APK
+// file.
+//
+// |env| is the current JNI environment handle.
+// |clazz| is the static class handle which is not used here.
+// |apkfile_name| is the filename of the APK.
+// Returns true if supported.
+jboolean CheckLibraryLoadFromApkSupport(JNIEnv* env, jclass clazz,
+ jstring apkfile_name) {
+ String apkfile_name_str(env, apkfile_name);
+ const char* apkfile_name_c_str = apkfile_name_str.c_str();
+
+ int fd = open(apkfile_name_c_str, O_RDONLY);
+ if (fd == -1) {
+ LOG_ERROR("%s: Failed to open %s\n", __FUNCTION__, apkfile_name_c_str);
+ return false;
+ }
+
+ LOG_INFO(
+ "%s: Memory mapping the first page of %s with executable permissions\n",
+ __FUNCTION__, apkfile_name_c_str);
+ void* address = mmap(NULL, PAGE_SIZE, PROT_EXEC, MAP_PRIVATE, fd, 0);
+
+ jboolean success;
+ if (address == MAP_FAILED) {
+ success = false;
+ } else {
+ success = true;
+ munmap(address, PAGE_SIZE);
+ }
+
+ close(fd);
+
+ LOG_INFO(" %ssupported\n", success ? "" : "NOT ");
+ return success;
+
+}
+
const JNINativeMethod kNativeMethods[] = {
{"nativeLoadLibrary",
"("
@@ -623,7 +663,13 @@
"J"
")"
"J",
- reinterpret_cast<void*>(&GetRandomBaseLoadAddress)}, };
+ reinterpret_cast<void*>(&GetRandomBaseLoadAddress)},
+ {"nativeCheckLibraryLoadFromApkSupport",
+ "("
+ "Ljava/lang/String;"
+ ")"
+ "Z",
+ reinterpret_cast<void*>(&CheckLibraryLoadFromApkSupport)}, };
} // namespace
diff --git a/atomicops.h b/atomicops.h
index 84be8c0..833e170 100644
--- a/atomicops.h
+++ b/atomicops.h
@@ -28,8 +28,11 @@
#ifndef BASE_ATOMICOPS_H_
#define BASE_ATOMICOPS_H_
+#include <cassert> // Small C++ header which defines implementation specific
+ // macros used to identify the STL implementation.
#include <stdint.h>
+#include "base/base_export.h"
#include "build/build_config.h"
#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
@@ -137,28 +140,66 @@
} // namespace subtle
} // namespace base
-// Include our platform specific implementation.
-#if defined(THREAD_SANITIZER)
-#include "base/atomicops_internals_tsan.h"
-#elif defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
-#include "base/atomicops_internals_x86_msvc.h"
-#elif defined(OS_MACOSX)
-#include "base/atomicops_internals_mac.h"
-#elif defined(OS_NACL)
-#include "base/atomicops_internals_gcc.h"
-#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARMEL)
-#include "base/atomicops_internals_arm_gcc.h"
-#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM64)
-#include "base/atomicops_internals_arm64_gcc.h"
-#elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
-#include "base/atomicops_internals_x86_gcc.h"
-#elif defined(COMPILER_GCC) && \
- (defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_MIPS64_FAMILY))
-#include "base/atomicops_internals_mips_gcc.h"
-#else
-#error "Atomic operations are not supported on your platform"
+// The following x86 CPU features are used in atomicops_internals_x86_gcc.h, but
+// this file is duplicated inside of Chrome: protobuf and tcmalloc rely on the
+// struct being present at link time. Some parts of Chrome can currently use the
+// portable interface whereas others still use GCC one. The include guards are
+// the same as in atomicops_internals_x86_gcc.cc.
+#if defined(__i386__) || defined(__x86_64__)
+// This struct is not part of the public API of this module; clients may not
+// use it. (However, it's exported via BASE_EXPORT because clients implicitly
+// do use it at link time by inlining these functions.)
+// Features of this x86. Values may not be correct before main() is run,
+// but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+ bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
+ // after acquire compare-and-swap.
+ // The following fields are unused by Chrome's base implementation but are
+ // still used by copies of the same code in other parts of the code base. This
+ // causes an ODR violation, and the other code is likely reading invalid
+ // memory.
+ // TODO(jfb) Delete these fields once the rest of the Chrome code base doesn't
+ // depend on them.
+ bool has_sse2; // Processor has SSE2.
+ bool has_cmpxchg16b; // Processor supports cmpxchg16b instruction.
+};
+BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
+ AtomicOps_Internalx86CPUFeatures;
#endif
+// Try to use a portable implementation based on C++11 atomics.
+//
+// Some toolchains support C++11 language features without supporting library
+// features (recent compiler, older STL). Whitelist libstdc++ and libc++ that we
+// know will have <atomic> when compiling C++11.
+#if ((__cplusplus >= 201103L) && \
+ ((defined(__GLIBCXX__) && (__GLIBCXX__ > 20110216)) || \
+ (defined(_LIBCPP_VERSION) && (_LIBCPP_STD_VER >= 11))))
+# include "base/atomicops_internals_portable.h"
+#else // Otherwise use a platform specific implementation.
+# if defined(THREAD_SANITIZER)
+# error "Thread sanitizer must use the portable atomic operations"
+# elif (defined(OS_WIN) && defined(COMPILER_MSVC) && \
+ defined(ARCH_CPU_X86_FAMILY))
+# include "base/atomicops_internals_x86_msvc.h"
+# elif defined(OS_MACOSX)
+# include "base/atomicops_internals_mac.h"
+# elif defined(OS_NACL)
+# include "base/atomicops_internals_gcc.h"
+# elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARMEL)
+# include "base/atomicops_internals_arm_gcc.h"
+# elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM64)
+# include "base/atomicops_internals_arm64_gcc.h"
+# elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
+# include "base/atomicops_internals_x86_gcc.h"
+# elif (defined(COMPILER_GCC) && \
+ (defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_MIPS64_FAMILY)))
+# include "base/atomicops_internals_mips_gcc.h"
+# else
+# error "Atomic operations are not supported on your platform"
+# endif
+#endif // Portable / non-portable includes.
+
// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
#if defined(OS_MACOSX) || defined(OS_OPENBSD)
diff --git a/atomicops_internals_portable.h b/atomicops_internals_portable.h
new file mode 100644
index 0000000..b25099f
--- /dev/null
+++ b/atomicops_internals_portable.h
@@ -0,0 +1,227 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+// This implementation uses C++11 atomics' member functions. The code base is
+// currently written assuming atomicity revolves around accesses instead of
+// C++11's memory locations. The burden is on the programmer to ensure that all
+// memory locations accessed atomically are never accessed non-atomically (tsan
+// should help with this).
+//
+// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
+// locations as truly atomic. See the static_assert below.
+//
+// Of note in this implementation:
+// * All NoBarrier variants are implemented as relaxed.
+// * All Barrier variants are implemented as sequentially-consistent.
+// * Compare exchange's failure ordering is always the same as the success one
+// (except for release, which fails as relaxed): using a weaker ordering is
+// only valid under certain uses of compare exchange.
+// * Acquire store doesn't exist in the C11 memory model, it is instead
+// implemented as a relaxed store followed by a sequentially consistent
+// fence.
+// * Release load doesn't exist in the C11 memory model, it is instead
+// implemented as sequentially consistent fence followed by a relaxed load.
+// * Atomic increment is expected to return the post-incremented value, whereas
+// C11 fetch add returns the previous value. The implementation therefore
+// needs to increment twice (which the compiler should be able to detect and
+// optimize).
+
+#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+
+#include <atomic>
+
+namespace base {
+namespace subtle {
+
+// This implementation is transitional and maintains the original API for
+// atomicops.h. This requires casting memory locations to the atomic types, and
+// assumes that the API and the C++11 implementation are layout-compatible,
+// which isn't true for all implementations or hardware platforms. The static
+// assertion should detect this issue, were it to fire then this header
+// shouldn't be used.
+//
+// TODO(jfb) If this header manages to stay committed then the API should be
+// modified, and all call sites updated.
+typedef volatile std::atomic<Atomic32>* AtomicLocation32;
+static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
+ "incompatible 32-bit atomic layout");
+
+inline void MemoryBarrier() {
+#if defined(__GLIBCXX__)
+ // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
+ // not defined, leading to the linker complaining about undefined references.
+ __atomic_thread_fence(std::memory_order_seq_cst);
+#else
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+#endif
+}
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ ((AtomicLocation32)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return ((AtomicLocation32)ptr)
+ ->exchange(new_value, std::memory_order_relaxed);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment +
+ ((AtomicLocation32)ptr)
+ ->fetch_add(increment, std::memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ ((AtomicLocation32)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_acquire,
+ std::memory_order_acquire);
+ return old_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ ((AtomicLocation32)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_release,
+ std::memory_order_relaxed);
+ return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
+}
+
+#if defined(ARCH_CPU_64_BITS)
+
+typedef volatile std::atomic<Atomic64>* AtomicLocation64;
+static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
+ "incompatible 64-bit atomic layout");
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ ((AtomicLocation64)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return ((AtomicLocation64)ptr)
+ ->exchange(new_value, std::memory_order_relaxed);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment +
+ ((AtomicLocation64)ptr)
+ ->fetch_add(increment, std::memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ ((AtomicLocation64)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_acquire,
+ std::memory_order_acquire);
+ return old_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ ((AtomicLocation64)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_release,
+ std::memory_order_relaxed);
+ return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
+}
+
+#endif // defined(ARCH_CPU_64_BITS)
+}  // namespace subtle
+}  // namespace base
+
+#endif // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
diff --git a/atomicops_internals_tsan.h b/atomicops_internals_tsan.h
deleted file mode 100644
index 24382fd..0000000
--- a/atomicops_internals_tsan.h
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation for compiler-based
-// ThreadSanitizer. Use base/atomicops.h instead.
-
-#ifndef BASE_ATOMICOPS_INTERNALS_TSAN_H_
-#define BASE_ATOMICOPS_INTERNALS_TSAN_H_
-
-#include <sanitizer/tsan_interface_atomic.h>
-
-namespace base {
-namespace subtle {
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_acquire);
-}
-
-inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_release);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + __tsan_atomic32_fetch_add(ptr, increment,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + __tsan_atomic32_fetch_add(ptr, increment,
- __tsan_memory_order_acq_rel);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_acquire, __tsan_memory_order_acquire);
- return cmp;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_release, __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
- return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
-}
-
-inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + __tsan_atomic64_fetch_add(ptr, increment,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + __tsan_atomic64_fetch_add(ptr, increment,
- __tsan_memory_order_acq_rel);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
- return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_acquire, __tsan_memory_order_acquire);
- return cmp;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_release, __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline void MemoryBarrier() {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-} // namespace base::subtle
-} // namespace base
-
-#endif // BASE_ATOMICOPS_INTERNALS_TSAN_H_
diff --git a/atomicops_internals_x86_gcc.cc b/atomicops_internals_x86_gcc.cc
index 3f47458..c21e96d 100644
--- a/atomicops_internals_x86_gcc.cc
+++ b/atomicops_internals_x86_gcc.cc
@@ -10,15 +10,11 @@
#include "base/atomicops.h"
-// This file only makes sense with atomicops_internals_x86_gcc.h -- it
-// depends on structs that are defined in that file. If atomicops.h
-// doesn't sub-include that file, then we aren't needed, and shouldn't
-// try to do anything.
-#ifdef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
-
// Inline cpuid instruction. In PIC compilations, %ebx contains the address
// of the global offset table. To avoid breaking such executables, this code
// must preserve that register's value across cpuid instructions.
+//
+// The include guards are the same as in atomicops.h.
#if defined(__i386__)
#define cpuid(a, b, c, d, inp) \
asm("mov %%ebx, %%edi\n" \
@@ -39,7 +35,10 @@
// if we haven't been initialized yet, we're probably single threaded, and our
// default values should hopefully be pretty safe.
struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
- false, // bug can't exist before process spawns multiple threads
+ false, // bug can't exist before process spawns multiple threads
+ false, // Chrome requires SSE2, but for transition assume not and initialize
+ // this properly.
+ false, // cmpxchg16b isn't present on early AMD64 CPUs.
};
namespace {
@@ -81,6 +80,12 @@
} else {
AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
}
+
+  // edx bit 26 is SSE2 which we use to tell us whether we can use mfence
+ AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
+
+ // ecx bit 13 indicates whether the cmpxchg16b instruction is supported
+ AtomicOps_Internalx86CPUFeatures.has_cmpxchg16b = ((ecx >> 13) & 1);
}
class AtomicOpsx86Initializer {
@@ -96,5 +101,3 @@
} // namespace
#endif // if x86
-
-#endif // ifdef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/atomicops_internals_x86_gcc.h b/atomicops_internals_x86_gcc.h
index 7386fab..69eacdb 100644
--- a/atomicops_internals_x86_gcc.h
+++ b/atomicops_internals_x86_gcc.h
@@ -7,20 +7,6 @@
#ifndef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
#define BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
-#include "base/base_export.h"
-
-// This struct is not part of the public API of this module; clients may not
-// use it. (However, it's exported via BASE_EXPORT because clients implicitly
-// do use it at link time by inlining these functions.)
-// Features of this x86. Values may not be correct before main() is run,
-// but are set conservatively.
-struct AtomicOps_x86CPUFeatureStruct {
- bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
- // after acquire compare-and-swap.
-};
-BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
- AtomicOps_Internalx86CPUFeatures;
-
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace base {
diff --git a/base.gyp b/base.gyp
index 12de4b1..41fac4f 100644
--- a/base.gyp
+++ b/base.gyp
@@ -904,6 +904,8 @@
'test/multiprocess_test_android.cc',
'test/null_task_runner.cc',
'test/null_task_runner.h',
+ 'test/opaque_ref_counted.cc',
+ 'test/opaque_ref_counted.h',
'test/perf_log.cc',
'test/perf_log.h',
'test/perf_test_suite.cc',
diff --git a/base.gypi b/base.gypi
index c5796ef..5cd8b8c 100644
--- a/base.gypi
+++ b/base.gypi
@@ -81,7 +81,7 @@
'atomicops.h',
'atomicops_internals_gcc.h',
'atomicops_internals_mac.h',
- 'atomicops_internals_tsan.h',
+ 'atomicops_internals_portable.h',
'atomicops_internals_x86_gcc.cc',
'atomicops_internals_x86_gcc.h',
'atomicops_internals_x86_msvc.h',
@@ -328,8 +328,6 @@
'memory/discardable_memory_emulated.h',
'memory/discardable_memory_linux.cc',
'memory/discardable_memory_mac.cc',
- 'memory/discardable_memory_malloc.cc',
- 'memory/discardable_memory_malloc.h',
'memory/discardable_memory_manager.cc',
'memory/discardable_memory_manager.h',
'memory/discardable_memory_win.cc',
@@ -940,6 +938,10 @@
],
}],
['(OS == "mac" or OS == "ios") and >(nacl_untrusted_build)==0', {
+ 'sources': [
+ 'memory/discardable_memory_mach.cc',
+ 'memory/discardable_memory_mach.h',
+ ],
'sources/': [
['exclude', '^files/file_path_watcher_stub\\.cc$'],
['exclude', '^base_paths_posix\\.cc$'],
diff --git a/base.isolate b/base.isolate
index 047d5de..762d915 100644
--- a/base.isolate
+++ b/base.isolate
@@ -9,7 +9,7 @@
'../third_party/icu/icu.isolate',
],
'conditions': [
- ['OS=="linux" and asan==1', {
+ ['OS=="linux" and asan==1 and chromeos==0', {
'variables': {
'files': [
'<(PRODUCT_DIR)/lib/libc++.so',
@@ -23,6 +23,22 @@
],
},
}],
+ ['asan==1', {
+ 'variables': {
+ 'files': [
+ '../tools/valgrind/asan/',
+ '../third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer',
+ '../third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6',
+ ],
+ },
+ }],
+ ['lsan==1', {
+ 'variables': {
+ 'files': [
+ '../tools/lsan/suppressions.txt',
+ ],
+ },
+ }],
['OS=="win" and component=="shared_library" and CONFIGURATION_NAME=="Debug"', {
'variables': {
'files': [
diff --git a/base_unittests.isolate b/base_unittests.isolate
index c5c192c..f561d20 100644
--- a/base_unittests.isolate
+++ b/base_unittests.isolate
@@ -18,6 +18,8 @@
'<(PRODUCT_DIR)/base_unittests<(EXECUTABLE_SUFFIX)',
'--brave-new-test-launcher',
'--test-launcher-bot-mode',
+ '--asan=<(asan)',
+ '--lsan=<(lsan)',
],
'files': [
'../testing/xvfb.py',
@@ -47,6 +49,8 @@
'<(PRODUCT_DIR)/base_unittests<(EXECUTABLE_SUFFIX)',
'--brave-new-test-launcher',
'--test-launcher-bot-mode',
+ '--asan=<(asan)',
+ '--lsan=<(lsan)',
],
},
}],
diff --git a/compiler_specific.h b/compiler_specific.h
index a93d350..ba57cc3 100644
--- a/compiler_specific.h
+++ b/compiler_specific.h
@@ -139,13 +139,6 @@
// virtual void foo() OVERRIDE;
#define OVERRIDE override
-// Annotate a virtual method indicating that subclasses must not override it,
-// or annotate a class to indicate that it cannot be subclassed.
-// Use like:
-// virtual void foo() FINAL;
-// class B FINAL : public A {};
-#define FINAL final
-
// Annotate a function indicating the caller must examine the return value.
// Use like:
// int foo() WARN_UNUSED_RESULT;
diff --git a/containers/hash_tables.h b/containers/hash_tables.h
index 6f37c49..c803ace 100644
--- a/containers/hash_tables.h
+++ b/containers/hash_tables.h
@@ -85,6 +85,17 @@
#undef DEFINE_TRIVIAL_HASH
#endif // !defined(OS_ANDROID)
+// To align with C++11's std::hash and MSVC's pre-standard stdext::hash_value,
+// provide a default hash function for raw pointers. Note: const char * is still
+// specialized to hash as a C string. This is consistent with the currently used
+// stdext::hash_value, but not C++11.
+template<typename T>
+struct hash<T*> {
+ std::size_t operator()(T* value) const {
+ return hash<uintptr_t>()(reinterpret_cast<uintptr_t>(value));
+ }
+};
+
// Implement string hash functions so that strings of various flavors can
// be used as keys in STL maps and sets. The hash algorithm comes from the
// GNU C++ library, in <tr1/functional>. It is duplicated here because GCC
diff --git a/debug/trace_event_impl.cc b/debug/trace_event_impl.cc
index 0607a19..d8f32cc 100644
--- a/debug/trace_event_impl.cc
+++ b/debug/trace_event_impl.cc
@@ -222,7 +222,7 @@
TraceBufferChunk* chunk = chunks_[chunk_index];
cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
}
- return cloned_buffer.PassAs<TraceBuffer>();
+ return cloned_buffer.Pass();
}
private:
diff --git a/debug/trace_event_impl.h b/debug/trace_event_impl.h
index f915541..bac74e3 100644
--- a/debug/trace_event_impl.h
+++ b/debug/trace_event_impl.h
@@ -45,17 +45,6 @@
template <typename Type>
struct DefaultSingletonTraits;
-#if defined(COMPILER_GCC)
-namespace BASE_HASH_NAMESPACE {
-template <>
-struct hash<base::MessageLoop*> {
- std::size_t operator()(base::MessageLoop* value) const {
- return reinterpret_cast<std::size_t>(value);
- }
-};
-} // BASE_HASH_NAMESPACE
-#endif
-
namespace base {
class WaitableEvent;
diff --git a/file_util.h b/file_util.h
deleted file mode 100644
index 9760a34..0000000
--- a/file_util.h
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// TODO(brettw) update callers to use the new location and remove this file.
-#include "base/files/file_util.h"
diff --git a/files/file_path.cc b/files/file_path.cc
index ebc2d6d..11bf69d 100644
--- a/files/file_path.cc
+++ b/files/file_path.cc
@@ -35,7 +35,7 @@
namespace {
-const char* kCommonDoubleExtensionSuffixes[] = { "gz", "z", "bz2" };
+const char* kCommonDoubleExtensionSuffixes[] = { "gz", "z", "bz2", "bz" };
const char* kCommonDoubleExtensions[] = { "user.js" };
const FilePath::CharType kStringTerminator = FILE_PATH_LITERAL('\0');
diff --git a/files/file_path_unittest.cc b/files/file_path_unittest.cc
index 906d8df..956faea 100644
--- a/files/file_path_unittest.cc
+++ b/files/file_path_unittest.cc
@@ -765,6 +765,7 @@
{ FPL("/foo.tar.gz.gz"), FPL(".gz.gz") },
{ FPL("/foo.1234.user.js"), FPL(".user.js") },
{ FPL("foo.user.js"), FPL(".user.js") },
+ { FPL("/foo.tar.bz"), FPL(".tar.bz") },
};
for (unsigned int i = 0; i < arraysize(cases); ++i) {
FilePath path(cases[i].input);
diff --git a/mac/sdk_forward_declarations.h b/mac/sdk_forward_declarations.h
index e450282..1729ea5 100644
--- a/mac/sdk_forward_declarations.h
+++ b/mac/sdk_forward_declarations.h
@@ -207,6 +207,15 @@
@end
BASE_EXPORT extern "C" NSString* const NSWindowWillEnterFullScreenNotification;
+BASE_EXPORT extern "C" NSString* const NSWindowWillExitFullScreenNotification;
+BASE_EXPORT extern "C" NSString* const NSWindowDidEnterFullScreenNotification;
+BASE_EXPORT extern "C" NSString* const NSWindowDidExitFullScreenNotification;
+
+BASE_EXPORT extern "C" NSString* const
+ NSWindowDidFailToEnterFullScreenNotification;
+
+BASE_EXPORT extern "C" NSString* const
+ NSWindowDidFailToExitFullScreenNotification;
#endif // MAC_OS_X_VERSION_10_7
diff --git a/mac/sdk_forward_declarations.mm b/mac/sdk_forward_declarations.mm
index 5b76c88..06d10f2 100644
--- a/mac/sdk_forward_declarations.mm
+++ b/mac/sdk_forward_declarations.mm
@@ -11,6 +11,21 @@
NSString* const NSWindowWillEnterFullScreenNotification =
@"NSWindowWillEnterFullScreenNotification";
+NSString* const NSWindowWillExitFullScreenNotification =
+ @"NSWindowWillExitFullScreenNotification";
+
+NSString* const NSWindowDidEnterFullScreenNotification =
+ @"NSWindowDidEnterFullScreenNotification";
+
+NSString* const NSWindowDidExitFullScreenNotification =
+ @"NSWindowDidExitFullScreenNotification";
+
+NSString* const NSWindowDidFailToEnterFullScreenNotification =
+ @"NSWindowDidFailToEnterFullScreenNotification";
+
+NSString* const NSWindowDidFailToExitFullScreenNotification =
+ @"NSWindowDidFailToExitFullScreenNotification";
+
#endif // MAC_OS_X_VERSION_10_7
// Replicate specific 10.10 SDK declarations for building with prior SDKs.
diff --git a/macros.h b/macros.h
index 2741afc..2e3fc09 100644
--- a/macros.h
+++ b/macros.h
@@ -50,12 +50,6 @@
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
// a pointer by mistake, you will get a compile-time error.
-//
-// One caveat is that arraysize() doesn't accept any array of an
-// anonymous type or a type defined inside a function. In these rare
-// cases, you have to use the unsafe ARRAYSIZE_UNSAFE() macro below. This is
-// due to a limitation in C++'s template system. The limitation might
-// eventually be removed, but it hasn't happened yet.
// This template function declaration is used in defining arraysize.
// Note that the function doesn't need an implementation, as we only
@@ -73,46 +67,10 @@
#define arraysize(array) (sizeof(ArraySizeHelper(array)))
-// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
-// but can be used on anonymous types or types defined inside
-// functions. It's less safe than arraysize as it accepts some
-// (although not all) pointers. Therefore, you should use arraysize
-// whenever possible.
-//
-// The expression ARRAYSIZE_UNSAFE(a) is a compile-time constant of type
-// size_t.
-//
-// ARRAYSIZE_UNSAFE catches a few type errors. If you see a compiler error
-//
-// "warning: division by zero in ..."
-//
-// when using ARRAYSIZE_UNSAFE, you are (wrongfully) giving it a pointer.
-// You should only use ARRAYSIZE_UNSAFE on statically allocated arrays.
-//
-// The following comments are on the implementation details, and can
-// be ignored by the users.
-//
-// ARRAYSIZE_UNSAFE(arr) works by inspecting sizeof(arr) (the # of bytes in
-// the array) and sizeof(*(arr)) (the # of bytes in one array
-// element). If the former is divisible by the latter, perhaps arr is
-// indeed an array, in which case the division result is the # of
-// elements in the array. Otherwise, arr cannot possibly be an array,
-// and we generate a compiler error to prevent the code from
-// compiling.
-//
-// Since the size of bool is implementation-defined, we need to cast
-// !(sizeof(a) & sizeof(*(a))) to size_t in order to ensure the final
-// result has type size_t.
-//
-// This macro is not perfect as it wrongfully accepts certain
-// pointers, namely where the pointer size is divisible by the pointee
-// size. Since all our code has to go through a 32-bit compiler,
-// where a pointer is 4 bytes, this means all pointers to a type whose
-// size is 3 or greater than 4 will be (righteously) rejected.
-
-#define ARRAYSIZE_UNSAFE(a) \
- ((sizeof(a) / sizeof(*(a))) / \
- static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+// DEPRECATED: Just use |arraysize()|, now that C++11 has removed the
+// limitations that forced the use of |ARRAYSIZE_UNSAFE()|.
+// TODO(viettrungluu): Convert all instances and delete. crbug.com/423134
+#define ARRAYSIZE_UNSAFE(a) arraysize(a)
// Use implicit_cast as a safe version of static_cast or const_cast
@@ -141,7 +99,7 @@
// expression is true. For example, you could use it to verify the
// size of a static array:
//
-// COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES,
+// COMPILE_ASSERT(arraysize(content_type_names) == CONTENT_NUM_TYPES,
// content_type_names_incorrect_size);
//
// or to make sure a struct is smaller than a certain size:
diff --git a/memory/discardable_memory.cc b/memory/discardable_memory.cc
index 9ba47aa..3ecc5f7 100644
--- a/memory/discardable_memory.cc
+++ b/memory/discardable_memory.cc
@@ -15,9 +15,8 @@
const char* name;
} kTypeNamePairs[] = {
{ DISCARDABLE_MEMORY_TYPE_ASHMEM, "ashmem" },
- { DISCARDABLE_MEMORY_TYPE_MAC, "mac" },
- { DISCARDABLE_MEMORY_TYPE_EMULATED, "emulated" },
- { DISCARDABLE_MEMORY_TYPE_MALLOC, "malloc" }
+ { DISCARDABLE_MEMORY_TYPE_MACH, "mach" },
+ { DISCARDABLE_MEMORY_TYPE_EMULATED, "emulated" }
};
DiscardableMemoryType g_preferred_type = DISCARDABLE_MEMORY_TYPE_NONE;
diff --git a/memory/discardable_memory.h b/memory/discardable_memory.h
index 5b74705..5f83e33 100644
--- a/memory/discardable_memory.h
+++ b/memory/discardable_memory.h
@@ -18,9 +18,8 @@
enum DiscardableMemoryType {
DISCARDABLE_MEMORY_TYPE_NONE,
DISCARDABLE_MEMORY_TYPE_ASHMEM,
- DISCARDABLE_MEMORY_TYPE_MAC,
- DISCARDABLE_MEMORY_TYPE_EMULATED,
- DISCARDABLE_MEMORY_TYPE_MALLOC
+ DISCARDABLE_MEMORY_TYPE_MACH,
+ DISCARDABLE_MEMORY_TYPE_EMULATED
};
enum DiscardableMemoryLockStatus {
diff --git a/memory/discardable_memory_android.cc b/memory/discardable_memory_android.cc
index acf29ac..0c9f3fc 100644
--- a/memory/discardable_memory_android.cc
+++ b/memory/discardable_memory_android.cc
@@ -11,7 +11,6 @@
#include "base/memory/discardable_memory_ashmem.h"
#include "base/memory/discardable_memory_ashmem_allocator.h"
#include "base/memory/discardable_memory_emulated.h"
-#include "base/memory/discardable_memory_malloc.h"
#include "base/sys_info.h"
namespace base {
@@ -53,8 +52,7 @@
std::vector<DiscardableMemoryType>* types) {
const DiscardableMemoryType supported_types[] = {
DISCARDABLE_MEMORY_TYPE_ASHMEM,
- DISCARDABLE_MEMORY_TYPE_EMULATED,
- DISCARDABLE_MEMORY_TYPE_MALLOC
+ DISCARDABLE_MEMORY_TYPE_EMULATED
};
types->assign(supported_types, supported_types + arraysize(supported_types));
}
@@ -63,39 +61,32 @@
scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
DiscardableMemoryType type, size_t size) {
switch (type) {
- case DISCARDABLE_MEMORY_TYPE_NONE:
- case DISCARDABLE_MEMORY_TYPE_MAC:
- return scoped_ptr<DiscardableMemory>();
case DISCARDABLE_MEMORY_TYPE_ASHMEM: {
SharedState* const shared_state = g_shared_state.Pointer();
scoped_ptr<internal::DiscardableMemoryAshmem> memory(
new internal::DiscardableMemoryAshmem(
size, &shared_state->allocator, &shared_state->manager));
if (!memory->Initialize())
- return scoped_ptr<DiscardableMemory>();
+ return nullptr;
- return memory.PassAs<DiscardableMemory>();
+ return memory.Pass();
}
case DISCARDABLE_MEMORY_TYPE_EMULATED: {
scoped_ptr<internal::DiscardableMemoryEmulated> memory(
new internal::DiscardableMemoryEmulated(size));
if (!memory->Initialize())
- return scoped_ptr<DiscardableMemory>();
+ return nullptr;
- return memory.PassAs<DiscardableMemory>();
+ return memory.Pass();
}
- case DISCARDABLE_MEMORY_TYPE_MALLOC: {
- scoped_ptr<internal::DiscardableMemoryMalloc> memory(
- new internal::DiscardableMemoryMalloc(size));
- if (!memory->Initialize())
- return scoped_ptr<DiscardableMemory>();
-
- return memory.PassAs<DiscardableMemory>();
- }
+ case DISCARDABLE_MEMORY_TYPE_NONE:
+ case DISCARDABLE_MEMORY_TYPE_MACH:
+ NOTREACHED();
+ return nullptr;
}
NOTREACHED();
- return scoped_ptr<DiscardableMemory>();
+ return nullptr;
}
// static
diff --git a/memory/discardable_memory_linux.cc b/memory/discardable_memory_linux.cc
index 578b2c1..6a9a28d 100644
--- a/memory/discardable_memory_linux.cc
+++ b/memory/discardable_memory_linux.cc
@@ -6,7 +6,6 @@
#include "base/logging.h"
#include "base/memory/discardable_memory_emulated.h"
-#include "base/memory/discardable_memory_malloc.h"
namespace base {
@@ -19,8 +18,7 @@
void DiscardableMemory::GetSupportedTypes(
std::vector<DiscardableMemoryType>* types) {
const DiscardableMemoryType supported_types[] = {
- DISCARDABLE_MEMORY_TYPE_EMULATED,
- DISCARDABLE_MEMORY_TYPE_MALLOC
+ DISCARDABLE_MEMORY_TYPE_EMULATED
};
types->assign(supported_types, supported_types + arraysize(supported_types));
}
@@ -29,30 +27,23 @@
scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
DiscardableMemoryType type, size_t size) {
switch (type) {
- case DISCARDABLE_MEMORY_TYPE_NONE:
- case DISCARDABLE_MEMORY_TYPE_ASHMEM:
- case DISCARDABLE_MEMORY_TYPE_MAC:
- return scoped_ptr<DiscardableMemory>();
case DISCARDABLE_MEMORY_TYPE_EMULATED: {
scoped_ptr<internal::DiscardableMemoryEmulated> memory(
new internal::DiscardableMemoryEmulated(size));
if (!memory->Initialize())
- return scoped_ptr<DiscardableMemory>();
+ return nullptr;
- return memory.PassAs<DiscardableMemory>();
+ return memory.Pass();
}
- case DISCARDABLE_MEMORY_TYPE_MALLOC: {
- scoped_ptr<internal::DiscardableMemoryMalloc> memory(
- new internal::DiscardableMemoryMalloc(size));
- if (!memory->Initialize())
- return scoped_ptr<DiscardableMemory>();
-
- return memory.PassAs<DiscardableMemory>();
- }
+ case DISCARDABLE_MEMORY_TYPE_NONE:
+ case DISCARDABLE_MEMORY_TYPE_ASHMEM:
+ case DISCARDABLE_MEMORY_TYPE_MACH:
+ NOTREACHED();
+ return nullptr;
}
NOTREACHED();
- return scoped_ptr<DiscardableMemory>();
+ return nullptr;
}
// static
diff --git a/memory/discardable_memory_mac.cc b/memory/discardable_memory_mac.cc
index 231eb17..6896e5a 100644
--- a/memory/discardable_memory_mac.cc
+++ b/memory/discardable_memory_mac.cc
@@ -4,156 +4,13 @@
#include "base/memory/discardable_memory.h"
-#include <mach/mach.h>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/lazy_instance.h"
#include "base/logging.h"
-#include "base/mac/mach_logging.h"
-#include "base/mac/scoped_mach_vm.h"
#include "base/memory/discardable_memory_emulated.h"
-#include "base/memory/discardable_memory_malloc.h"
+#include "base/memory/discardable_memory_mach.h"
#include "base/memory/discardable_memory_manager.h"
#include "base/memory/scoped_ptr.h"
namespace base {
-namespace {
-
-// For Mac, have the DiscardableMemoryManager trigger userspace eviction when
-// address space usage gets too high (e.g. 512 MBytes).
-const size_t kMacMemoryLimit = 512 * 1024 * 1024;
-
-struct SharedState {
- SharedState() : manager(kMacMemoryLimit, kMacMemoryLimit, TimeDelta::Max()) {}
-
- internal::DiscardableMemoryManager manager;
-};
-LazyInstance<SharedState>::Leaky g_shared_state = LAZY_INSTANCE_INITIALIZER;
-
-// The VM subsystem allows tagging of memory and 240-255 is reserved for
-// application use (see mach/vm_statistics.h). Pick 252 (after chromium's atomic
-// weight of ~52).
-const int kDiscardableMemoryTag = VM_MAKE_TAG(252);
-
-class DiscardableMemoryMac
- : public DiscardableMemory,
- public internal::DiscardableMemoryManagerAllocation {
- public:
- explicit DiscardableMemoryMac(size_t bytes)
- : memory_(0, 0),
- bytes_(mach_vm_round_page(bytes)),
- is_locked_(false) {
- g_shared_state.Pointer()->manager.Register(this, bytes);
- }
-
- bool Initialize() { return Lock() != DISCARDABLE_MEMORY_LOCK_STATUS_FAILED; }
-
- virtual ~DiscardableMemoryMac() {
- if (is_locked_)
- Unlock();
- g_shared_state.Pointer()->manager.Unregister(this);
- }
-
- // Overridden from DiscardableMemory:
- virtual DiscardableMemoryLockStatus Lock() override {
- DCHECK(!is_locked_);
-
- bool purged = false;
- if (!g_shared_state.Pointer()->manager.AcquireLock(this, &purged))
- return DISCARDABLE_MEMORY_LOCK_STATUS_FAILED;
-
- is_locked_ = true;
- return purged ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED
- : DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS;
- }
-
- virtual void Unlock() override {
- DCHECK(is_locked_);
- g_shared_state.Pointer()->manager.ReleaseLock(this);
- is_locked_ = false;
- }
-
- virtual void* Memory() const override {
- DCHECK(is_locked_);
- return reinterpret_cast<void*>(memory_.address());
- }
-
- // Overridden from internal::DiscardableMemoryManagerAllocation:
- virtual bool AllocateAndAcquireLock() override {
- kern_return_t ret;
- bool persistent;
- if (!memory_.size()) {
- vm_address_t address = 0;
- ret = vm_allocate(
- mach_task_self(),
- &address,
- bytes_,
- VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE | kDiscardableMemoryTag);
- MACH_CHECK(ret == KERN_SUCCESS, ret) << "vm_allocate";
- memory_.reset(address, bytes_);
-
- // When making a fresh allocation, it's impossible for |persistent| to
- // be true.
- persistent = false;
- } else {
- // |persistent| will be reset to false below if appropriate, but when
- // reusing an existing allocation, it's possible for it to be true.
- persistent = true;
-
-#if !defined(NDEBUG)
- ret = vm_protect(mach_task_self(),
- memory_.address(),
- memory_.size(),
- FALSE,
- VM_PROT_DEFAULT);
- MACH_DCHECK(ret == KERN_SUCCESS, ret) << "vm_protect";
-#endif
- }
-
- int state = VM_PURGABLE_NONVOLATILE;
- ret = vm_purgable_control(mach_task_self(),
- memory_.address(),
- VM_PURGABLE_SET_STATE,
- &state);
- MACH_CHECK(ret == KERN_SUCCESS, ret) << "vm_purgable_control";
- if (state & VM_PURGABLE_EMPTY)
- persistent = false;
-
- return persistent;
- }
-
- virtual void ReleaseLock() override {
- int state = VM_PURGABLE_VOLATILE | VM_VOLATILE_GROUP_DEFAULT;
- kern_return_t ret = vm_purgable_control(mach_task_self(),
- memory_.address(),
- VM_PURGABLE_SET_STATE,
- &state);
- MACH_CHECK(ret == KERN_SUCCESS, ret) << "vm_purgable_control";
-
-#if !defined(NDEBUG)
- ret = vm_protect(mach_task_self(),
- memory_.address(),
- memory_.size(),
- FALSE,
- VM_PROT_NONE);
- MACH_DCHECK(ret == KERN_SUCCESS, ret) << "vm_protect";
-#endif
- }
-
- virtual void Purge() override {
- memory_.reset();
- }
-
- private:
- mac::ScopedMachVM memory_;
- const size_t bytes_;
- bool is_locked_;
-
- DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryMac);
-};
-
-} // namespace
// static
bool DiscardableMemory::ReduceMemoryUsage() {
@@ -164,9 +21,8 @@
void DiscardableMemory::GetSupportedTypes(
std::vector<DiscardableMemoryType>* types) {
const DiscardableMemoryType supported_types[] = {
- DISCARDABLE_MEMORY_TYPE_MAC,
- DISCARDABLE_MEMORY_TYPE_EMULATED,
- DISCARDABLE_MEMORY_TYPE_MALLOC
+ DISCARDABLE_MEMORY_TYPE_MACH,
+ DISCARDABLE_MEMORY_TYPE_EMULATED
};
types->assign(supported_types, supported_types + arraysize(supported_types));
}
@@ -175,42 +31,35 @@
scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
DiscardableMemoryType type, size_t size) {
switch (type) {
- case DISCARDABLE_MEMORY_TYPE_NONE:
- case DISCARDABLE_MEMORY_TYPE_ASHMEM:
- return scoped_ptr<DiscardableMemory>();
- case DISCARDABLE_MEMORY_TYPE_MAC: {
- scoped_ptr<DiscardableMemoryMac> memory(new DiscardableMemoryMac(size));
+ case DISCARDABLE_MEMORY_TYPE_MACH: {
+ scoped_ptr<internal::DiscardableMemoryMach> memory(
+ new internal::DiscardableMemoryMach(size));
if (!memory->Initialize())
- return scoped_ptr<DiscardableMemory>();
+ return nullptr;
- return memory.PassAs<DiscardableMemory>();
+ return memory.Pass();
}
case DISCARDABLE_MEMORY_TYPE_EMULATED: {
scoped_ptr<internal::DiscardableMemoryEmulated> memory(
new internal::DiscardableMemoryEmulated(size));
if (!memory->Initialize())
- return scoped_ptr<DiscardableMemory>();
+ return nullptr;
- return memory.PassAs<DiscardableMemory>();
+ return memory.Pass();
}
- case DISCARDABLE_MEMORY_TYPE_MALLOC: {
- scoped_ptr<internal::DiscardableMemoryMalloc> memory(
- new internal::DiscardableMemoryMalloc(size));
- if (!memory->Initialize())
- return scoped_ptr<DiscardableMemory>();
-
- return memory.PassAs<DiscardableMemory>();
- }
+ case DISCARDABLE_MEMORY_TYPE_NONE:
+ case DISCARDABLE_MEMORY_TYPE_ASHMEM:
+ NOTREACHED();
+ return nullptr;
}
NOTREACHED();
- return scoped_ptr<DiscardableMemory>();
+ return nullptr;
}
// static
void DiscardableMemory::PurgeForTesting() {
- int state = 0;
- vm_purgable_control(mach_task_self(), 0, VM_PURGABLE_PURGE_ALL, &state);
+ internal::DiscardableMemoryMach::PurgeForTesting();
internal::DiscardableMemoryEmulated::PurgeForTesting();
}
diff --git a/memory/discardable_memory_mach.cc b/memory/discardable_memory_mach.cc
new file mode 100644
index 0000000..1051569
--- /dev/null
+++ b/memory/discardable_memory_mach.cc
@@ -0,0 +1,142 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/discardable_memory_mach.h"
+
+#include <mach/mach.h>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/mac/mach_logging.h"
+
+namespace base {
+namespace {
+
+// For Mach, have the DiscardableMemoryManager trigger userspace eviction when
+// address space usage gets too high (e.g. 512 MBytes).
+const size_t kMachMemoryLimit = 512 * 1024 * 1024;
+
+struct SharedState {
+ SharedState()
+ : manager(kMachMemoryLimit, kMachMemoryLimit, TimeDelta::Max()) {}
+
+ internal::DiscardableMemoryManager manager;
+};
+LazyInstance<SharedState>::Leaky g_shared_state = LAZY_INSTANCE_INITIALIZER;
+
+// The VM subsystem allows tagging of memory and 240-255 is reserved for
+// application use (see mach/vm_statistics.h). Pick 252 (after chromium's atomic
+// weight of ~52).
+const int kDiscardableMemoryTag = VM_MAKE_TAG(252);
+
+} // namespace
+
+namespace internal {
+
+DiscardableMemoryMach::DiscardableMemoryMach(size_t bytes)
+ : memory_(0, 0), bytes_(mach_vm_round_page(bytes)), is_locked_(false) {
+ g_shared_state.Pointer()->manager.Register(this, bytes);
+}
+
+DiscardableMemoryMach::~DiscardableMemoryMach() {
+ if (is_locked_)
+ Unlock();
+ g_shared_state.Pointer()->manager.Unregister(this);
+}
+
+// static
+void DiscardableMemoryMach::PurgeForTesting() {
+ int state = 0;
+ vm_purgable_control(mach_task_self(), 0, VM_PURGABLE_PURGE_ALL, &state);
+}
+
+bool DiscardableMemoryMach::Initialize() {
+ return Lock() != DISCARDABLE_MEMORY_LOCK_STATUS_FAILED;
+}
+
+DiscardableMemoryLockStatus DiscardableMemoryMach::Lock() {
+ DCHECK(!is_locked_);
+
+ bool purged = false;
+ if (!g_shared_state.Pointer()->manager.AcquireLock(this, &purged))
+ return DISCARDABLE_MEMORY_LOCK_STATUS_FAILED;
+
+ is_locked_ = true;
+ return purged ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED
+ : DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS;
+}
+
+void DiscardableMemoryMach::Unlock() {
+ DCHECK(is_locked_);
+ g_shared_state.Pointer()->manager.ReleaseLock(this);
+ is_locked_ = false;
+}
+
+void* DiscardableMemoryMach::Memory() const {
+ DCHECK(is_locked_);
+ return reinterpret_cast<void*>(memory_.address());
+}
+
+bool DiscardableMemoryMach::AllocateAndAcquireLock() {
+ kern_return_t ret;
+ bool persistent;
+ if (!memory_.size()) {
+ vm_address_t address = 0;
+ ret = vm_allocate(
+ mach_task_self(),
+ &address,
+ bytes_,
+ VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE | kDiscardableMemoryTag);
+ MACH_CHECK(ret == KERN_SUCCESS, ret) << "vm_allocate";
+ memory_.reset(address, bytes_);
+
+ // When making a fresh allocation, it's impossible for |persistent| to
+ // be true.
+ persistent = false;
+ } else {
+ // |persistent| will be reset to false below if appropriate, but when
+ // reusing an existing allocation, it's possible for it to be true.
+ persistent = true;
+
+#if !defined(NDEBUG)
+ ret = vm_protect(mach_task_self(),
+ memory_.address(),
+ memory_.size(),
+ FALSE,
+ VM_PROT_DEFAULT);
+ MACH_DCHECK(ret == KERN_SUCCESS, ret) << "vm_protect";
+#endif
+ }
+
+ int state = VM_PURGABLE_NONVOLATILE;
+ ret = vm_purgable_control(
+ mach_task_self(), memory_.address(), VM_PURGABLE_SET_STATE, &state);
+ MACH_CHECK(ret == KERN_SUCCESS, ret) << "vm_purgable_control";
+ if (state & VM_PURGABLE_EMPTY)
+ persistent = false;
+
+ return persistent;
+}
+
+void DiscardableMemoryMach::ReleaseLock() {
+ int state = VM_PURGABLE_VOLATILE | VM_VOLATILE_GROUP_DEFAULT;
+ kern_return_t ret = vm_purgable_control(
+ mach_task_self(), memory_.address(), VM_PURGABLE_SET_STATE, &state);
+ MACH_CHECK(ret == KERN_SUCCESS, ret) << "vm_purgable_control";
+
+#if !defined(NDEBUG)
+ ret = vm_protect(
+ mach_task_self(), memory_.address(), memory_.size(), FALSE, VM_PROT_NONE);
+ MACH_DCHECK(ret == KERN_SUCCESS, ret) << "vm_protect";
+#endif
+}
+
+void DiscardableMemoryMach::Purge() {
+ memory_.reset();
+}
+
+} // namespace internal
+} // namespace base
diff --git a/memory/discardable_memory_mach.h b/memory/discardable_memory_mach.h
new file mode 100644
index 0000000..a409047
--- /dev/null
+++ b/memory/discardable_memory_mach.h
@@ -0,0 +1,48 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_MACH_H_
+#define BASE_MEMORY_DISCARDABLE_MEMORY_MACH_H_
+
+#include "base/memory/discardable_memory.h"
+
+#include "base/mac/scoped_mach_vm.h"
+#include "base/memory/discardable_memory_manager.h"
+
+namespace base {
+namespace internal {
+
+class DiscardableMemoryMach
+ : public DiscardableMemory,
+ public internal::DiscardableMemoryManagerAllocation {
+ public:
+ explicit DiscardableMemoryMach(size_t bytes);
+ virtual ~DiscardableMemoryMach();
+
+ static void PurgeForTesting();
+
+ bool Initialize();
+
+ // Overridden from DiscardableMemory:
+ virtual DiscardableMemoryLockStatus Lock() override;
+ virtual void Unlock() override;
+ virtual void* Memory() const override;
+
+ // Overridden from internal::DiscardableMemoryManagerAllocation:
+ virtual bool AllocateAndAcquireLock() override;
+ virtual void ReleaseLock() override;
+ virtual void Purge() override;
+
+ private:
+ mac::ScopedMachVM memory_;
+ const size_t bytes_;
+ bool is_locked_;
+
+ DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryMach);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_MEMORY_DISCARDABLE_MEMORY_MACH_H_
diff --git a/memory/discardable_memory_malloc.cc b/memory/discardable_memory_malloc.cc
deleted file mode 100644
index a72f911..0000000
--- a/memory/discardable_memory_malloc.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/discardable_memory_malloc.h"
-
-#include "base/logging.h"
-
-namespace base {
-namespace internal {
-
-DiscardableMemoryMalloc::DiscardableMemoryMalloc(size_t size) : size_(size) {
-}
-
-DiscardableMemoryMalloc::~DiscardableMemoryMalloc() {
-}
-
-bool DiscardableMemoryMalloc::Initialize() {
- return Lock() != DISCARDABLE_MEMORY_LOCK_STATUS_FAILED;
-}
-
-DiscardableMemoryLockStatus DiscardableMemoryMalloc::Lock() {
- DCHECK(!memory_);
-
- memory_.reset(static_cast<uint8*>(malloc(size_)));
- if (!memory_)
- return DISCARDABLE_MEMORY_LOCK_STATUS_FAILED;
-
- return DISCARDABLE_MEMORY_LOCK_STATUS_PURGED;
-}
-
-void DiscardableMemoryMalloc::Unlock() {
- DCHECK(memory_);
- memory_.reset();
-}
-
-void* DiscardableMemoryMalloc::Memory() const {
- DCHECK(memory_);
- return memory_.get();
-}
-
-} // namespace internal
-} // namespace base
diff --git a/memory/discardable_memory_malloc.h b/memory/discardable_memory_malloc.h
deleted file mode 100644
index e22d515..0000000
--- a/memory/discardable_memory_malloc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_MALLOC_H_
-#define BASE_MEMORY_DISCARDABLE_MEMORY_MALLOC_H_
-
-#include "base/memory/discardable_memory.h"
-
-namespace base {
-namespace internal {
-
-class DiscardableMemoryMalloc : public DiscardableMemory {
- public:
- explicit DiscardableMemoryMalloc(size_t size);
- virtual ~DiscardableMemoryMalloc();
-
- bool Initialize();
-
- // Overridden from DiscardableMemory:
- virtual DiscardableMemoryLockStatus Lock() override;
- virtual void Unlock() override;
- virtual void* Memory() const override;
-
- private:
- scoped_ptr<uint8, FreeDeleter> memory_;
- const size_t size_;
-
- DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryMalloc);
-};
-
-} // namespace internal
-} // namespace base
-
-#endif // BASE_MEMORY_DISCARDABLE_MEMORY_MALLOC_H_
diff --git a/memory/discardable_memory_manager.cc b/memory/discardable_memory_manager.cc
index 3647b7b..5f5e604 100644
--- a/memory/discardable_memory_manager.cc
+++ b/memory/discardable_memory_manager.cc
@@ -5,6 +5,7 @@
#include "base/memory/discardable_memory_manager.h"
#include "base/bind.h"
+#include "base/containers/adapters.h"
#include "base/containers/hash_tables.h"
#include "base/containers/mru_cache.h"
#include "base/debug/crash_logging.h"
@@ -177,11 +178,9 @@
lock_.AssertAcquired();
size_t bytes_allocated_before_purging = bytes_allocated_;
- for (AllocationMap::reverse_iterator it = allocations_.rbegin();
- it != allocations_.rend();
- ++it) {
- Allocation* allocation = it->first;
- AllocationInfo* info = &it->second;
+ for (auto& entry : base::Reversed(allocations_)) {
+ Allocation* allocation = entry.first;
+ AllocationInfo* info = &entry.second;
if (bytes_allocated_ <= limit)
break;
diff --git a/memory/discardable_memory_manager.h b/memory/discardable_memory_manager.h
index 94b3c55..43737f8 100644
--- a/memory/discardable_memory_manager.h
+++ b/memory/discardable_memory_manager.h
@@ -38,18 +38,6 @@
} // namespace internal
} // namespace base
-#if defined(COMPILER_GCC)
-namespace BASE_HASH_NAMESPACE {
-template <>
-struct hash<base::internal::DiscardableMemoryManagerAllocation*> {
- size_t operator()(
- base::internal::DiscardableMemoryManagerAllocation* ptr) const {
- return hash<size_t>()(reinterpret_cast<size_t>(ptr));
- }
-};
-} // namespace BASE_HASH_NAMESPACE
-#endif // COMPILER
-
namespace base {
namespace internal {
diff --git a/memory/discardable_memory_unittest.cc b/memory/discardable_memory_unittest.cc
index 516a96b..a111cfc 100644
--- a/memory/discardable_memory_unittest.cc
+++ b/memory/discardable_memory_unittest.cc
@@ -38,9 +38,8 @@
}
bool IsNativeType(DiscardableMemoryType type) {
- return
- type == DISCARDABLE_MEMORY_TYPE_ASHMEM ||
- type == DISCARDABLE_MEMORY_TYPE_MAC;
+ return type == DISCARDABLE_MEMORY_TYPE_ASHMEM ||
+ type == DISCARDABLE_MEMORY_TYPE_MACH;
}
TEST_P(DiscardableMemoryTest, SupportedNatively) {
diff --git a/memory/discardable_memory_win.cc b/memory/discardable_memory_win.cc
index 578b2c1..6a9a28d 100644
--- a/memory/discardable_memory_win.cc
+++ b/memory/discardable_memory_win.cc
@@ -6,7 +6,6 @@
#include "base/logging.h"
#include "base/memory/discardable_memory_emulated.h"
-#include "base/memory/discardable_memory_malloc.h"
namespace base {
@@ -19,8 +18,7 @@
void DiscardableMemory::GetSupportedTypes(
std::vector<DiscardableMemoryType>* types) {
const DiscardableMemoryType supported_types[] = {
- DISCARDABLE_MEMORY_TYPE_EMULATED,
- DISCARDABLE_MEMORY_TYPE_MALLOC
+ DISCARDABLE_MEMORY_TYPE_EMULATED
};
types->assign(supported_types, supported_types + arraysize(supported_types));
}
@@ -29,30 +27,23 @@
scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
DiscardableMemoryType type, size_t size) {
switch (type) {
- case DISCARDABLE_MEMORY_TYPE_NONE:
- case DISCARDABLE_MEMORY_TYPE_ASHMEM:
- case DISCARDABLE_MEMORY_TYPE_MAC:
- return scoped_ptr<DiscardableMemory>();
case DISCARDABLE_MEMORY_TYPE_EMULATED: {
scoped_ptr<internal::DiscardableMemoryEmulated> memory(
new internal::DiscardableMemoryEmulated(size));
if (!memory->Initialize())
- return scoped_ptr<DiscardableMemory>();
+ return nullptr;
- return memory.PassAs<DiscardableMemory>();
+ return memory.Pass();
}
- case DISCARDABLE_MEMORY_TYPE_MALLOC: {
- scoped_ptr<internal::DiscardableMemoryMalloc> memory(
- new internal::DiscardableMemoryMalloc(size));
- if (!memory->Initialize())
- return scoped_ptr<DiscardableMemory>();
-
- return memory.PassAs<DiscardableMemory>();
- }
+ case DISCARDABLE_MEMORY_TYPE_NONE:
+ case DISCARDABLE_MEMORY_TYPE_ASHMEM:
+ case DISCARDABLE_MEMORY_TYPE_MACH:
+ NOTREACHED();
+ return nullptr;
}
NOTREACHED();
- return scoped_ptr<DiscardableMemory>();
+ return nullptr;
}
// static
diff --git a/memory/ref_counted.h b/memory/ref_counted.h
index be4919c..7869e72 100644
--- a/memory/ref_counted.h
+++ b/memory/ref_counted.h
@@ -17,7 +17,7 @@
#include "base/threading/thread_collision_warner.h"
#include "build/build_config.h"
-#if defined(OS_LINUX) || (defined(OS_MACOSX) && !defined(OS_IOS))
+#if defined(OS_LINUX) || defined(OS_MACOSX) || defined(OS_IOS) || defined(OS_ANDROID)
#define DISABLE_SCOPED_REFPTR_CONVERSION_OPERATOR
#endif
@@ -276,23 +276,23 @@
scoped_refptr(T* p) : ptr_(p) {
if (ptr_)
- ptr_->AddRef();
+ AddRef(ptr_);
}
scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
if (ptr_)
- ptr_->AddRef();
+ AddRef(ptr_);
}
template <typename U>
scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
if (ptr_)
- ptr_->AddRef();
+ AddRef(ptr_);
}
~scoped_refptr() {
if (ptr_)
- ptr_->Release();
+ Release(ptr_);
}
T* get() const { return ptr_; }
@@ -316,11 +316,11 @@
scoped_refptr<T>& operator=(T* p) {
// AddRef first so that self assignment should work
if (p)
- p->AddRef();
+ AddRef(p);
T* old_ptr = ptr_;
ptr_ = p;
if (old_ptr)
- old_ptr->Release();
+ Release(old_ptr);
return *this;
}
@@ -362,8 +362,26 @@
protected:
T* ptr_;
+
+ private:
+ // Non-inline helpers to allow:
+ // class Opaque;
+ // extern template class scoped_refptr<Opaque>;
+ // Otherwise the compiler will complain that Opaque is an incomplete type.
+ static void AddRef(T* ptr);
+ static void Release(T* ptr);
};
+template <typename T>
+void scoped_refptr<T>::AddRef(T* ptr) {
+ ptr->AddRef();
+}
+
+template <typename T>
+void scoped_refptr<T>::Release(T* ptr) {
+ ptr->Release();
+}
+
// Handy utility for creating a scoped_refptr<T> out of a T* explicitly without
// having to retype all the template arguments
template <typename T>
diff --git a/memory/ref_counted_unittest.cc b/memory/ref_counted_unittest.cc
index e8eb0fd..7e73bde 100644
--- a/memory/ref_counted_unittest.cc
+++ b/memory/ref_counted_unittest.cc
@@ -3,6 +3,8 @@
// found in the LICENSE file.
#include "base/memory/ref_counted.h"
+
+#include "base/test/opaque_ref_counted.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
@@ -60,3 +62,13 @@
check->SelfDestruct();
EXPECT_TRUE(ScopedRefPtrToSelf::was_destroyed());
}
+
+TEST(RefCountedUnitTest, ScopedRefPtrToOpaque) {
+ scoped_refptr<base::OpaqueRefCounted> p = base::MakeOpaqueRefCounted();
+ base::TestOpaqueRefCounted(p);
+
+ scoped_refptr<base::OpaqueRefCounted> q;
+ q = p;
+ base::TestOpaqueRefCounted(p);
+ base::TestOpaqueRefCounted(q);
+}
diff --git a/memory/scoped_ptr.h b/memory/scoped_ptr.h
index d93a8b4..f3bbd12 100644
--- a/memory/scoped_ptr.h
+++ b/memory/scoped_ptr.h
@@ -184,6 +184,17 @@
};
};
+template <typename T>
+struct ShouldAbortOnSelfReset {
+ template <typename U>
+ static NoType Test(const typename U::AllowSelfReset*);
+
+ template <typename U>
+ static YesType Test(...);
+
+ static const bool value = sizeof(Test<T>(0)) == sizeof(YesType);
+};
+
// Minimal implementation of the core logic of scoped_ptr, suitable for
// reuse in both scoped_ptr and its specializations.
template <class T, class D>
@@ -222,9 +233,9 @@
}
void reset(T* p) {
- // This is a self-reset, which is no longer allowed: http://crbug.com/162971
- if (p != nullptr && p == data_.ptr)
- abort();
+ // This is a self-reset, which is no longer allowed for default deleters:
+ // https://crbug.com/162971
+ assert(!ShouldAbortOnSelfReset<D>::value || p == nullptr || p != data_.ptr);
// Note that running data_.ptr = p can lead to undefined behavior if
// get_deleter()(get()) deletes this. In order to prevent this, reset()
diff --git a/memory/scoped_ptr_unittest.cc b/memory/scoped_ptr_unittest.cc
index 2ea44e9..3da8d3b 100644
--- a/memory/scoped_ptr_unittest.cc
+++ b/memory/scoped_ptr_unittest.cc
@@ -656,3 +656,41 @@
scoped_ptr<Super> super2 = SubClassReturn();
super2 = SubClassReturn();
}
+
+// Android death tests don't work properly with assert(). Yay.
+#if !defined(NDEBUG) && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+TEST(ScopedPtrTest, SelfResetAbortsWithDefaultDeleter) {
+ scoped_ptr<int> x(new int);
+ EXPECT_DEATH(x.reset(x.get()), "");
+}
+
+TEST(ScopedPtrTest, SelfResetAbortsWithDefaultArrayDeleter) {
+ scoped_ptr<int[]> y(new int[4]);
+ EXPECT_DEATH(y.reset(y.get()), "");
+}
+
+TEST(ScopedPtrTest, SelfResetAbortsWithDefaultFreeDeleter) {
+ scoped_ptr<int, base::FreeDeleter> z(static_cast<int*>(malloc(sizeof(int))));
+ EXPECT_DEATH(z.reset(z.get()), "");
+}
+
+// A custom deleter that doesn't opt out should still crash.
+TEST(ScopedPtrTest, SelfResetAbortsWithCustomDeleter) {
+ struct CustomDeleter {
+ inline void operator()(int* x) { delete x; }
+ };
+ scoped_ptr<int, CustomDeleter> x(new int);
+ EXPECT_DEATH(x.reset(x.get()), "");
+}
+#endif
+
+TEST(ScopedPtrTest, SelfResetWithCustomDeleterOptOut) {
+ // A custom deleter should be able to opt out of self-reset abort behavior.
+ struct NoOpDeleter {
+ typedef void AllowSelfReset;
+ inline void operator()(int*) {}
+ };
+ scoped_ptr<int> owner(new int);
+ scoped_ptr<int, NoOpDeleter> x(owner.get());
+ x.reset(x.get());
+}
diff --git a/metrics/histogram.cc b/metrics/histogram.cc
index 0a4fcc0..fe5b6e6 100644
--- a/metrics/histogram.cc
+++ b/metrics/histogram.cc
@@ -268,7 +268,7 @@
}
scoped_ptr<HistogramSamples> Histogram::SnapshotSamples() const {
- return SnapshotSampleVector().PassAs<HistogramSamples>();
+ return SnapshotSampleVector().Pass();
}
void Histogram::AddSamples(const HistogramSamples& samples) {
@@ -369,7 +369,7 @@
scoped_ptr<SampleVector> Histogram::SnapshotSampleVector() const {
scoped_ptr<SampleVector> samples(new SampleVector(bucket_ranges()));
samples->Add(*samples_);
- return samples.Pass();
+ return samples;
}
void Histogram::WriteAsciiImpl(bool graph_it,
diff --git a/metrics/sparse_histogram.cc b/metrics/sparse_histogram.cc
index 737ccad..773eeb6 100644
--- a/metrics/sparse_histogram.cc
+++ b/metrics/sparse_histogram.cc
@@ -57,7 +57,7 @@
base::AutoLock auto_lock(lock_);
snapshot->Add(samples_);
- return snapshot.PassAs<HistogramSamples>();
+ return snapshot.Pass();
}
void SparseHistogram::AddSamples(const HistogramSamples& samples) {
diff --git a/numerics/safe_conversions_impl.h b/numerics/safe_conversions_impl.h
index f05d553..c26757a 100644
--- a/numerics/safe_conversions_impl.h
+++ b/numerics/safe_conversions_impl.h
@@ -7,7 +7,6 @@
#include <limits>
-#include "base/macros.h"
#include "base/template_util.h"
namespace base {
@@ -203,10 +202,10 @@
template <typename Dst, typename Src>
inline RangeConstraint DstRangeRelationToSrcRange(Src value) {
- COMPILE_ASSERT(std::numeric_limits<Src>::is_specialized,
- argument_must_be_numeric);
- COMPILE_ASSERT(std::numeric_limits<Dst>::is_specialized,
- result_must_be_numeric);
+ static_assert(std::numeric_limits<Src>::is_specialized,
+ "Argument must be numeric.");
+ static_assert(std::numeric_limits<Dst>::is_specialized,
+ "Result must be numeric.");
return DstRangeRelationToSrcRangeImpl<Dst, Src>::Check(value);
}
diff --git a/numerics/safe_math.h b/numerics/safe_math.h
index b3694fe..ccda1c8 100644
--- a/numerics/safe_math.h
+++ b/numerics/safe_math.h
@@ -62,8 +62,8 @@
template <typename Src>
CheckedNumeric(Src value)
: state_(value) {
- COMPILE_ASSERT(std::numeric_limits<Src>::is_specialized,
- argument_must_be_numeric);
+ static_assert(std::numeric_limits<Src>::is_specialized,
+ "Argument must be numeric.");
}
// IsValid() is the public API to test if a CheckedNumeric is currently valid.
@@ -87,7 +87,7 @@
// we provide an easy method for extracting them directly, without a risk of
// crashing on a CHECK.
T ValueFloating() const {
- COMPILE_ASSERT(std::numeric_limits<T>::is_iec559, argument_must_be_float);
+ static_assert(std::numeric_limits<T>::is_iec559, "Argument must be float.");
return CheckedNumeric<T>::cast(*this).ValueUnsafe();
}
diff --git a/numerics/safe_math_impl.h b/numerics/safe_math_impl.h
index 34e2bf5..663f393 100644
--- a/numerics/safe_math_impl.h
+++ b/numerics/safe_math_impl.h
@@ -11,7 +11,6 @@
#include <cstdlib>
#include <limits>
-#include "base/macros.h"
#include "base/numerics/safe_conversions.h"
#include "base/template_util.h"
@@ -362,8 +361,8 @@
: value_(static_cast<T>(value)),
validity_(GetRangeConstraint(validity |
DstRangeRelationToSrcRange<T>(value))) {
- COMPILE_ASSERT(std::numeric_limits<Src>::is_specialized,
- argument_must_be_numeric);
+ static_assert(std::numeric_limits<Src>::is_specialized,
+ "Argument must be numeric.");
}
// Copy constructor.
diff --git a/numerics/safe_numerics_unittest.cc b/numerics/safe_numerics_unittest.cc
index 23c2c78..0402cef 100644
--- a/numerics/safe_numerics_unittest.cc
+++ b/numerics/safe_numerics_unittest.cc
@@ -308,15 +308,15 @@
typedef numeric_limits<Src> SrcLimits;
typedef numeric_limits<Dst> DstLimits;
// Integral to floating.
- COMPILE_ASSERT((DstLimits::is_iec559 && SrcLimits::is_integer) ||
- // Not floating to integral and...
- (!(DstLimits::is_integer && SrcLimits::is_iec559) &&
- // Same sign, same numeric, source is narrower or same.
- ((SrcLimits::is_signed == DstLimits::is_signed &&
- sizeof(Dst) >= sizeof(Src)) ||
- // Or signed destination and source is smaller
- (DstLimits::is_signed && sizeof(Dst) > sizeof(Src)))),
- comparison_must_be_sign_preserving_and_value_preserving);
+ static_assert((DstLimits::is_iec559 && SrcLimits::is_integer) ||
+ // Not floating to integral and...
+ (!(DstLimits::is_integer && SrcLimits::is_iec559) &&
+ // Same sign, same numeric, source is narrower or same.
+ ((SrcLimits::is_signed == DstLimits::is_signed &&
+ sizeof(Dst) >= sizeof(Src)) ||
+ // Or signed destination and source is smaller
+ (DstLimits::is_signed && sizeof(Dst) > sizeof(Src)))),
+ "Comparison must be sign preserving and value preserving");
const CheckedNumeric<Dst> checked_dst = SrcLimits::max();
;
@@ -354,11 +354,11 @@
static void Test(const char *dst, const char *src, int line) {
typedef numeric_limits<Src> SrcLimits;
typedef numeric_limits<Dst> DstLimits;
- COMPILE_ASSERT(SrcLimits::is_signed == DstLimits::is_signed,
- destination_and_source_sign_must_be_the_same);
- COMPILE_ASSERT(sizeof(Dst) < sizeof(Src) ||
+ static_assert(SrcLimits::is_signed == DstLimits::is_signed,
+ "Destination and source sign must be the same");
+ static_assert(sizeof(Dst) < sizeof(Src) ||
(DstLimits::is_integer && SrcLimits::is_iec559),
- destination_must_be_narrower_than_source);
+ "Destination must be narrower than source");
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, checked_dst + SrcLimits::max());
@@ -390,10 +390,10 @@
static void Test(const char *dst, const char *src, int line) {
typedef numeric_limits<Src> SrcLimits;
typedef numeric_limits<Dst> DstLimits;
- COMPILE_ASSERT(sizeof(Dst) >= sizeof(Src),
- destination_must_be_equal_or_wider_than_source);
- COMPILE_ASSERT(SrcLimits::is_signed, source_must_be_signed);
- COMPILE_ASSERT(!DstLimits::is_signed, destination_must_be_unsigned);
+ static_assert(sizeof(Dst) >= sizeof(Src),
+ "Destination must be equal or wider than source.");
+ static_assert(SrcLimits::is_signed, "Source must be signed");
+ static_assert(!DstLimits::is_signed, "Destination must be unsigned");
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_VALUE(SrcLimits::max(), checked_dst + SrcLimits::max());
@@ -412,11 +412,11 @@
static void Test(const char *dst, const char *src, int line) {
typedef numeric_limits<Src> SrcLimits;
typedef numeric_limits<Dst> DstLimits;
- COMPILE_ASSERT((DstLimits::is_integer && SrcLimits::is_iec559) ||
+ static_assert((DstLimits::is_integer && SrcLimits::is_iec559) ||
(sizeof(Dst) < sizeof(Src)),
- destination_must_be_narrower_than_source);
- COMPILE_ASSERT(SrcLimits::is_signed, source_must_be_signed);
- COMPILE_ASSERT(!DstLimits::is_signed, destination_must_be_unsigned);
+ "Destination must be narrower than source.");
+ static_assert(SrcLimits::is_signed, "Source must be signed.");
+ static_assert(!DstLimits::is_signed, "Destination must be unsigned.");
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
@@ -444,10 +444,10 @@
static void Test(const char *dst, const char *src, int line) {
typedef numeric_limits<Src> SrcLimits;
typedef numeric_limits<Dst> DstLimits;
- COMPILE_ASSERT(sizeof(Dst) <= sizeof(Src),
- destination_must_be_narrower_or_equal_to_source);
- COMPILE_ASSERT(!SrcLimits::is_signed, source_must_be_unsigned);
- COMPILE_ASSERT(DstLimits::is_signed, destination_must_be_signed);
+ static_assert(sizeof(Dst) <= sizeof(Src),
+ "Destination must be narrower or equal to source.");
+ static_assert(!SrcLimits::is_signed, "Source must be unsigned.");
+ static_assert(DstLimits::is_signed, "Destination must be signed.");
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
diff --git a/prefs/json_pref_store_unittest.cc b/prefs/json_pref_store_unittest.cc
index 437f337..45bf895 100644
--- a/prefs/json_pref_store_unittest.cc
+++ b/prefs/json_pref_store_unittest.cc
@@ -388,7 +388,7 @@
scoped_refptr<JsonPrefStore> pref_store =
new JsonPrefStore(input_file,
message_loop_.message_loop_proxy().get(),
- intercepting_pref_filter.PassAs<PrefFilter>());
+ intercepting_pref_filter.Pass());
ASSERT_EQ(PersistentPrefStore::PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE,
pref_store->ReadPrefs());
@@ -435,7 +435,7 @@
scoped_refptr<JsonPrefStore> pref_store =
new JsonPrefStore(input_file,
message_loop_.message_loop_proxy().get(),
- intercepting_pref_filter.PassAs<PrefFilter>());
+ intercepting_pref_filter.Pass());
MockPrefStoreObserver mock_observer;
pref_store->AddObserver(&mock_observer);
diff --git a/process/process_metrics.cc b/process/process_metrics.cc
index 90baae5..2edd9c7 100644
--- a/process/process_metrics.cc
+++ b/process/process_metrics.cc
@@ -40,7 +40,7 @@
res->Set("swapinfo", swap_info_.ToValue().release());
#endif
- return res.PassAs<Value>();
+ return res.Pass();
}
double ProcessMetrics::GetPlatformIndependentCPUUsage() {
diff --git a/process/process_metrics_linux.cc b/process/process_metrics_linux.cc
index 03cc7eb..e8db571 100644
--- a/process/process_metrics_linux.cc
+++ b/process/process_metrics_linux.cc
@@ -524,7 +524,7 @@
res->SetInteger("gem_size", gem_size);
#endif
- return res.PassAs<Value>();
+ return res.Pass();
}
// exposed for testing
@@ -731,7 +731,7 @@
res->SetDouble("io_time", static_cast<double>(io_time));
res->SetDouble("weighted_io_time", static_cast<double>(weighted_io_time));
- return res.PassAs<Value>();
+ return res.Pass();
}
bool IsValidDiskName(const std::string& candidate) {
@@ -856,7 +856,7 @@
else
res->SetDouble("compression_ratio", 0);
- return res.PassAs<Value>();
+ return res.Pass();
}
void GetSwapInfo(SwapInfo* swap_info) {
diff --git a/test/BUILD.gn b/test/BUILD.gn
index add74cb..a2864b5 100644
--- a/test/BUILD.gn
+++ b/test/BUILD.gn
@@ -41,6 +41,8 @@
"multiprocess_test_android.cc",
"null_task_runner.cc",
"null_task_runner.h",
+ "opaque_ref_counted.cc",
+ "opaque_ref_counted.h",
"perf_log.cc",
"perf_log.h",
"perf_test_suite.cc",
diff --git a/test/opaque_ref_counted.cc b/test/opaque_ref_counted.cc
new file mode 100644
index 0000000..ed6c36f
--- /dev/null
+++ b/test/opaque_ref_counted.cc
@@ -0,0 +1,35 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/opaque_ref_counted.h"
+
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class OpaqueRefCounted : public RefCounted<OpaqueRefCounted> {
+ public:
+ OpaqueRefCounted() {}
+
+ int Return42() { return 42; }
+
+ private:
+ virtual ~OpaqueRefCounted() {}
+
+ friend RefCounted<OpaqueRefCounted>;
+ DISALLOW_COPY_AND_ASSIGN(OpaqueRefCounted);
+};
+
+scoped_refptr<OpaqueRefCounted> MakeOpaqueRefCounted() {
+ return new OpaqueRefCounted();
+}
+
+void TestOpaqueRefCounted(scoped_refptr<OpaqueRefCounted> p) {
+ EXPECT_EQ(42, p->Return42());
+}
+
+} // namespace base
+
+template class scoped_refptr<base::OpaqueRefCounted>;
diff --git a/test/opaque_ref_counted.h b/test/opaque_ref_counted.h
new file mode 100644
index 0000000..faf6a65
--- /dev/null
+++ b/test/opaque_ref_counted.h
@@ -0,0 +1,24 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_OPAQUE_REF_COUNTED_H_
+#define BASE_TEST_OPAQUE_REF_COUNTED_H_
+
+#include "base/memory/ref_counted.h"
+
+namespace base {
+
+// OpaqueRefCounted is a test class for scoped_refptr to ensure it still works
+// when the pointed-to type is opaque (i.e., incomplete).
+class OpaqueRefCounted;
+
+// Test functions that return and accept scoped_refptr<OpaqueRefCounted> values.
+scoped_refptr<OpaqueRefCounted> MakeOpaqueRefCounted();
+void TestOpaqueRefCounted(scoped_refptr<OpaqueRefCounted> p);
+
+} // namespace base
+
+extern template class scoped_refptr<base::OpaqueRefCounted>;
+
+#endif // BASE_TEST_OPAQUE_REF_COUNTED_H_
diff --git a/test/test_support_ios.mm b/test/test_support_ios.mm
index 80a4caf..67fae06 100644
--- a/test/test_support_ios.mm
+++ b/test/test_support_ios.mm
@@ -70,6 +70,7 @@
// Yes, this is leaked, it's just to make what's running visible.
window_.reset([[UIWindow alloc] initWithFrame:bounds]);
+ [window_ setBackgroundColor:[UIColor whiteColor]];
[window_ makeKeyAndVisible];
// Add a label with the app name.
diff --git a/threading/sequenced_worker_pool.cc b/threading/sequenced_worker_pool.cc
index 5304f37..b0256c3 100644
--- a/threading/sequenced_worker_pool.cc
+++ b/threading/sequenced_worker_pool.cc
@@ -903,11 +903,6 @@
std::vector<Closure>* delete_these_outside_lock) {
lock_.AssertAcquired();
-#if !defined(OS_NACL)
- UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.TaskCount",
- static_cast<int>(pending_tasks_.size()));
-#endif
-
// Find the next task with a sequence token that's not currently in use.
// If the token is in use, that means another thread is running something
// in that sequence, and we can't run it without going out-of-order.
@@ -988,13 +983,6 @@
break;
}
- // Track the number of tasks we had to skip over to see if we should be
- // making this more efficient. If this number ever becomes large or is
- // frequently "some", we should consider the optimization above.
-#if !defined(OS_NACL)
- UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.UnrunnableTaskCount",
- unrunnable_tasks);
-#endif
return status;
}
diff --git a/threading/thread_restrictions.h b/threading/thread_restrictions.h
index 3c8612d..3653c96 100644
--- a/threading/thread_restrictions.h
+++ b/threading/thread_restrictions.h
@@ -41,6 +41,7 @@
}
namespace content {
class BrowserGpuChannelHostFactory;
+class BrowserGpuMemoryBufferManager;
class BrowserShutdownProfileDumper;
class BrowserTestBase;
class GLHelper;
@@ -213,6 +214,8 @@
friend class chrome_browser_net::Predictor; // http://crbug.com/78451
friend class
content::BrowserGpuChannelHostFactory; // http://crbug.com/125248
+ friend class
+ content::BrowserGpuMemoryBufferManager; // http://crbug.com/420368
friend class content::GLHelper; // http://crbug.com/125415
friend class content::GpuChannelHost; // http://crbug.com/125264
friend class content::TextInputClientMac; // http://crbug.com/121917
diff --git a/win/OWNERS b/win/OWNERS
index 65ed721..8624efe 100644
--- a/win/OWNERS
+++ b/win/OWNERS
@@ -1,2 +1,3 @@
cpu@chromium.org
+grt@chromium.org
rvargas@chromium.org
diff --git a/win/registry.cc b/win/registry.cc
index a6cb9ae..e8fb892 100644
--- a/win/registry.cc
+++ b/win/registry.cc
@@ -487,10 +487,26 @@
// RegistryValueIterator ------------------------------------------------------
RegistryValueIterator::RegistryValueIterator(HKEY root_key,
+ const wchar_t* folder_key,
+ REGSAM wow64access)
+ : name_(MAX_PATH, L'\0'),
+ value_(MAX_PATH, L'\0') {
+ Initialize(root_key, folder_key, wow64access);
+}
+
+RegistryValueIterator::RegistryValueIterator(HKEY root_key,
const wchar_t* folder_key)
: name_(MAX_PATH, L'\0'),
value_(MAX_PATH, L'\0') {
- LONG result = RegOpenKeyEx(root_key, folder_key, 0, KEY_READ, &key_);
+ Initialize(root_key, folder_key, 0);
+}
+
+void RegistryValueIterator::Initialize(HKEY root_key,
+ const wchar_t* folder_key,
+ REGSAM wow64access) {
+ DCHECK_EQ(wow64access & ~kWow64AccessMask, static_cast<REGSAM>(0));
+ LONG result =
+ RegOpenKeyEx(root_key, folder_key, 0, KEY_READ | wow64access, &key_);
if (result != ERROR_SUCCESS) {
key_ = NULL;
} else {
@@ -577,23 +593,13 @@
RegistryKeyIterator::RegistryKeyIterator(HKEY root_key,
const wchar_t* folder_key) {
- LONG result = RegOpenKeyEx(root_key, folder_key, 0, KEY_READ, &key_);
- if (result != ERROR_SUCCESS) {
- key_ = NULL;
- } else {
- DWORD count = 0;
- LONG result = ::RegQueryInfoKey(key_, NULL, 0, NULL, &count, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL);
+ Initialize(root_key, folder_key, 0);
+}
- if (result != ERROR_SUCCESS) {
- ::RegCloseKey(key_);
- key_ = NULL;
- } else {
- index_ = count - 1;
- }
- }
-
- Read();
+RegistryKeyIterator::RegistryKeyIterator(HKEY root_key,
+ const wchar_t* folder_key,
+ REGSAM wow64access) {
+ Initialize(root_key, folder_key, wow64access);
}
RegistryKeyIterator::~RegistryKeyIterator() {
@@ -634,5 +640,29 @@
return false;
}
+void RegistryKeyIterator::Initialize(HKEY root_key,
+ const wchar_t* folder_key,
+ REGSAM wow64access) {
+ DCHECK_EQ(wow64access & ~kWow64AccessMask, static_cast<REGSAM>(0));
+ LONG result =
+ RegOpenKeyEx(root_key, folder_key, 0, KEY_READ | wow64access, &key_);
+ if (result != ERROR_SUCCESS) {
+ key_ = NULL;
+ } else {
+ DWORD count = 0;
+ LONG result = ::RegQueryInfoKey(key_, NULL, 0, NULL, &count, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL);
+
+ if (result != ERROR_SUCCESS) {
+ ::RegCloseKey(key_);
+ key_ = NULL;
+ } else {
+ index_ = count - 1;
+ }
+ }
+
+ Read();
+}
+
} // namespace win
} // namespace base
diff --git a/win/registry.h b/win/registry.h
index af1aee7..e5524b8 100644
--- a/win/registry.h
+++ b/win/registry.h
@@ -157,8 +157,18 @@
// Iterates the entries found in a particular folder on the registry.
class BASE_EXPORT RegistryValueIterator {
public:
+ // Construct a Registry Value Iterator with default WOW64 access.
RegistryValueIterator(HKEY root_key, const wchar_t* folder_key);
+ // Construct a Registry Value Iterator with specific WOW64 access, one of
+ // KEY_WOW64_32KEY or KEY_WOW64_64KEY, or 0.
+ // Note: |wow64access| should be the same access used to open |root_key|
+ // previously, or a predefined key (e.g. HKEY_LOCAL_MACHINE).
+ // See http://msdn.microsoft.com/en-us/library/windows/desktop/aa384129.aspx.
+ RegistryValueIterator(HKEY root_key,
+ const wchar_t* folder_key,
+ REGSAM wow64access);
+
~RegistryValueIterator();
DWORD ValueCount() const;
@@ -181,6 +191,8 @@
// Read in the current values.
bool Read();
+ void Initialize(HKEY root_key, const wchar_t* folder_key, REGSAM wow64access);
+
// The registry key being iterated.
HKEY key_;
@@ -198,8 +210,18 @@
class BASE_EXPORT RegistryKeyIterator {
public:
+ // Construct a Registry Key Iterator with default WOW64 access.
RegistryKeyIterator(HKEY root_key, const wchar_t* folder_key);
+ // Construct a Registry Key Iterator with specific WOW64 access, one of
+ // KEY_WOW64_32KEY or KEY_WOW64_64KEY, or 0.
+ // Note: |wow64access| should be the same access used to open |root_key|
+ // previously, or a predefined key (e.g. HKEY_LOCAL_MACHINE).
+ // See http://msdn.microsoft.com/en-us/library/windows/desktop/aa384129.aspx.
+ RegistryKeyIterator(HKEY root_key,
+ const wchar_t* folder_key,
+ REGSAM wow64access);
+
~RegistryKeyIterator();
DWORD SubkeyCount() const;
@@ -218,6 +240,8 @@
// Read in the current values.
bool Read();
+ void Initialize(HKEY root_key, const wchar_t* folder_key, REGSAM wow64access);
+
// The registry key being iterated.
HKEY key_;