// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (tsan
// should help with this).
//
// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
//           locations as truly atomic. See the static_assert below.
//
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially-consistent.
//  * Compare exchange's failure ordering is always the same as the success
//    one (except for release, which fails as relaxed): using a weaker
//    ordering is only valid under certain uses of compare exchange.
//  * Acquire store doesn't exist in the C11 memory model; it is instead
//    implemented as a relaxed store followed by a sequentially consistent
//    fence.
//  * Release load doesn't exist in the C11 memory model; it is instead
//    implemented as a sequentially consistent fence followed by a relaxed
//    load.
//  * Atomic increment is expected to return the post-incremented value,
//    whereas C11 fetch add returns the previous value. The implementation
//    therefore needs to increment twice (which the compiler should be able
//    to detect and optimize). A sketch of these mappings follows.
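//
// For illustration only (a sketch, assuming a local Atomic32 counter |v|;
// not additional API), roughly how these calls map onto C++11 operations:
//
//   Atomic32 v = 0;
//   Acquire_Store(&v, 1);           // Relaxed store, then seq_cst fence.
//   Atomic32 r = Release_Load(&v);  // Seq_cst fence, then relaxed load.
//   Atomic32 n = NoBarrier_AtomicIncrement(&v, 1);
//       // Relaxed fetch_add; returns the post-incremented value, here 2.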

#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

namespace base {
namespace subtle {

// This implementation is transitional and maintains the original API for
// atomicops.h. This requires casting memory locations to the atomic types,
// and assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertion should detect this issue; were it to fire, this header shouldn't
// be used.
//
// TODO(jfb) If this header manages to stay committed then the API should be
//           modified, and all call sites updated.
typedef volatile std::atomic<Atomic32>* AtomicLocation32;
static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
              "incompatible 32-bit atomic layout");

inline void MemoryBarrier() {
#if defined(__GLIBCXX__)
  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
  // not defined, leading to the linker complaining about undefined references.
  __atomic_thread_fence(std::memory_order_seq_cst);
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}
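
// Note: __atomic_thread_fence(std::memory_order_seq_cst) is the compiler
// builtin that std::atomic_thread_fence is specified to match, so the
// workaround above is expected to behave identically on affected libstdc++
// versions.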

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}
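
// For illustration only (a sketch, assuming a shared Atomic32 |v|): a typical
// retry loop built on the CAS above. The call returns the value observed in
// memory, which equals |old| exactly when the swap succeeded.
//
//   Atomic32 old;
//   do {
//     old = NoBarrier_Load(&v);
//   } while (NoBarrier_CompareAndSwap(&v, old, old + 1) != old);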

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return ((AtomicLocation32)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment +
         ((AtomicLocation32)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
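  // fetch_add without an explicit ordering argument defaults to
  // std::memory_order_seq_cst, giving this variant its Barrier semantics.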
  return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
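  // C++11 forbids memory_order_release as a compare_exchange failure
  // ordering, so the failure case falls back to relaxed.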
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

#if defined(ARCH_CPU_64_BITS)

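// The 64-bit operations below mirror their 32-bit counterparts above and use
// the same memory orderings.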
typedef volatile std::atomic<Atomic64>* AtomicLocation64;
static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
              "incompatible 64-bit atomic layout");

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return ((AtomicLocation64)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment +
         ((AtomicLocation64)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

#endif  // defined(ARCH_CPU_64_BITS)
}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_