// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/trace_event_impl.h"

#include <algorithm>

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/debug/trace_event.h"
#include "base/debug/trace_event_synthetic_delay.h"
#include "base/float_util.h"
#include "base/format_macros.h"
#include "base/json/string_escape.h"
#include "base/lazy_instance.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/synchronization/waitable_event.h"
#include "base/sys_info.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/time/time.h"

#if defined(OS_WIN)
#include "base/debug/trace_event_win.h"
#endif

class DeleteTraceLogForTesting {
 public:
  static void Delete() {
    Singleton<base::debug::TraceLog,
              LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
  }
};

// The thread buckets for the sampling profiler.
BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];

namespace base {
namespace debug {

namespace {

// The overhead of TraceEvent above this threshold will be reported in the
// trace.
const int kOverheadReportThresholdInMicroseconds = 50;

// String options that can be used to initialize TraceOptions.
const char kRecordUntilFull[] = "record-until-full";
const char kRecordContinuously[] = "record-continuously";
const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
const char kTraceToConsole[] = "trace-to-console";
const char kEnableSampling[] = "enable-sampling";
const char kEnableSystrace[] = "enable-systrace";

// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
const size_t kTraceEventVectorBigBufferChunks =
    512000000 / kTraceBufferChunkSize;
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
const size_t kTraceEventBatchChunks = 1000 / kTraceBufferChunkSize;
// Can store results for 30 seconds with 1 ms sampling interval.
const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;
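
// Rough sizing illustration (assuming kTraceBufferChunkSize from
// trace_event_impl.h is 64 events per chunk): kTraceEventVectorBufferChunks
// would be 256000 / 64 = 4000 chunks (~256000 events), and the ring buffer
// keeps a quarter of that, i.e. ~1000 chunks (~64000 events).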

const int kThreadFlushTimeoutMs = 3000;

#if !defined(OS_NACL)
// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
#endif

const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";

#define MAX_CATEGORY_GROUPS 100

// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
// converted to an index into g_category_groups. This allows macros to deal
// only with char enabled pointers from g_category_group_enabled, and we can
// convert internally to determine the category name from the char enabled
// pointer.
const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
    "toplevel",
    "tracing already shutdown",
    "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
    "__metadata",
    // For reporting trace_event overhead. For thread local event buffers only.
    "trace_event_overhead"};

// The enabled flag is char instead of bool so that the API can be used from C.
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
// Indexes here have to match the g_category_groups array indexes above.
const int g_category_already_shutdown = 1;
const int g_category_categories_exhausted = 2;
const int g_category_metadata = 3;
const int g_category_trace_event_overhead = 4;
const int g_num_builtin_categories = 5;
// Skip default categories.
base::subtle::AtomicWord g_category_index = g_num_builtin_categories;

// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
// output name for the thread.
LazyInstance<ThreadLocalPointer<const char> >::Leaky
    g_current_thread_name = LAZY_INSTANCE_INITIALIZER;

TimeTicks ThreadNow() {
  return TimeTicks::IsThreadNowSupported() ?
      TimeTicks::ThreadNow() : TimeTicks();
}

class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }

  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index];
    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return scoped_ptr<TraceBufferChunk>(chunk);
  }

  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    // When this method is called, the queue cannot be full because it has
    // capacity for all chunks, including the one being returned.
    DCHECK(!QueueIsFull());
    DCHECK(chunk);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = chunk.release();
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }

  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    if (chunks_.empty())
      return NULL;

    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index];
    }
    return NULL;
  }

  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
        queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      TraceBufferChunk* chunk = chunks_[chunk_index];
      cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
    }
    return cloned_buffer.Pass();
  }

 private:
  class ClonedTraceBuffer : public TraceBuffer {
   public:
    ClonedTraceBuffer() : current_iteration_index_(0) {}

    // The only implemented method.
    const TraceBufferChunk* NextChunk() override {
      return current_iteration_index_ < chunks_.size() ?
          chunks_[current_iteration_index_++] : NULL;
    }

    scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBufferChunk>();
    }
    void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
      NOTIMPLEMENTED();
    }
    bool IsFull() const override { return false; }
    size_t Size() const override { return 0; }
    size_t Capacity() const override { return 0; }
    TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
      return NULL;
    }
    scoped_ptr<TraceBuffer> CloneForIteration() const override {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBuffer>();
    }

    size_t current_iteration_index_;
    ScopedVector<TraceBufferChunk> chunks_;
  };

  bool QueueIsEmpty() const {
    return queue_head_ == queue_tail_;
  }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
        queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const {
    return QueueSize() == queue_capacity() - 1;
  }

  size_t queue_capacity() const {
    // One extra space to help distinguish full state and empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }
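
  // Worked example of the circular queue: with max_chunks_ == 3 the queue
  // capacity is 4, so one slot always stays unused. head == tail means empty,
  // and QueueSize() == capacity - 1 == 3 means full; e.g. head == 1,
  // tail == 0 gives 0 + 4 - 1 == 3, i.e. full. This is why the constructor
  // starts with queue_head_ == 0 and queue_tail_ == max_chunks.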

  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  scoped_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32 current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};

class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }

  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return scoped_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32>(*index) + 1));
  }

  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = chunk.release();
  }

  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
      if (chunk)
        return chunk;
    }
    return NULL;
  }

  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    NOTIMPLEMENTED();
    return scoped_ptr<TraceBuffer>();
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};

template <typename T>
void InitializeMetadataEvent(TraceEvent* trace_event,
                             int thread_id,
                             const char* metadata_name, const char* arg_name,
                             const T& value) {
  if (!trace_event)
    return;

  int num_args = 1;
  unsigned char arg_type;
  unsigned long long arg_value;
  ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
  trace_event->Initialize(thread_id,
                          TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
                          &g_category_group_enabled[g_category_metadata],
                          metadata_name, ::trace_event_internal::kNoEventId,
                          num_args, &arg_name, &arg_type, &arg_value, NULL,
                          TRACE_EVENT_FLAG_NONE);
}

class AutoThreadLocalBoolean {
 public:
  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
      : thread_local_boolean_(thread_local_boolean) {
    DCHECK(!thread_local_boolean_->Get());
    thread_local_boolean_->Set(true);
  }
  ~AutoThreadLocalBoolean() {
    thread_local_boolean_->Set(false);
  }

 private:
  ThreadLocalBoolean* thread_local_boolean_;
  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
};

}  // namespace

void TraceBufferChunk::Reset(uint32 new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}

scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
  scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
  cloned_chunk->next_free_ = next_free_;
  for (size_t i = 0; i < next_free_; ++i)
    cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
  return cloned_chunk.Pass();
}

// A helper class that allows a lock to be acquired in the middle of a scope
// and releases it at the end of the scope if it was acquired.
class TraceLog::OptionalAutoLock {
 public:
  explicit OptionalAutoLock(Lock& lock)
      : lock_(lock),
        locked_(false) {
  }

  ~OptionalAutoLock() {
    if (locked_)
      lock_.Release();
  }

  void EnsureAcquired() {
    if (!locked_) {
      lock_.Acquire();
      locked_ = true;
    }
  }

 private:
  Lock& lock_;
  bool locked_;
  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
};

// Use this function instead of the TraceEventHandle constructor to keep the
// overhead of the ScopedTracer (trace_event.h) constructor to a minimum.
void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
                TraceEventHandle* handle) {
  DCHECK(chunk_seq);
  DCHECK(chunk_index < (1u << 16));
  DCHECK(event_index < (1u << 16));
  handle->chunk_seq = chunk_seq;
  handle->chunk_index = static_cast<uint16>(chunk_index);
  handle->event_index = static_cast<uint16>(event_index);
}
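
// Illustration: a handle is the triple (chunk_seq, chunk_index, event_index),
// with the two indices limited to 16 bits each. For example, chunk_seq == 7,
// chunk_index == 12, event_index == 3 names event 3 of chunk 12; the sequence
// number lets GetEventByHandle() reject the handle once that chunk has been
// recycled under a new sequence.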

////////////////////////////////////////////////////////////////////////////////
//
// TraceEvent
//
////////////////////////////////////////////////////////////////////////////////

namespace {

size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }

// Copies |*member| into |*buffer|, sets |*member| to point to this new
// location, and then advances |*buffer| by the amount written.
void CopyTraceEventParameter(char** buffer,
                             const char** member,
                             const char* end) {
  if (*member) {
    size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
    DCHECK_LE(static_cast<int>(written), end - *buffer);
    *member = *buffer;
    *buffer += written;
  }
}

}  // namespace

TraceEvent::TraceEvent()
    : duration_(TimeDelta::FromInternalValue(-1)),
      id_(0u),
      category_group_enabled_(NULL),
      name_(NULL),
      thread_id_(0),
      phase_(TRACE_EVENT_PHASE_BEGIN),
      flags_(0) {
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    arg_names_[i] = NULL;
  memset(arg_values_, 0, sizeof(arg_values_));
}

TraceEvent::~TraceEvent() {
}

void TraceEvent::CopyFrom(const TraceEvent& other) {
  timestamp_ = other.timestamp_;
  thread_timestamp_ = other.thread_timestamp_;
  duration_ = other.duration_;
  id_ = other.id_;
  category_group_enabled_ = other.category_group_enabled_;
  name_ = other.name_;
  thread_id_ = other.thread_id_;
  phase_ = other.phase_;
  flags_ = other.flags_;
  parameter_copy_storage_ = other.parameter_copy_storage_;

  for (int i = 0; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = other.arg_names_[i];
    arg_types_[i] = other.arg_types_[i];
    arg_values_[i] = other.arg_values_[i];
    convertable_values_[i] = other.convertable_values_[i];
  }
}

void TraceEvent::Initialize(
    int thread_id,
    TimeTicks timestamp,
    TimeTicks thread_timestamp,
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  timestamp_ = timestamp;
  thread_timestamp_ = thread_timestamp;
  duration_ = TimeDelta::FromInternalValue(-1);
  id_ = id;
  category_group_enabled_ = category_group_enabled;
  name_ = name;
  thread_id_ = thread_id;
  phase_ = phase;
  flags_ = flags;

  // Clamp num_args since it may have been set by a third_party library.
  num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
  int i = 0;
  for (; i < num_args; ++i) {
    arg_names_[i] = arg_names[i];
    arg_types_[i] = arg_types[i];

    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i] = convertable_values[i];
    else
      arg_values_[i].as_uint = arg_values[i];
  }
  for (; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = NULL;
    arg_values_[i].as_uint = 0u;
    convertable_values_[i] = NULL;
    arg_types_[i] = TRACE_VALUE_TYPE_UINT;
  }

  bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
  size_t alloc_size = 0;
  if (copy) {
    alloc_size += GetAllocLength(name);
    for (i = 0; i < num_args; ++i) {
      alloc_size += GetAllocLength(arg_names_[i]);
      if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
        arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
    }
  }

  bool arg_is_copy[kTraceMaxNumArgs];
  for (i = 0; i < num_args; ++i) {
    // No copying of convertable types, we retain ownership.
    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      continue;

    // We only take a copy of arg_vals if they are of type COPY_STRING.
    arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
    if (arg_is_copy[i])
      alloc_size += GetAllocLength(arg_values_[i].as_string);
  }

  if (alloc_size) {
    parameter_copy_storage_ = new RefCountedString;
    parameter_copy_storage_->data().resize(alloc_size);
    char* ptr = string_as_array(&parameter_copy_storage_->data());
    const char* end = ptr + alloc_size;
    if (copy) {
      CopyTraceEventParameter(&ptr, &name_, end);
      for (i = 0; i < num_args; ++i) {
        CopyTraceEventParameter(&ptr, &arg_names_[i], end);
      }
    }
    for (i = 0; i < num_args; ++i) {
      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        continue;
      if (arg_is_copy[i])
        CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
    }
    DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
  }
}

void TraceEvent::Reset() {
  // Only reset fields that won't be initialized in Initialize(), or that may
  // hold references to other objects.
  duration_ = TimeDelta::FromInternalValue(-1);
  parameter_copy_storage_ = NULL;
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    convertable_values_[i] = NULL;
}

void TraceEvent::UpdateDuration(const TimeTicks& now,
                                const TimeTicks& thread_now) {
  DCHECK(duration_.ToInternalValue() == -1);
  duration_ = now - timestamp_;
  thread_duration_ = thread_now - thread_timestamp_;
}

// static
void TraceEvent::AppendValueAsJSON(unsigned char type,
                                   TraceEvent::TraceValue value,
                                   std::string* out) {
  switch (type) {
    case TRACE_VALUE_TYPE_BOOL:
      *out += value.as_bool ? "true" : "false";
      break;
    case TRACE_VALUE_TYPE_UINT:
      StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
      break;
    case TRACE_VALUE_TYPE_INT:
      StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
      break;
    case TRACE_VALUE_TYPE_DOUBLE: {
      // FIXME: base/json/json_writer.cc is using the same code,
      // should be made into a common method.
      std::string real;
      double val = value.as_double;
      if (IsFinite(val)) {
        real = DoubleToString(val);
        // Ensure that the number has a .0 if there's no decimal or 'e'. This
        // makes sure that when we read the JSON back, it's interpreted as a
        // real rather than an int.
        if (real.find('.') == std::string::npos &&
            real.find('e') == std::string::npos &&
            real.find('E') == std::string::npos) {
          real.append(".0");
        }
        // The JSON spec requires that non-integer values in the range (-1,1)
        // have a zero before the decimal point - ".52" is not valid, "0.52" is.
        if (real[0] == '.') {
          real.insert(0, "0");
        } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
          // "-.1" bad "-0.1" good
          real.insert(1, "0");
        }
      } else if (IsNaN(val)) {
        // The JSON spec doesn't allow NaN and Infinity (since these are
        // objects in EcmaScript). Use strings instead.
        real = "\"NaN\"";
      } else if (val < 0) {
        real = "\"-Infinity\"";
      } else {
        real = "\"Infinity\"";
      }
      StringAppendF(out, "%s", real.c_str());
      break;
    }
    case TRACE_VALUE_TYPE_POINTER:
      // JSON only supports double and int numbers.
      // So as not to lose bits from a 64-bit pointer, output as a hex string.
      StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
                        reinterpret_cast<intptr_t>(
                            value.as_pointer)));
      break;
    case TRACE_VALUE_TYPE_STRING:
    case TRACE_VALUE_TYPE_COPY_STRING:
      EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
      break;
    default:
      NOTREACHED() << "Don't know how to print this value";
      break;
  }
}
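
// Formatting examples for the double case above: 3.0 is emitted as "3.0"
// (".0" is appended so readers parse it back as a real), 0.52 stays "0.52",
// and non-finite values are emitted as the strings "NaN", "Infinity" or
// "-Infinity" because JSON has no numeric representation for them.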

void TraceEvent::AppendAsJSON(std::string* out) const {
  int64 time_int64 = timestamp_.ToInternalValue();
  int process_id = TraceLog::GetInstance()->process_id();
  // Category group checked at category creation time.
  DCHECK(!strchr(name_, '"'));
  StringAppendF(out,
      "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
      "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
      TraceLog::GetCategoryGroupName(category_group_enabled_),
      process_id,
      thread_id_,
      time_int64,
      phase_,
      name_);

  // Output argument names and values, stop at first NULL argument name.
  for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    if (i > 0)
      *out += ",";
    *out += "\"";
    *out += arg_names_[i];
    *out += "\":";

    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i]->AppendAsTraceFormat(out);
    else
      AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
  }
  *out += "}";

  if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
    int64 duration = duration_.ToInternalValue();
    if (duration != -1)
      StringAppendF(out, ",\"dur\":%" PRId64, duration);
    if (!thread_timestamp_.is_null()) {
      int64 thread_duration = thread_duration_.ToInternalValue();
      if (thread_duration != -1)
        StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
    }
  }

  // Output tts if thread_timestamp is valid.
  if (!thread_timestamp_.is_null()) {
    int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
    StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
  }

  // If id_ is set, print it out as a hex string so we don't lose any
  // bits (it might be a 64-bit pointer).
  if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
    StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));

  // Instant events also output their scope.
  if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
    char scope = '?';
    switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
      case TRACE_EVENT_SCOPE_GLOBAL:
        scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
        break;

      case TRACE_EVENT_SCOPE_PROCESS:
        scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
        break;

      case TRACE_EVENT_SCOPE_THREAD:
        scope = TRACE_EVENT_SCOPE_NAME_THREAD;
        break;
    }
    StringAppendF(out, ",\"s\":\"%c\"", scope);
  }

  *out += "}";
}
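
// Sketch of the serialized form (illustrative values only): a COMPLETE event
// might append
//   {"cat":"toplevel","pid":1234,"tid":5678,"ts":100,"ph":"X",
//    "name":"RunTask","args":{},"dur":250,"tdur":200,"tts":90}
// which is the JSON consumed by the trace viewer.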

void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
  *out << name_ << "[";
  *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
  *out << "]";
  if (arg_names_[0]) {
    *out << ", {";
    for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
      if (i > 0)
        *out << ", ";
      *out << arg_names_[i] << ":";
      std::string value_as_text;

      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
      else
        AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);

      *out << value_as_text;
    }
    *out << "}";
  }
}

////////////////////////////////////////////////////////////////////////////////
//
// TraceResultBuffer
//
////////////////////////////////////////////////////////////////////////////////

TraceResultBuffer::OutputCallback
    TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
}

TraceResultBuffer::~TraceResultBuffer() {
}

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}
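
// Minimal usage sketch of TraceResultBuffer with its SimpleOutput helper:
//   TraceResultBuffer::SimpleOutput output;
//   TraceResultBuffer buffer;
//   buffer.SetOutputCallback(output.GetCallback());
//   buffer.Start();                         // emits "["
//   buffer.AddFragment("{\"ph\":\"M\"}");   // fragments are comma-separated
//   buffer.Finish();                        // emits "]"
//   // output.json_output now holds "[{\"ph\":\"M\"}]".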

////////////////////////////////////////////////////////////////////////////////
//
// TraceSamplingThread
//
////////////////////////////////////////////////////////////////////////////////
class TraceBucketData;
typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;

class TraceBucketData {
 public:
  TraceBucketData(base::subtle::AtomicWord* bucket,
                  const char* name,
                  TraceSampleCallback callback);
  ~TraceBucketData();

  TRACE_EVENT_API_ATOMIC_WORD* bucket;
  const char* bucket_name;
  TraceSampleCallback callback;
};

// This object must be created on the IO thread.
class TraceSamplingThread : public PlatformThread::Delegate {
 public:
  TraceSamplingThread();
  ~TraceSamplingThread() override;

  // Implementation of PlatformThread::Delegate:
  void ThreadMain() override;

  static void DefaultSamplingCallback(TraceBucketData* bucket_data);

  void Stop();
  void WaitSamplingEventForTesting();

 private:
  friend class TraceLog;

  void GetSamples();
  // Not thread-safe. Once ThreadMain has been called, this can no longer
  // be called.
  void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
                            const char* const name,
                            TraceSampleCallback callback);
  // Splits a combined "category\0name" into the two component parts.
  static void ExtractCategoryAndName(const char* combined,
                                     const char** category,
                                     const char** name);
  std::vector<TraceBucketData> sample_buckets_;
  bool thread_running_;
  CancellationFlag cancellation_flag_;
  WaitableEvent waitable_event_for_testing_;
};


TraceSamplingThread::TraceSamplingThread()
    : thread_running_(false),
      waitable_event_for_testing_(false, false) {
}

TraceSamplingThread::~TraceSamplingThread() {
}

void TraceSamplingThread::ThreadMain() {
  PlatformThread::SetName("Sampling Thread");
  thread_running_ = true;
  const int kSamplingFrequencyMicroseconds = 1000;
  while (!cancellation_flag_.IsSet()) {
    PlatformThread::Sleep(
        TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
    GetSamples();
    waitable_event_for_testing_.Signal();
  }
}

// static
void TraceSamplingThread::DefaultSamplingCallback(
    TraceBucketData* bucket_data) {
  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
  if (!category_and_name)
    return;
  const char* const combined =
      reinterpret_cast<const char* const>(category_and_name);
  const char* category_group;
  const char* name;
  ExtractCategoryAndName(combined, &category_group, &name);
  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
      TraceLog::GetCategoryGroupEnabled(category_group),
      name, 0, 0, NULL, NULL, NULL, NULL, 0);
}

void TraceSamplingThread::GetSamples() {
  for (size_t i = 0; i < sample_buckets_.size(); ++i) {
    TraceBucketData* bucket_data = &sample_buckets_[i];
    bucket_data->callback.Run(bucket_data);
  }
}

void TraceSamplingThread::RegisterSampleBucket(
    TRACE_EVENT_API_ATOMIC_WORD* bucket,
    const char* const name,
    TraceSampleCallback callback) {
  // Access to sample_buckets_ doesn't cause races with the sampling thread
  // that uses the sample_buckets_, because it is guaranteed that
  // RegisterSampleBucket is called before the sampling thread is created.
  DCHECK(!thread_running_);
  sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
}

// static
void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
                                                 const char** category,
                                                 const char** name) {
  *category = combined;
  *name = &combined[strlen(combined) + 1];
}

void TraceSamplingThread::Stop() {
  cancellation_flag_.Set();
}

void TraceSamplingThread::WaitSamplingEventForTesting() {
  waitable_event_for_testing_.Wait();
}

TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
                                 const char* name,
                                 TraceSampleCallback callback)
    : bucket(bucket),
      bucket_name(name),
      callback(callback) {
}

TraceBucketData::~TraceBucketData() {
}

////////////////////////////////////////////////////////////////////////////////
//
// TraceOptions
//
////////////////////////////////////////////////////////////////////////////////

bool TraceOptions::SetFromString(const std::string& options_string) {
  record_mode = RECORD_UNTIL_FULL;
  enable_sampling = false;
  enable_systrace = false;

  std::vector<std::string> split;
  std::vector<std::string>::iterator iter;
  base::SplitString(options_string, ',', &split);
  for (iter = split.begin(); iter != split.end(); ++iter) {
    if (*iter == kRecordUntilFull) {
      record_mode = RECORD_UNTIL_FULL;
    } else if (*iter == kRecordContinuously) {
      record_mode = RECORD_CONTINUOUSLY;
    } else if (*iter == kTraceToConsole) {
      record_mode = ECHO_TO_CONSOLE;
    } else if (*iter == kRecordAsMuchAsPossible) {
      record_mode = RECORD_AS_MUCH_AS_POSSIBLE;
    } else if (*iter == kEnableSampling) {
      enable_sampling = true;
    } else if (*iter == kEnableSystrace) {
      enable_systrace = true;
    } else {
      return false;
    }
  }
  return true;
}
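
// Example: passing "record-continuously,enable-sampling" to SetFromString()
// yields record_mode == RECORD_CONTINUOUSLY with enable_sampling == true, and
// ToString() on that TraceOptions round-trips to the same string. Any
// unrecognized token makes SetFromString() return false.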

std::string TraceOptions::ToString() const {
  std::string ret;
  switch (record_mode) {
    case RECORD_UNTIL_FULL:
      ret = kRecordUntilFull;
      break;
    case RECORD_CONTINUOUSLY:
      ret = kRecordContinuously;
      break;
    case ECHO_TO_CONSOLE:
      ret = kTraceToConsole;
      break;
    case RECORD_AS_MUCH_AS_POSSIBLE:
      ret = kRecordAsMuchAsPossible;
      break;
    default:
      NOTREACHED();
  }
  if (enable_sampling)
    ret = ret + "," + kEnableSampling;
  if (enable_systrace)
    ret = ret + "," + kEnableSystrace;
  return ret;
}

////////////////////////////////////////////////////////////////////////////////
//
// TraceLog
//
////////////////////////////////////////////////////////////////////////////////

class TraceLog::ThreadLocalEventBuffer
    : public MessageLoop::DestructionObserver {
 public:
  ThreadLocalEventBuffer(TraceLog* trace_log);
  ~ThreadLocalEventBuffer() override;

  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  void ReportOverhead(const TimeTicks& event_timestamp,
                      const TimeTicks& event_thread_timestamp);

  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_)
      return NULL;

    return chunk_->GetEventAt(handle.event_index);
  }

  int generation() const { return generation_; }

 private:
  // MessageLoop::DestructionObserver
  void WillDestroyCurrentMessageLoop() override;

  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
  }

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  TraceLog* trace_log_;
  scoped_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_;
  int event_count_;
  TimeDelta overhead_;
  int generation_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
};

TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      chunk_index_(0),
      event_count_(0),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop,
  // so the following message_loop won't be NULL.
  MessageLoop* message_loop = MessageLoop::current();
  message_loop->AddDestructionObserver(this);

  AutoLock lock(trace_log->lock_);
  trace_log->thread_message_loops_.insert(message_loop);
}

TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  MessageLoop::current()->RemoveDestructionObserver(this);

  // A zero event_count_ happens in any of the following cases:
  // - no event was generated for the thread;
  // - the thread has no message loop;
  // - trace_event_overhead is disabled.
  if (event_count_) {
    InitializeMetadataEvent(AddTraceEvent(NULL),
                            static_cast<int>(base::PlatformThread::CurrentId()),
                            "overhead", "average_overhead",
                            overhead_.InMillisecondsF() / event_count_);
  }

  {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    trace_log_->thread_message_loops_.erase(MessageLoop::current());
  }
  trace_log_->thread_local_event_buffer_.Set(NULL);
}

TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  if (!chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}

void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
    const TimeTicks& event_timestamp,
    const TimeTicks& event_thread_timestamp) {
  if (!g_category_group_enabled[g_category_trace_event_overhead])
    return;

  CheckThisIsCurrentBuffer();

  event_count_++;
  TimeTicks thread_now = ThreadNow();
  TimeTicks now = trace_log_->OffsetNow();
  TimeDelta overhead = now - event_timestamp;
  if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
    TraceEvent* trace_event = AddTraceEvent(NULL);
    if (trace_event) {
      trace_event->Initialize(
          static_cast<int>(PlatformThread::CurrentId()),
          event_timestamp, event_thread_timestamp,
          TRACE_EVENT_PHASE_COMPLETE,
          &g_category_group_enabled[g_category_trace_event_overhead],
          "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
      trace_event->UpdateDuration(now, thread_now);
    }
  }
  overhead_ += overhead;
}

void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}

void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}

TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {
}

TraceLogStatus::~TraceLogStatus() {
}

// static
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
}

TraceLog::TraceLog()
    : mode_(DISABLED),
      num_traces_recorded_(0),
      event_callback_(0),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      process_id_hash_(0),
      process_id_(0),
      watch_category_(0),
      trace_options_(kInternalRecordUntilFull),
      sampling_thread_handle_(0),
      category_filter_(CategoryFilter::kDefaultCategoryFilterString),
      event_callback_category_filter_(
          CategoryFilter::kDefaultCategoryFilterString),
      thread_shared_chunk_index_(0),
      generation_(0) {
  // Trace is enabled or disabled on one thread while other threads are
  // accessing the enabled flag. We don't care whether edge-case events are
  // traced or not, so we allow races on the enabled flag to keep the trace
  // macros fast.
  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
  //                            sizeof(g_category_group_enabled),
  //                            "trace_event category enabled");
  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
                         "trace_event category enabled");
  }
#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));

  // NaCl also shouldn't access the command line.
  if (CommandLine::InitializedForCurrentProcess() &&
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
    std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kTraceToConsole);
    if (filter.empty()) {
      filter = kEchoToConsoleCategoryFilter;
    } else {
      filter.append(",");
      filter.append(kEchoToConsoleCategoryFilter);
    }

    LOG(ERROR) << "Start " << switches::kTraceToConsole
               << " with CategoryFilter '" << filter << "'.";
    SetEnabled(CategoryFilter(filter),
               RECORDING_MODE,
               TraceOptions(ECHO_TO_CONSOLE));
  }
#endif

  logged_events_.reset(CreateTraceBuffer());
}

TraceLog::~TraceLog() {
}

const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
    return &g_category_group_enabled[g_category_already_shutdown];
  }
  return tracelog->GetCategoryGroupEnabledInternal(category_group);
}

const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
  // Calculate the index of the category group by finding
  // category_group_enabled in g_category_group_enabled array.
  uintptr_t category_begin =
      reinterpret_cast<uintptr_t>(g_category_group_enabled);
  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
  DCHECK(category_ptr >= category_begin &&
         category_ptr < reinterpret_cast<uintptr_t>(
             g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
      "out of bounds category pointer";
  uintptr_t category_index =
      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
  return g_category_groups[category_index];
}

void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
  unsigned char enabled_flag = 0;
  const char* category_group = g_category_groups[category_index];
  if (mode_ == RECORDING_MODE &&
      category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_RECORDING;
  else if (mode_ == MONITORING_MODE &&
           category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_MONITORING;
  if (event_callback_ &&
      event_callback_category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
  g_category_group_enabled[category_index] = enabled_flag;
}

void TraceLog::UpdateCategoryGroupEnabledFlags() {
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = 0; i < category_index; i++)
    UpdateCategoryGroupEnabledFlag(i);
}

void TraceLog::UpdateSyntheticDelaysFromCategoryFilter() {
  ResetTraceEventSyntheticDelays();
  const CategoryFilter::StringList& delays =
      category_filter_.GetSyntheticDelayValues();
  CategoryFilter::StringList::const_iterator ci;
  for (ci = delays.begin(); ci != delays.end(); ++ci) {
    StringTokenizer tokens(*ci, ";");
    if (!tokens.GetNext())
      continue;
    TraceEventSyntheticDelay* delay =
        TraceEventSyntheticDelay::Lookup(tokens.token());
    while (tokens.GetNext()) {
      std::string token = tokens.token();
      char* duration_end;
      double target_duration = strtod(token.c_str(), &duration_end);
      if (duration_end != token.c_str()) {
        delay->SetTargetDuration(TimeDelta::FromMicroseconds(
            static_cast<int64>(target_duration * 1e6)));
      } else if (token == "static") {
        delay->SetMode(TraceEventSyntheticDelay::STATIC);
      } else if (token == "oneshot") {
        delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
      } else if (token == "alternating") {
        delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
      }
    }
  }
}
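
// Syntax sketch (the full grammar lives in CategoryFilter and
// trace_event_synthetic_delay.h): each synthetic delay entry looks roughly
// like "DELAY(<name>;<seconds>[;static|oneshot|alternating])", so a
// hypothetical "DELAY(test.Delay;0.016;oneshot)" configures a 16 ms one-shot
// delay named "test.Delay".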

const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
    const char* category_group) {
  DCHECK(!strchr(category_group, '"')) <<
      "Category groups may not contain double quote";
  // g_category_groups is append-only, so avoid taking a lock on the fast path.
  size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);

  // Search for pre-existing category group.
  for (size_t i = 0; i < current_category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  unsigned char* category_group_enabled = NULL;
| 1349 | // This is the slow path: the lock is not held in the case above, so more |
| 1350 | // than one thread could have reached here trying to add the same category. |
| 1351 | // Only hold the lock when actually appending a new category, and |
| 1352 | // check the category groups again. |
| 1353 | AutoLock lock(lock_); |
| 1354 | size_t category_index = base::subtle::Acquire_Load(&g_category_index); |
| 1355 | for (size_t i = 0; i < category_index; ++i) { |
| 1356 | if (strcmp(g_category_groups[i], category_group) == 0) { |
| 1357 | return &g_category_group_enabled[i]; |
| 1358 | } |
| 1359 | } |
| 1360 | |
| 1361 | // Create a new category group. |
| 1362 | DCHECK(category_index < MAX_CATEGORY_GROUPS) << |
| 1363 | "must increase MAX_CATEGORY_GROUPS"; |
| 1364 | if (category_index < MAX_CATEGORY_GROUPS) { |
| 1365 | // Don't hold on to the category_group pointer, so that we can create |
| 1366 | // category groups with strings not known at compile time (this is |
| 1367 | // required by SetWatchEvent). |
| 1368 | const char* new_group = strdup(category_group); |
| 1369 | ANNOTATE_LEAKING_OBJECT_PTR(new_group); |
| 1370 | g_category_groups[category_index] = new_group; |
| 1371 | DCHECK(!g_category_group_enabled[category_index]); |
| 1372 | // Note that if both included and excluded patterns in the |
| 1373 | // CategoryFilter are empty, we exclude nothing, |
| 1374 | // thereby enabling this category group. |
| 1375 | UpdateCategoryGroupEnabledFlag(category_index); |
| 1376 | category_group_enabled = &g_category_group_enabled[category_index]; |
| 1377 | // Update the max index now. |
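| | // (The Release_Store pairs with the Acquire_Load of g_category_index on the |
| | // fast path, so readers never see a partially initialized entry.) |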
| 1378 | base::subtle::Release_Store(&g_category_index, category_index + 1); |
| 1379 | } else { |
| 1380 | category_group_enabled = |
| 1381 | &g_category_group_enabled[g_category_categories_exhausted]; |
| 1382 | } |
| 1383 | return category_group_enabled; |
| 1384 | } |
| 1385 | |
| 1386 | void TraceLog::GetKnownCategoryGroups( |
| 1387 | std::vector<std::string>* category_groups) { |
| 1388 | AutoLock lock(lock_); |
| 1389 | category_groups->push_back( |
| 1390 | g_category_groups[g_category_trace_event_overhead]); |
| 1391 | size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); |
| 1392 | for (size_t i = g_num_builtin_categories; i < category_index; i++) |
| 1393 | category_groups->push_back(g_category_groups[i]); |
| 1394 | } |
| 1395 | |
| 1396 | void TraceLog::SetEnabled(const CategoryFilter& category_filter, |
| 1397 | Mode mode, |
| 1398 | const TraceOptions& options) { |
| 1399 | std::vector<EnabledStateObserver*> observer_list; |
| 1400 | { |
| 1401 | AutoLock lock(lock_); |
| 1402 | |
| 1403 | // Can't enable tracing when Flush() is in progress. |
| 1404 | DCHECK(!flush_message_loop_proxy_.get()); |
| 1405 | |
| 1406 | InternalTraceOptions new_options = |
| 1407 | GetInternalOptionsFromTraceOptions(options); |
| 1408 | |
| 1409 | InternalTraceOptions old_options = trace_options(); |
| 1410 | |
| 1411 | if (IsEnabled()) { |
| 1412 | if (new_options != old_options) { |
| 1413 | DLOG(ERROR) << "Attempting to re-enable tracing with a different " |
| 1414 | << "set of options."; |
| 1415 | } |
| 1416 | |
| 1417 | if (mode != mode_) { |
| 1418 | DLOG(ERROR) << "Attempting to re-enable tracing with a different mode."; |
| 1419 | } |
| 1420 | |
| 1421 | category_filter_.Merge(category_filter); |
| 1422 | UpdateCategoryGroupEnabledFlags(); |
| 1423 | return; |
| 1424 | } |
| 1425 | |
| 1426 | if (dispatching_to_observer_list_) { |
| 1427 | DLOG(ERROR) << |
| 1428 | "Cannot manipulate TraceLog::Enabled state from an observer."; |
| 1429 | return; |
| 1430 | } |
| 1431 | |
| 1432 | mode_ = mode; |
| 1433 | |
| 1434 | if (new_options != old_options) { |
| 1435 | subtle::NoBarrier_Store(&trace_options_, new_options); |
| 1436 | UseNextTraceBuffer(); |
| 1437 | } |
| 1438 | |
| 1439 | num_traces_recorded_++; |
| 1440 | |
| 1441 | category_filter_ = CategoryFilter(category_filter); |
| 1442 | UpdateCategoryGroupEnabledFlags(); |
| 1443 | UpdateSyntheticDelaysFromCategoryFilter(); |
| 1444 | |
| 1445 | if (new_options & kInternalEnableSampling) { |
| 1446 | sampling_thread_.reset(new TraceSamplingThread); |
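| | // The three buckets registered below map one-to-one onto the slots of |
| | // g_trace_state declared near the top of this file. |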
| 1447 | sampling_thread_->RegisterSampleBucket( |
| 1448 | &g_trace_state[0], |
| 1449 | "bucket0", |
| 1450 | Bind(&TraceSamplingThread::DefaultSamplingCallback)); |
| 1451 | sampling_thread_->RegisterSampleBucket( |
| 1452 | &g_trace_state[1], |
| 1453 | "bucket1", |
| 1454 | Bind(&TraceSamplingThread::DefaultSamplingCallback)); |
| 1455 | sampling_thread_->RegisterSampleBucket( |
| 1456 | &g_trace_state[2], |
| 1457 | "bucket2", |
| 1458 | Bind(&TraceSamplingThread::DefaultSamplingCallback)); |
| 1459 | if (!PlatformThread::Create( |
| 1460 | 0, sampling_thread_.get(), &sampling_thread_handle_)) { |
| 1461 | DCHECK(false) << "failed to create thread"; |
| 1462 | } |
| 1463 | } |
| 1464 | |
| 1465 | dispatching_to_observer_list_ = true; |
| 1466 | observer_list = enabled_state_observer_list_; |
| 1467 | } |
| 1468 | // Notify observers outside the lock in case they trigger trace events. |
| 1469 | for (size_t i = 0; i < observer_list.size(); ++i) |
| 1470 | observer_list[i]->OnTraceLogEnabled(); |
| 1471 | |
| 1472 | { |
| 1473 | AutoLock lock(lock_); |
| 1474 | dispatching_to_observer_list_ = false; |
| 1475 | } |
| 1476 | } |
| 1477 | |
| 1478 | TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceOptions( |
| 1479 | const TraceOptions& options) { |
| 1480 | InternalTraceOptions ret = |
| 1481 | options.enable_sampling ? kInternalEnableSampling : kInternalNone; |
| 1482 | switch (options.record_mode) { |
| 1483 | case RECORD_UNTIL_FULL: |
| 1484 | return ret | kInternalRecordUntilFull; |
| 1485 | case RECORD_CONTINUOUSLY: |
| 1486 | return ret | kInternalRecordContinuously; |
| 1487 | case ECHO_TO_CONSOLE: |
| 1488 | return ret | kInternalEchoToConsole; |
| 1489 | case RECORD_AS_MUCH_AS_POSSIBLE: |
| 1490 | return ret | kInternalRecordAsMuchAsPossible; |
| 1491 | } |
| 1492 | NOTREACHED(); |
| 1493 | return kInternalNone; |
| 1494 | } |
| 1495 | |
| 1496 | CategoryFilter TraceLog::GetCurrentCategoryFilter() { |
| 1497 | AutoLock lock(lock_); |
| 1498 | return category_filter_; |
| 1499 | } |
| 1500 | |
| 1501 | TraceOptions TraceLog::GetCurrentTraceOptions() const { |
| 1502 | TraceOptions ret; |
| 1503 | InternalTraceOptions option = trace_options(); |
| 1504 | ret.enable_sampling = (option & kInternalEnableSampling) != 0; |
| 1505 | if (option & kInternalRecordUntilFull) |
| 1506 | ret.record_mode = RECORD_UNTIL_FULL; |
| 1507 | else if (option & kInternalRecordContinuously) |
| 1508 | ret.record_mode = RECORD_CONTINUOUSLY; |
| 1509 | else if (option & kInternalEchoToConsole) |
| 1510 | ret.record_mode = ECHO_TO_CONSOLE; |
| 1511 | else if (option & kInternalRecordAsMuchAsPossible) |
| 1512 | ret.record_mode = RECORD_AS_MUCH_AS_POSSIBLE; |
| 1513 | else |
| 1514 | NOTREACHED(); |
| 1515 | return ret; |
| 1516 | } |
| 1517 | |
| 1518 | void TraceLog::SetDisabled() { |
| 1519 | AutoLock lock(lock_); |
| 1520 | SetDisabledWhileLocked(); |
| 1521 | } |
| 1522 | |
| 1523 | void TraceLog::SetDisabledWhileLocked() { |
| 1524 | lock_.AssertAcquired(); |
| 1525 | |
| 1526 | if (!IsEnabled()) |
| 1527 | return; |
| 1528 | |
| 1529 | if (dispatching_to_observer_list_) { |
| 1530 | DLOG(ERROR) |
| 1531 | << "Cannot manipulate TraceLog::Enabled state from an observer."; |
| 1532 | return; |
| 1533 | } |
| 1534 | |
| 1535 | mode_ = DISABLED; |
| 1536 | |
| 1537 | if (sampling_thread_.get()) { |
| 1538 | // Stop the sampling thread. |
| 1539 | sampling_thread_->Stop(); |
| 1540 | lock_.Release(); |
| 1541 | PlatformThread::Join(sampling_thread_handle_); |
| 1542 | lock_.Acquire(); |
| 1543 | sampling_thread_handle_ = PlatformThreadHandle(); |
| 1544 | sampling_thread_.reset(); |
| 1545 | } |
| 1546 | |
| 1547 | category_filter_.Clear(); |
| 1548 | subtle::NoBarrier_Store(&watch_category_, 0); |
| 1549 | watch_event_name_ = ""; |
| 1550 | UpdateCategoryGroupEnabledFlags(); |
| 1551 | AddMetadataEventsWhileLocked(); |
| 1552 | |
| 1553 | dispatching_to_observer_list_ = true; |
| 1554 | std::vector<EnabledStateObserver*> observer_list = |
| 1555 | enabled_state_observer_list_; |
| 1556 | |
| 1557 | { |
| 1558 | // Dispatch to observers outside the lock in case the observer triggers a |
| 1559 | // trace event. |
| 1560 | AutoUnlock unlock(lock_); |
| 1561 | for (size_t i = 0; i < observer_list.size(); ++i) |
| 1562 | observer_list[i]->OnTraceLogDisabled(); |
| 1563 | } |
| 1564 | dispatching_to_observer_list_ = false; |
| 1565 | } |
| 1566 | |
| 1567 | int TraceLog::GetNumTracesRecorded() { |
| 1568 | AutoLock lock(lock_); |
| 1569 | if (!IsEnabled()) |
| 1570 | return -1; |
| 1571 | return num_traces_recorded_; |
| 1572 | } |
| 1573 | |
| 1574 | void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) { |
| 1575 | enabled_state_observer_list_.push_back(listener); |
| 1576 | } |
| 1577 | |
| 1578 | void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) { |
| 1579 | std::vector<EnabledStateObserver*>::iterator it = |
| 1580 | std::find(enabled_state_observer_list_.begin(), |
| 1581 | enabled_state_observer_list_.end(), |
| 1582 | listener); |
| 1583 | if (it != enabled_state_observer_list_.end()) |
| 1584 | enabled_state_observer_list_.erase(it); |
| 1585 | } |
| 1586 | |
| 1587 | bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const { |
| 1588 | std::vector<EnabledStateObserver*>::const_iterator it = |
| 1589 | std::find(enabled_state_observer_list_.begin(), |
| 1590 | enabled_state_observer_list_.end(), |
| 1591 | listener); |
| 1592 | return it != enabled_state_observer_list_.end(); |
| 1593 | } |
| 1594 | |
Benjamin Lerman | 5799890 | 2014-11-18 16:06:02 +0100 | [diff] [blame] | 1595 | TraceLogStatus TraceLog::GetStatus() const { |
James Robinson | 646469d | 2014-10-03 15:33:28 -0700 | [diff] [blame] | 1596 | AutoLock lock(lock_); |
Benjamin Lerman | 5799890 | 2014-11-18 16:06:02 +0100 | [diff] [blame] | 1597 | TraceLogStatus result; |
| 1598 | result.event_capacity = logged_events_->Capacity(); |
| 1599 | result.event_count = logged_events_->Size(); |
| 1600 | return result; |
James Robinson | 646469d | 2014-10-03 15:33:28 -0700 | [diff] [blame] | 1601 | } |
| 1602 | |
| 1603 | bool TraceLog::BufferIsFull() const { |
| 1604 | AutoLock lock(lock_); |
| 1605 | return logged_events_->IsFull(); |
| 1606 | } |
| 1607 | |
| 1608 | TraceBuffer* TraceLog::CreateTraceBuffer() { |
| 1609 | InternalTraceOptions options = trace_options(); |
| 1610 | if (options & kInternalRecordContinuously) |
| 1611 | return new TraceBufferRingBuffer(kTraceEventRingBufferChunks); |
| 1612 | else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE) |
| 1613 | return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks); |
| 1614 | else if (options & kInternalEchoToConsole) |
| 1615 | return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks); |
| 1616 | else if (options & kInternalRecordAsMuchAsPossible) |
| 1617 | return CreateTraceBufferVectorOfSize(kTraceEventVectorBigBufferChunks); |
| 1618 | return CreateTraceBufferVectorOfSize(kTraceEventVectorBufferChunks); |
| 1619 | } |
| 1620 | |
| 1621 | TraceBuffer* TraceLog::CreateTraceBufferVectorOfSize(size_t max_chunks) { |
| 1622 | return new TraceBufferVector(max_chunks); |
| 1623 | } |
| 1624 | |
| 1625 | TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked( |
| 1626 | TraceEventHandle* handle, bool check_buffer_is_full) { |
| 1627 | lock_.AssertAcquired(); |
| 1628 | |
| 1629 | if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) { |
| 1630 | logged_events_->ReturnChunk(thread_shared_chunk_index_, |
| 1631 | thread_shared_chunk_.Pass()); |
| 1632 | } |
| 1633 | |
| 1634 | if (!thread_shared_chunk_) { |
| 1635 | thread_shared_chunk_ = logged_events_->GetChunk( |
| 1636 | &thread_shared_chunk_index_); |
| 1637 | if (check_buffer_is_full) |
| 1638 | CheckIfBufferIsFullWhileLocked(); |
| 1639 | } |
| 1640 | if (!thread_shared_chunk_) |
| 1641 | return NULL; |
| 1642 | |
| 1643 | size_t event_index; |
| 1644 | TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index); |
| 1645 | if (trace_event && handle) { |
| 1646 | MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_, |
| 1647 | event_index, handle); |
| 1648 | } |
| 1649 | return trace_event; |
| 1650 | } |
| 1651 | |
| 1652 | void TraceLog::CheckIfBufferIsFullWhileLocked() { |
| 1653 | lock_.AssertAcquired(); |
| 1654 | if (logged_events_->IsFull()) { |
| 1655 | if (buffer_limit_reached_timestamp_.is_null()) { |
| 1656 | buffer_limit_reached_timestamp_ = OffsetNow(); |
| 1657 | } |
| 1658 | SetDisabledWhileLocked(); |
| 1659 | } |
| 1660 | } |
| 1661 | |
| 1662 | void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter, |
| 1663 | EventCallback cb) { |
| 1664 | AutoLock lock(lock_); |
| 1665 | subtle::NoBarrier_Store(&event_callback_, |
| 1666 | reinterpret_cast<subtle::AtomicWord>(cb)); |
| 1667 | event_callback_category_filter_ = category_filter; |
| 1668 | UpdateCategoryGroupEnabledFlags(); |
| 1669 | } |
| 1670 | |
| 1671 | void TraceLog::SetEventCallbackDisabled() { |
| 1672 | AutoLock lock(lock_); |
| 1673 | subtle::NoBarrier_Store(&event_callback_, 0); |
| 1674 | UpdateCategoryGroupEnabledFlags(); |
| 1675 | } |
| 1676 | |
| 1677 | // Flush() works as follows: |
| 1678 | // 1. Flush() is called in threadA whose message loop is saved in |
| 1679 | //    flush_message_loop_proxy_; |
| 1680 | // 2. If thread_message_loops_ is not empty, threadA posts a task to each |
| 1681 | //    message loop to flush the thread local buffers; otherwise finish the flush; |
| 1682 | // 3. FlushCurrentThread() deletes the thread local event buffer: |
| 1683 | //    - The last batch of the thread's events is flushed into the main buffer; |
| 1684 | //    - The message loop will be removed from thread_message_loops_; |
| 1685 | //      If this is the last message loop, finish the flush; |
| 1686 | // 4. If any thread hasn't finished its flush in time, finish the flush. |
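| | // The OutputCallback receives the serialized events as JSON string chunks |
| | // and is run repeatedly until its has_more_events argument is false (see |
| | // ConvertTraceEventsToTraceFormat() below). |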
| 1687 | void TraceLog::Flush(const TraceLog::OutputCallback& cb) { |
| 1688 | if (IsEnabled()) { |
| 1689 | // Can't flush when tracing is enabled because otherwise PostTask would |
| 1690 | // - generate more trace events; |
| 1691 | // - deschedule the calling thread on some platforms causing inaccurate |
| 1692 | // timing of the trace events. |
| 1693 | scoped_refptr<RefCountedString> empty_result = new RefCountedString; |
| 1694 | if (!cb.is_null()) |
| 1695 | cb.Run(empty_result, false); |
| 1696 | LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled"; |
| 1697 | return; |
| 1698 | } |
| 1699 | |
| 1700 | int generation = this->generation(); |
| 1701 | // Copy of thread_message_loops_ to be used without locking. |
| 1702 | std::vector<scoped_refptr<SingleThreadTaskRunner> > |
| 1703 | thread_message_loop_task_runners; |
| 1704 | { |
| 1705 | AutoLock lock(lock_); |
| 1706 | DCHECK(!flush_message_loop_proxy_.get()); |
| 1707 | flush_message_loop_proxy_ = MessageLoopProxy::current(); |
| 1708 | DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get()); |
| 1709 | flush_output_callback_ = cb; |
| 1710 | |
| 1711 | if (thread_shared_chunk_) { |
| 1712 | logged_events_->ReturnChunk(thread_shared_chunk_index_, |
| 1713 | thread_shared_chunk_.Pass()); |
| 1714 | } |
| 1715 | |
| 1716 | if (thread_message_loops_.size()) { |
| 1717 | for (hash_set<MessageLoop*>::const_iterator it = |
| 1718 | thread_message_loops_.begin(); |
| 1719 | it != thread_message_loops_.end(); ++it) { |
| 1720 | thread_message_loop_task_runners.push_back((*it)->task_runner()); |
| 1721 | } |
| 1722 | } |
| 1723 | } |
| 1724 | |
| 1725 | if (thread_message_loop_task_runners.size()) { |
| 1726 | for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) { |
| 1727 | thread_message_loop_task_runners[i]->PostTask( |
| 1728 | FROM_HERE, |
| 1729 | Bind(&TraceLog::FlushCurrentThread, Unretained(this), generation)); |
| 1730 | } |
| 1731 | flush_message_loop_proxy_->PostDelayedTask( |
| 1732 | FROM_HERE, |
| 1733 | Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation), |
| 1734 | TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs)); |
| 1735 | return; |
| 1736 | } |
| 1737 | |
| 1738 | FinishFlush(generation); |
| 1739 | } |
| 1740 | |
| 1741 | void TraceLog::ConvertTraceEventsToTraceFormat( |
| 1742 | scoped_ptr<TraceBuffer> logged_events, |
| 1743 | const TraceLog::OutputCallback& flush_output_callback) { |
| 1744 | |
| 1745 | if (flush_output_callback.is_null()) |
| 1746 | return; |
| 1747 | |
| 1748 | // The callback needs to be called at least once even if there are no events |
| 1749 | // to let the caller know that the flush has completed. |
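| | // Each chunk is a comma-separated run of serialized events with no leading |
| | // or trailing comma, so a consumer assembling the full event array needs to |
| | // insert its own separator between chunks. |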
| 1750 | bool has_more_events = true; |
| 1751 | do { |
| 1752 | scoped_refptr<RefCountedString> json_events_str_ptr = |
| 1753 | new RefCountedString(); |
| 1754 | |
| 1755 | for (size_t i = 0; i < kTraceEventBatchChunks; ++i) { |
| 1756 | const TraceBufferChunk* chunk = logged_events->NextChunk(); |
| 1757 | if (!chunk) { |
| 1758 | has_more_events = false; |
| 1759 | break; |
| 1760 | } |
| 1761 | for (size_t j = 0; j < chunk->size(); ++j) { |
| 1762 | if (i > 0 || j > 0) |
| 1763 | json_events_str_ptr->data().append(","); |
| 1764 | chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data())); |
| 1765 | } |
| 1766 | } |
| 1767 | |
| 1768 | flush_output_callback.Run(json_events_str_ptr, has_more_events); |
| 1769 | } while (has_more_events); |
| 1770 | } |
| 1771 | |
| 1772 | void TraceLog::FinishFlush(int generation) { |
| 1773 | scoped_ptr<TraceBuffer> previous_logged_events; |
| 1774 | OutputCallback flush_output_callback; |
| 1775 | |
| 1776 | if (!CheckGeneration(generation)) |
| 1777 | return; |
| 1778 | |
| 1779 | { |
| 1780 | AutoLock lock(lock_); |
| 1781 | |
| 1782 | previous_logged_events.swap(logged_events_); |
| 1783 | UseNextTraceBuffer(); |
| 1784 | thread_message_loops_.clear(); |
| 1785 | |
| 1786 | flush_message_loop_proxy_ = NULL; |
| 1787 | flush_output_callback = flush_output_callback_; |
| 1788 | flush_output_callback_.Reset(); |
| 1789 | } |
| 1790 | |
| 1791 | ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(), |
| 1792 | flush_output_callback); |
| 1793 | } |
| 1794 | |
| 1795 | // Run in each thread holding a local event buffer. |
| 1796 | void TraceLog::FlushCurrentThread(int generation) { |
| 1797 | { |
| 1798 | AutoLock lock(lock_); |
| 1799 | if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get()) { |
| 1800 | // This is late. The corresponding flush has finished. |
| 1801 | return; |
| 1802 | } |
| 1803 | } |
| 1804 | |
| 1805 | // This will flush the thread local buffer. |
| 1806 | delete thread_local_event_buffer_.Get(); |
| 1807 | |
| 1808 | AutoLock lock(lock_); |
| 1809 | if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get() || |
| 1810 | thread_message_loops_.size()) |
| 1811 | return; |
| 1812 | |
| 1813 | flush_message_loop_proxy_->PostTask( |
| 1814 | FROM_HERE, |
| 1815 | Bind(&TraceLog::FinishFlush, Unretained(this), generation)); |
| 1816 | } |
| 1817 | |
| 1818 | void TraceLog::OnFlushTimeout(int generation) { |
| 1819 | { |
| 1820 | AutoLock lock(lock_); |
| 1821 | if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get()) { |
| 1822 | // Flush has finished before timeout. |
| 1823 | return; |
| 1824 | } |
| 1825 | |
| 1826 | LOG(WARNING) << |
| 1827 | "The following threads haven't finished flush in time. " |
| 1828 | "If this happens stably for some thread, please call " |
| 1829 | "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from " |
| 1830 | "the thread to avoid its trace events from being lost."; |
| 1831 | for (hash_set<MessageLoop*>::const_iterator it = |
| 1832 | thread_message_loops_.begin(); |
| 1833 | it != thread_message_loops_.end(); ++it) { |
| 1834 | LOG(WARNING) << "Thread: " << (*it)->thread_name(); |
| 1835 | } |
| 1836 | } |
| 1837 | FinishFlush(generation); |
| 1838 | } |
| 1839 | |
| 1840 | void TraceLog::FlushButLeaveBufferIntact( |
| 1841 | const TraceLog::OutputCallback& flush_output_callback) { |
| 1842 | scoped_ptr<TraceBuffer> previous_logged_events; |
| 1843 | { |
| 1844 | AutoLock lock(lock_); |
| 1845 | AddMetadataEventsWhileLocked(); |
| 1846 | if (thread_shared_chunk_) { |
| 1847 | // Return the chunk to the main buffer to flush the sampling data. |
| 1848 | logged_events_->ReturnChunk(thread_shared_chunk_index_, |
| 1849 | thread_shared_chunk_.Pass()); |
| 1850 | } |
| 1851 | previous_logged_events = logged_events_->CloneForIteration().Pass(); |
| 1852 | } // release lock |
| 1853 | |
| 1854 | ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(), |
| 1855 | flush_output_callback); |
| 1856 | } |
| 1857 | |
| 1858 | void TraceLog::UseNextTraceBuffer() { |
| 1859 | logged_events_.reset(CreateTraceBuffer()); |
| 1860 | subtle::NoBarrier_AtomicIncrement(&generation_, 1); |
| 1861 | thread_shared_chunk_.reset(); |
| 1862 | thread_shared_chunk_index_ = 0; |
| 1863 | } |
| 1864 | |
| 1865 | TraceEventHandle TraceLog::AddTraceEvent( |
| 1866 | char phase, |
| 1867 | const unsigned char* category_group_enabled, |
| 1868 | const char* name, |
| 1869 | unsigned long long id, |
| 1870 | int num_args, |
| 1871 | const char** arg_names, |
| 1872 | const unsigned char* arg_types, |
| 1873 | const unsigned long long* arg_values, |
| 1874 | const scoped_refptr<ConvertableToTraceFormat>* convertable_values, |
| 1875 | unsigned char flags) { |
| 1876 | int thread_id = static_cast<int>(base::PlatformThread::CurrentId()); |
| 1877 | base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime(); |
| 1878 | return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled, |
| 1879 | name, id, thread_id, now, |
| 1880 | num_args, arg_names, |
| 1881 | arg_types, arg_values, |
| 1882 | convertable_values, flags); |
| 1883 | } |
| 1884 | |
| 1885 | TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp( |
| 1886 | char phase, |
| 1887 | const unsigned char* category_group_enabled, |
| 1888 | const char* name, |
| 1889 | unsigned long long id, |
| 1890 | int thread_id, |
| 1891 | const TimeTicks& timestamp, |
| 1892 | int num_args, |
| 1893 | const char** arg_names, |
| 1894 | const unsigned char* arg_types, |
| 1895 | const unsigned long long* arg_values, |
| 1896 | const scoped_refptr<ConvertableToTraceFormat>* convertable_values, |
| 1897 | unsigned char flags) { |
| 1898 | TraceEventHandle handle = { 0, 0, 0 }; |
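| | // A zeroed handle (chunk_seq == 0) is treated as "no event" by |
| | // GetEventByHandle(), so it is what all the early returns below hand back. |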
| 1899 | if (!*category_group_enabled) |
| 1900 | return handle; |
| 1901 | |
| 1902 | // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when |
| 1903 | // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) -> |
| 1904 | // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ... |
| 1905 | if (thread_is_in_trace_event_.Get()) |
| 1906 | return handle; |
| 1907 | |
| 1908 | AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_); |
| 1909 | |
| 1910 | DCHECK(name); |
| 1911 | |
| 1912 | if (flags & TRACE_EVENT_FLAG_MANGLE_ID) |
| 1913 | id ^= process_id_hash_; |
| 1914 | |
| 1915 | TimeTicks now = OffsetTimestamp(timestamp); |
| 1916 | TimeTicks thread_now = ThreadNow(); |
| 1917 | |
| 1918 | ThreadLocalEventBuffer* thread_local_event_buffer = NULL; |
| 1919 | // A ThreadLocalEventBuffer needs the message loop |
| 1920 | // - to know when the thread exits; |
| 1921 | // - to handle the final flush. |
| 1922 | // For a thread without a message loop, or whose message loop may be blocked, |
| 1923 | // the trace events are added to the main buffer directly. |
| 1924 | if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) { |
| 1925 | thread_local_event_buffer = thread_local_event_buffer_.Get(); |
| 1926 | if (thread_local_event_buffer && |
| 1927 | !CheckGeneration(thread_local_event_buffer->generation())) { |
| 1928 | delete thread_local_event_buffer; |
| 1929 | thread_local_event_buffer = NULL; |
| 1930 | } |
| 1931 | if (!thread_local_event_buffer) { |
| 1932 | thread_local_event_buffer = new ThreadLocalEventBuffer(this); |
| 1933 | thread_local_event_buffer_.Set(thread_local_event_buffer); |
| 1934 | } |
| 1935 | } |
| 1936 | |
| 1937 | // Check and update the current thread name only if the event is for the |
| 1938 | // current thread to avoid locks in most cases. |
| 1939 | if (thread_id == static_cast<int>(PlatformThread::CurrentId())) { |
| 1940 | const char* new_name = ThreadIdNameManager::GetInstance()-> |
| 1941 | GetName(thread_id); |
| 1942 | // Check if the thread name has been set or changed since the previous |
| 1943 | // call (if any), but don't bother if the new name is empty. Note this will |
| 1944 | // not detect a thread name change within the same char* buffer address: we |
| 1945 | // favor common case performance over corner case correctness. |
| 1946 | if (new_name != g_current_thread_name.Get().Get() && |
| 1947 | new_name && *new_name) { |
| 1948 | g_current_thread_name.Get().Set(new_name); |
| 1949 | |
| 1950 | AutoLock thread_info_lock(thread_info_lock_); |
| 1951 | |
| 1952 | hash_map<int, std::string>::iterator existing_name = |
| 1953 | thread_names_.find(thread_id); |
| 1954 | if (existing_name == thread_names_.end()) { |
| 1955 | // This is a new thread id, and a new name. |
| 1956 | thread_names_[thread_id] = new_name; |
| 1957 | } else { |
| 1958 | // This is a thread id that we've seen before, but potentially with a |
| 1959 | // new name. |
| 1960 | std::vector<StringPiece> existing_names; |
| 1961 | Tokenize(existing_name->second, ",", &existing_names); |
| 1962 | bool found = std::find(existing_names.begin(), |
| 1963 | existing_names.end(), |
| 1964 | new_name) != existing_names.end(); |
| 1965 | if (!found) { |
| 1966 | if (existing_names.size()) |
| 1967 | existing_name->second.push_back(','); |
| 1968 | existing_name->second.append(new_name); |
| 1969 | } |
| 1970 | } |
| 1971 | } |
| 1972 | } |
| 1973 | |
| 1974 | std::string console_message; |
| 1975 | if (*category_group_enabled & |
| 1976 | (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) { |
| 1977 | OptionalAutoLock lock(lock_); |
| 1978 | |
| 1979 | TraceEvent* trace_event = NULL; |
| 1980 | if (thread_local_event_buffer) { |
| 1981 | trace_event = thread_local_event_buffer->AddTraceEvent(&handle); |
| 1982 | } else { |
| 1983 | lock.EnsureAcquired(); |
| 1984 | trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); |
| 1985 | } |
| 1986 | |
| 1987 | if (trace_event) { |
| 1988 | trace_event->Initialize(thread_id, now, thread_now, phase, |
| 1989 | category_group_enabled, name, id, |
| 1990 | num_args, arg_names, arg_types, arg_values, |
| 1991 | convertable_values, flags); |
| 1992 | |
| 1993 | #if defined(OS_ANDROID) |
| 1994 | trace_event->SendToATrace(); |
| 1995 | #endif |
| 1996 | } |
| 1997 | |
| 1998 | if (trace_options() & kInternalEchoToConsole) { |
| 1999 | console_message = EventToConsoleMessage( |
| 2000 | phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase, |
| 2001 | timestamp, trace_event); |
| 2002 | } |
| 2003 | } |
| 2004 | |
| 2005 | if (console_message.size()) |
| 2006 | LOG(ERROR) << console_message; |
| 2007 | |
| 2008 | if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load( |
| 2009 | &watch_category_)) == category_group_enabled) { |
| 2010 | bool event_name_matches; |
| 2011 | WatchEventCallback watch_event_callback_copy; |
| 2012 | { |
| 2013 | AutoLock lock(lock_); |
| 2014 | event_name_matches = watch_event_name_ == name; |
| 2015 | watch_event_callback_copy = watch_event_callback_; |
| 2016 | } |
| 2017 | if (event_name_matches) { |
| 2018 | if (!watch_event_callback_copy.is_null()) |
| 2019 | watch_event_callback_copy.Run(); |
| 2020 | } |
| 2021 | } |
| 2022 | |
| 2023 | if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) { |
| 2024 | EventCallback event_callback = reinterpret_cast<EventCallback>( |
| 2025 | subtle::NoBarrier_Load(&event_callback_)); |
| 2026 | if (event_callback) { |
| 2027 | event_callback(now, |
| 2028 | phase == TRACE_EVENT_PHASE_COMPLETE ? |
| 2029 | TRACE_EVENT_PHASE_BEGIN : phase, |
| 2030 | category_group_enabled, name, id, |
| 2031 | num_args, arg_names, arg_types, arg_values, |
| 2032 | flags); |
| 2033 | } |
| 2034 | } |
| 2035 | |
| 2036 | if (thread_local_event_buffer) |
| 2037 | thread_local_event_buffer->ReportOverhead(now, thread_now); |
| 2038 | |
| 2039 | return handle; |
| 2040 | } |
| 2041 | |
| 2042 | // May be called when a COMPLETE event ends and the unfinished event has been |
| 2043 | // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL). |
| 2044 | std::string TraceLog::EventToConsoleMessage(unsigned char phase, |
| 2045 | const TimeTicks& timestamp, |
| 2046 | TraceEvent* trace_event) { |
| 2047 | AutoLock thread_info_lock(thread_info_lock_); |
| 2048 | |
| 2049 | // The caller should translate TRACE_EVENT_PHASE_COMPLETE to |
| 2050 | // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END. |
| 2051 | DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE); |
| 2052 | |
| 2053 | TimeDelta duration; |
| 2054 | int thread_id = trace_event ? |
| 2055 | trace_event->thread_id() : PlatformThread::CurrentId(); |
| 2056 | if (phase == TRACE_EVENT_PHASE_END) { |
| 2057 | duration = timestamp - thread_event_start_times_[thread_id].top(); |
| 2058 | thread_event_start_times_[thread_id].pop(); |
| 2059 | } |
| 2060 | |
| 2061 | std::string thread_name = thread_names_[thread_id]; |
| 2062 | if (thread_colors_.find(thread_name) == thread_colors_.end()) |
| 2063 | thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1; |
| 2064 | |
| 2065 | std::ostringstream log; |
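| | // "\x1b[0;3%dm" is an ANSI escape sequence selecting foreground colors |
| | // 31-36 from the per-thread color index; "\x1b[0;m" below resets it. |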
| 2066 | log << base::StringPrintf("%s: \x1b[0;3%dm", |
| 2067 | thread_name.c_str(), |
| 2068 | thread_colors_[thread_name]); |
| 2069 | |
| 2070 | size_t depth = 0; |
| 2071 | if (thread_event_start_times_.find(thread_id) != |
| 2072 | thread_event_start_times_.end()) |
| 2073 | depth = thread_event_start_times_[thread_id].size(); |
| 2074 | |
| 2075 | for (size_t i = 0; i < depth; ++i) |
| 2076 | log << "| "; |
| 2077 | |
| 2078 | if (trace_event) |
| 2079 | trace_event->AppendPrettyPrinted(&log); |
| 2080 | if (phase == TRACE_EVENT_PHASE_END) |
| 2081 | log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF()); |
| 2082 | |
| 2083 | log << "\x1b[0;m"; |
| 2084 | |
| 2085 | if (phase == TRACE_EVENT_PHASE_BEGIN) |
| 2086 | thread_event_start_times_[thread_id].push(timestamp); |
| 2087 | |
| 2088 | return log.str(); |
| 2089 | } |
| 2090 | |
| 2091 | void TraceLog::AddTraceEventEtw(char phase, |
| 2092 | const char* name, |
| 2093 | const void* id, |
| 2094 | const char* extra) { |
| 2095 | #if defined(OS_WIN) |
| 2096 | TraceEventETWProvider::Trace(name, phase, id, extra); |
| 2097 | #endif |
| 2098 | INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name, |
| 2099 | TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra); |
| 2100 | } |
| 2101 | |
| 2102 | void TraceLog::AddTraceEventEtw(char phase, |
| 2103 | const char* name, |
| 2104 | const void* id, |
| 2105 | const std::string& extra) { |
| 2106 | #if defined(OS_WIN) |
| 2107 | TraceEventETWProvider::Trace(name, phase, id, extra); |
| 2108 | #endif |
| 2109 | INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name, |
| 2110 | TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra); |
| 2111 | } |
| 2112 | |
| 2113 | void TraceLog::UpdateTraceEventDuration( |
| 2114 | const unsigned char* category_group_enabled, |
| 2115 | const char* name, |
| 2116 | TraceEventHandle handle) { |
| 2117 | // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when |
| 2118 | // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) -> |
| 2119 | // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ... |
| 2120 | if (thread_is_in_trace_event_.Get()) |
| 2121 | return; |
| 2122 | |
| 2123 | AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_); |
| 2124 | |
| 2125 | TimeTicks thread_now = ThreadNow(); |
| 2126 | TimeTicks now = OffsetNow(); |
| 2127 | |
| 2128 | std::string console_message; |
| 2129 | if (*category_group_enabled & ENABLED_FOR_RECORDING) { |
| 2130 | OptionalAutoLock lock(lock_); |
| 2131 | |
| 2132 | TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock); |
| 2133 | if (trace_event) { |
| 2134 | DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE); |
| 2135 | trace_event->UpdateDuration(now, thread_now); |
| 2136 | #if defined(OS_ANDROID) |
| 2137 | trace_event->SendToATrace(); |
| 2138 | #endif |
| 2139 | } |
| 2140 | |
| 2141 | if (trace_options() & kInternalEchoToConsole) { |
| 2142 | console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END, |
| 2143 | now, trace_event); |
| 2144 | } |
| 2145 | } |
| 2146 | |
| 2147 | if (console_message.size()) |
| 2148 | LOG(ERROR) << console_message; |
| 2149 | |
| 2150 | if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) { |
| 2151 | EventCallback event_callback = reinterpret_cast<EventCallback>( |
| 2152 | subtle::NoBarrier_Load(&event_callback_)); |
| 2153 | if (event_callback) { |
| 2154 | event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name, |
| 2155 | trace_event_internal::kNoEventId, 0, NULL, NULL, NULL, |
| 2156 | TRACE_EVENT_FLAG_NONE); |
| 2157 | } |
| 2158 | } |
| 2159 | } |
| 2160 | |
| 2161 | void TraceLog::SetWatchEvent(const std::string& category_name, |
| 2162 | const std::string& event_name, |
| 2163 | const WatchEventCallback& callback) { |
| 2164 | const unsigned char* category = GetCategoryGroupEnabled( |
| 2165 | category_name.c_str()); |
| 2166 | AutoLock lock(lock_); |
| 2167 | subtle::NoBarrier_Store(&watch_category_, |
| 2168 | reinterpret_cast<subtle::AtomicWord>(category)); |
| 2169 | watch_event_name_ = event_name; |
| 2170 | watch_event_callback_ = callback; |
| 2171 | } |
| 2172 | |
| 2173 | void TraceLog::CancelWatchEvent() { |
| 2174 | AutoLock lock(lock_); |
| 2175 | subtle::NoBarrier_Store(&watch_category_, 0); |
| 2176 | watch_event_name_ = ""; |
| 2177 | watch_event_callback_.Reset(); |
| 2178 | } |
| 2179 | |
| 2180 | void TraceLog::AddMetadataEventsWhileLocked() { |
| 2181 | lock_.AssertAcquired(); |
| 2182 | |
| 2183 | #if !defined(OS_NACL) // NaCl shouldn't expose the process id. |
| 2184 | InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), |
| 2185 | 0, |
| 2186 | "num_cpus", "number", |
| 2187 | base::SysInfo::NumberOfProcessors()); |
| 2188 | #endif |
| 2189 | |
| 2191 | int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId()); |
| 2192 | if (process_sort_index_ != 0) { |
| 2193 | InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), |
| 2194 | current_thread_id, |
| 2195 | "process_sort_index", "sort_index", |
| 2196 | process_sort_index_); |
| 2197 | } |
| 2198 | |
| 2199 | if (process_name_.size()) { |
| 2200 | InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), |
| 2201 | current_thread_id, |
| 2202 | "process_name", "name", |
| 2203 | process_name_); |
| 2204 | } |
| 2205 | |
| 2206 | if (process_labels_.size() > 0) { |
| 2207 | std::vector<std::string> labels; |
| 2208 | for (base::hash_map<int, std::string>::iterator it = process_labels_.begin(); |
| 2209 | it != process_labels_.end(); |
| 2210 | ++it) { |
| 2211 | labels.push_back(it->second); |
| 2212 | } |
| 2213 | InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), |
| 2214 | current_thread_id, |
| 2215 | "process_labels", "labels", |
| 2216 | JoinString(labels, ',')); |
| 2217 | } |
| 2218 | |
| 2219 | // Thread sort indices. |
| 2220 | for (hash_map<int, int>::iterator it = thread_sort_indices_.begin(); |
| 2221 | it != thread_sort_indices_.end(); |
| 2222 | ++it) { |
| 2223 | if (it->second == 0) |
| 2224 | continue; |
| 2225 | InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), |
| 2226 | it->first, |
| 2227 | "thread_sort_index", "sort_index", |
| 2228 | it->second); |
| 2229 | } |
| 2230 | |
| 2231 | // Thread names. |
| 2232 | AutoLock thread_info_lock(thread_info_lock_); |
| 2233 | for (hash_map<int, std::string>::iterator it = thread_names_.begin(); |
| 2234 | it != thread_names_.end(); |
| 2235 | ++it) { |
| 2236 | if (it->second.empty()) |
| 2237 | continue; |
| 2238 | InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), |
| 2239 | it->first, |
| 2240 | "thread_name", "name", |
| 2241 | it->second); |
| 2242 | } |
| 2243 | |
| 2244 | // If buffer is full, add a metadata record to report this. |
| 2245 | if (!buffer_limit_reached_timestamp_.is_null()) { |
| 2246 | InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), |
| 2247 | current_thread_id, |
| 2248 | "trace_buffer_overflowed", |
| 2249 | "overflowed_at_ts", |
| 2250 | buffer_limit_reached_timestamp_); |
| 2251 | } |
| 2252 | } |
| 2253 | |
| 2254 | void TraceLog::WaitSamplingEventForTesting() { |
| 2255 | if (!sampling_thread_) |
| 2256 | return; |
| 2257 | sampling_thread_->WaitSamplingEventForTesting(); |
| 2258 | } |
| 2259 | |
| 2260 | void TraceLog::DeleteForTesting() { |
| 2261 | DeleteTraceLogForTesting::Delete(); |
| 2262 | } |
| 2263 | |
| 2264 | TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) { |
| 2265 | return GetEventByHandleInternal(handle, NULL); |
| 2266 | } |
| 2267 | |
| 2268 | TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle, |
| 2269 | OptionalAutoLock* lock) { |
| 2270 | if (!handle.chunk_seq) |
| 2271 | return NULL; |
| 2272 | |
| 2273 | if (thread_local_event_buffer_.Get()) { |
| 2274 | TraceEvent* trace_event = |
| 2275 | thread_local_event_buffer_.Get()->GetEventByHandle(handle); |
| 2276 | if (trace_event) |
| 2277 | return trace_event; |
| 2278 | } |
| 2279 | |
| 2280 | // The event is no longer under the control of the thread local buffer. |
| 2281 | // Try to get the event from the main buffer while holding the lock. |
| 2282 | if (lock) |
| 2283 | lock->EnsureAcquired(); |
| 2284 | |
| 2285 | if (thread_shared_chunk_ && |
| 2286 | handle.chunk_index == thread_shared_chunk_index_) { |
| 2287 | return handle.chunk_seq == thread_shared_chunk_->seq() ? |
| 2288 | thread_shared_chunk_->GetEventAt(handle.event_index) : NULL; |
| 2289 | } |
| 2290 | |
| 2291 | return logged_events_->GetEventByHandle(handle); |
| 2292 | } |
| 2293 | |
| 2294 | void TraceLog::SetProcessID(int process_id) { |
| 2295 | process_id_ = process_id; |
| 2296 | // Create an FNV hash from the process ID for XORing. |
| 2297 | // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details. |
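| | // This is a single FNV-1a-style step (XOR with the offset basis, then |
| | // multiply by the FNV prime) applied to the whole pid at once; it is only |
| | // used to mangle event IDs (TRACE_EVENT_FLAG_MANGLE_ID), not as a |
| | // general-purpose hash. |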
| 2298 | unsigned long long offset_basis = 14695981039346656037ull; |
| 2299 | unsigned long long fnv_prime = 1099511628211ull; |
| 2300 | unsigned long long pid = static_cast<unsigned long long>(process_id_); |
| 2301 | process_id_hash_ = (offset_basis ^ pid) * fnv_prime; |
| 2302 | } |
| 2303 | |
| 2304 | void TraceLog::SetProcessSortIndex(int sort_index) { |
| 2305 | AutoLock lock(lock_); |
| 2306 | process_sort_index_ = sort_index; |
| 2307 | } |
| 2308 | |
| 2309 | void TraceLog::SetProcessName(const std::string& process_name) { |
| 2310 | AutoLock lock(lock_); |
| 2311 | process_name_ = process_name; |
| 2312 | } |
| 2313 | |
| 2314 | void TraceLog::UpdateProcessLabel( |
| 2315 | int label_id, const std::string& current_label) { |
| 2316 | if (current_label.empty()) |
| 2317 | return RemoveProcessLabel(label_id); |
| 2318 | |
| 2319 | AutoLock lock(lock_); |
| 2320 | process_labels_[label_id] = current_label; |
| 2321 | } |
| 2322 | |
| 2323 | void TraceLog::RemoveProcessLabel(int label_id) { |
| 2324 | AutoLock lock(lock_); |
| 2325 | base::hash_map<int, std::string>::iterator it = process_labels_.find( |
| 2326 | label_id); |
| 2327 | if (it == process_labels_.end()) |
| 2328 | return; |
| 2329 | |
| 2330 | process_labels_.erase(it); |
| 2331 | } |
| 2332 | |
| 2333 | void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) { |
| 2334 | AutoLock lock(lock_); |
| 2335 | thread_sort_indices_[static_cast<int>(thread_id)] = sort_index; |
| 2336 | } |
| 2337 | |
| 2338 | void TraceLog::SetTimeOffset(TimeDelta offset) { |
| 2339 | time_offset_ = offset; |
| 2340 | } |
| 2341 | |
| 2342 | size_t TraceLog::GetObserverCountForTest() const { |
| 2343 | return enabled_state_observer_list_.size(); |
| 2344 | } |
| 2345 | |
| 2346 | void TraceLog::SetCurrentThreadBlocksMessageLoop() { |
| 2347 | thread_blocks_message_loop_.Set(true); |
| 2348 | if (thread_local_event_buffer_.Get()) { |
| 2349 | // This will flush the thread local buffer. |
| 2350 | delete thread_local_event_buffer_.Get(); |
| 2351 | } |
| 2352 | } |
| 2353 | |
| 2354 | bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace( |
| 2355 | const std::string& str) { |
| 2356 | return str.empty() || |
| 2357 | str.at(0) == ' ' || |
| 2358 | str.at(str.length() - 1) == ' '; |
| 2359 | } |
| 2360 | |
James Robinson | 646469d | 2014-10-03 15:33:28 -0700 | [diff] [blame] | 2361 | CategoryFilter::CategoryFilter(const std::string& filter_string) { |
| 2362 | if (!filter_string.empty()) |
| 2363 | Initialize(filter_string); |
| 2364 | else |
| 2365 | Initialize(CategoryFilter::kDefaultCategoryFilterString); |
| 2366 | } |
| 2367 | |
| 2368 | CategoryFilter::CategoryFilter() { |
| 2369 | Initialize(CategoryFilter::kDefaultCategoryFilterString); |
| 2370 | } |
| 2371 | |
| 2372 | CategoryFilter::CategoryFilter(const CategoryFilter& cf) |
| 2373 | : included_(cf.included_), |
| 2374 | disabled_(cf.disabled_), |
| 2375 | excluded_(cf.excluded_), |
| 2376 | delays_(cf.delays_) { |
| 2377 | } |
| 2378 | |
| 2379 | CategoryFilter::~CategoryFilter() { |
| 2380 | } |
| 2381 | |
| 2382 | CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) { |
| 2383 | if (this == &rhs) |
| 2384 | return *this; |
| 2385 | |
| 2386 | included_ = rhs.included_; |
| 2387 | disabled_ = rhs.disabled_; |
| 2388 | excluded_ = rhs.excluded_; |
| 2389 | delays_ = rhs.delays_; |
| 2390 | return *this; |
| 2391 | } |
| 2392 | |
| 2393 | void CategoryFilter::Initialize(const std::string& filter_string) { |
| 2394 | // Tokenize list of categories, delimited by ','. |
| 2395 | StringTokenizer tokens(filter_string, ","); |
| 2396 | // Add each token to the appropriate list (included_, excluded_, disabled_ or delays_). |
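| | // For illustration (hypothetical category names), the filter string |
| | //   "cat1,-cat2,disabled-by-default-cat3,DELAY(cat4;0.016;oneshot)" |
| | // includes cat1, excludes cat2, opts in to the disabled-by-default category |
| | // cat3, and records a synthetic delay entry for cat4. |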
| 2397 | while (tokens.GetNext()) { |
| 2398 | std::string category = tokens.token(); |
| 2399 | // Ignore empty categories. |
| 2400 | if (category.empty()) |
| 2401 | continue; |
| 2402 | // Synthetic delays are of the form 'DELAY(delay;option;option;...)'. |
| 2403 | if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 && |
| 2404 | category.at(category.size() - 1) == ')') { |
| 2405 | category = category.substr( |
| 2406 | strlen(kSyntheticDelayCategoryFilterPrefix), |
| 2407 | category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1); |
| 2408 | size_t name_length = category.find(';'); |
| 2409 | if (name_length != std::string::npos && name_length > 0 && |
| 2410 | name_length != category.size() - 1) { |
| 2411 | delays_.push_back(category); |
| 2412 | } |
| 2413 | } else if (category.at(0) == '-') { |
| 2414 | // Excluded categories start with '-'. |
| 2415 | // Remove '-' from category string. |
| 2416 | category = category.substr(1); |
| 2417 | excluded_.push_back(category); |
| 2418 | } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")), |
| 2419 | TRACE_DISABLED_BY_DEFAULT("")) == 0) { |
| 2420 | disabled_.push_back(category); |
| 2421 | } else { |
| 2422 | included_.push_back(category); |
| 2423 | } |
| 2424 | } |
| 2425 | } |
| 2426 | |
| 2427 | void CategoryFilter::WriteString(const StringList& values, |
| 2428 | std::string* out, |
| 2429 | bool included) const { |
| 2430 | bool prepend_comma = !out->empty(); |
| 2431 | int token_cnt = 0; |
| 2432 | for (StringList::const_iterator ci = values.begin(); |
| 2433 | ci != values.end(); ++ci) { |
| 2434 | if (token_cnt > 0 || prepend_comma) |
| 2435 | StringAppendF(out, ","); |
| 2436 | StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str()); |
| 2437 | ++token_cnt; |
| 2438 | } |
| 2439 | } |
| 2440 | |
| 2441 | void CategoryFilter::WriteString(const StringList& delays, |
| 2442 | std::string* out) const { |
| 2443 | bool prepend_comma = !out->empty(); |
| 2444 | int token_cnt = 0; |
| 2445 | for (StringList::const_iterator ci = delays.begin(); |
| 2446 | ci != delays.end(); ++ci) { |
| 2447 | if (token_cnt > 0 || prepend_comma) |
| 2448 | StringAppendF(out, ","); |
| 2449 | StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix, |
| 2450 | ci->c_str()); |
| 2451 | ++token_cnt; |
| 2452 | } |
| 2453 | } |
| 2454 | |
| 2455 | std::string CategoryFilter::ToString() const { |
| 2456 | std::string filter_string; |
| 2457 | WriteString(included_, &filter_string, true); |
| 2458 | WriteString(disabled_, &filter_string, true); |
| 2459 | WriteString(excluded_, &filter_string, false); |
| 2460 | WriteString(delays_, &filter_string); |
| 2461 | return filter_string; |
| 2462 | } |
| 2463 | |
| 2464 | bool CategoryFilter::IsCategoryGroupEnabled( |
| 2465 | const char* category_group_name) const { |
| 2466 | // TraceLog should call this method only as part of enabling/disabling |
| 2467 | // categories. |
Etienne Membrives | b1556b3 | 2014-12-16 13:56:09 +0100 | [diff] [blame^] | 2468 | |
| 2469 | bool had_enabled_by_default = false; |
| 2470 | DCHECK(category_group_name); |
| 2471 | CStringTokenizer category_group_tokens( |
| 2472 | category_group_name, category_group_name + strlen(category_group_name), |
| 2473 | ","); |
| 2474 | while (category_group_tokens.GetNext()) { |
| 2475 | std::string category_group_token = category_group_tokens.token(); |
| 2476 | // Don't allow empty tokens, nor tokens with leading or trailing space. |
| 2477 | DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace( |
| 2478 | category_group_token)) |
| 2479 | << "Disallowed category string"; |
| 2480 | if (IsCategoryEnabled(category_group_token.c_str())) { |
| 2481 | return true; |
| 2482 | } |
| 2483 | if (!MatchPattern(category_group_token.c_str(), |
| 2484 | TRACE_DISABLED_BY_DEFAULT("*"))) |
| 2485 | had_enabled_by_default = true; |
| 2486 | } |
| 2487 | // Do a second pass to check for explicitly disabled categories |
| 2488 | // (those explicitly enabled have priority due to first pass). |
| 2489 | category_group_tokens.Reset(); |
| 2490 | while (category_group_tokens.GetNext()) { |
| 2491 | std::string category_group_token = category_group_tokens.token(); |
| 2492 | for (StringList::const_iterator ci = excluded_.begin(); |
| 2493 | ci != excluded_.end(); ++ci) { |
| 2494 | if (MatchPattern(category_group_token.c_str(), ci->c_str())) |
| 2495 | return false; |
| 2496 | } |
| 2497 | } |
| 2498 | // If the category group is not excluded and there are no included patterns, |
| 2499 | // we consider this category group enabled, as long as it had categories |
| 2500 | // other than disabled-by-default ones. |
| 2501 | return included_.empty() && had_enabled_by_default; |
| 2502 | } |
| 2503 | |
| 2504 | bool CategoryFilter::IsCategoryEnabled(const char* category_name) const { |
James Robinson | 646469d | 2014-10-03 15:33:28 -0700 | [diff] [blame] | 2505 | StringList::const_iterator ci; |
| 2506 | |
| 2507 | // Check the disabled- filters and the disabled-* wildcard first so that a |
| 2508 | // "*" filter does not include the disabled. |
| 2509 | for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) { |
Etienne Membrives | b1556b3 | 2014-12-16 13:56:09 +0100 | [diff] [blame^] | 2510 | if (MatchPattern(category_name, ci->c_str())) |
James Robinson | 646469d | 2014-10-03 15:33:28 -0700 | [diff] [blame] | 2511 | return true; |
| 2512 | } |
Etienne Membrives | b1556b3 | 2014-12-16 13:56:09 +0100 | [diff] [blame^] | 2513 | |
| 2514 | if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*"))) |
James Robinson | 646469d | 2014-10-03 15:33:28 -0700 | [diff] [blame] | 2515 | return false; |
| 2516 | |
| 2517 | for (ci = included_.begin(); ci != included_.end(); ++ci) { |
Etienne Membrives | b1556b3 | 2014-12-16 13:56:09 +0100 | [diff] [blame^] | 2518 | if (MatchPattern(category_name, ci->c_str())) |
James Robinson | 646469d | 2014-10-03 15:33:28 -0700 | [diff] [blame] | 2519 | return true; |
| 2520 | } |
| 2521 | |
Etienne Membrives | b1556b3 | 2014-12-16 13:56:09 +0100 | [diff] [blame^] | 2522 | return false; |
James Robinson | 646469d | 2014-10-03 15:33:28 -0700 | [diff] [blame] | 2523 | } |
| 2524 | |
| 2525 | bool CategoryFilter::HasIncludedPatterns() const { |
| 2526 | return !included_.empty(); |
| 2527 | } |
| 2528 | |
| 2529 | void CategoryFilter::Merge(const CategoryFilter& nested_filter) { |
| 2530 | // Keep included patterns only if both filters have an included entry. |
| 2531 | // Otherwise, one of the filters was specifying "*" and we want to honour the |
| 2532 | // broadest filter. |
| 2533 | if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) { |
| 2534 | included_.insert(included_.end(), |
| 2535 | nested_filter.included_.begin(), |
| 2536 | nested_filter.included_.end()); |
| 2537 | } else { |
| 2538 | included_.clear(); |
| 2539 | } |
| 2540 | |
| 2541 | disabled_.insert(disabled_.end(), |
| 2542 | nested_filter.disabled_.begin(), |
| 2543 | nested_filter.disabled_.end()); |
| 2544 | excluded_.insert(excluded_.end(), |
| 2545 | nested_filter.excluded_.begin(), |
| 2546 | nested_filter.excluded_.end()); |
| 2547 | delays_.insert(delays_.end(), |
| 2548 | nested_filter.delays_.begin(), |
| 2549 | nested_filter.delays_.end()); |
| 2550 | } |
| 2551 | |
| 2552 | void CategoryFilter::Clear() { |
| 2553 | included_.clear(); |
| 2554 | disabled_.clear(); |
| 2555 | excluded_.clear(); |
| 2556 | } |
| 2557 | |
| 2558 | const CategoryFilter::StringList& |
| 2559 | CategoryFilter::GetSyntheticDelayValues() const { |
| 2560 | return delays_; |
| 2561 | } |
| 2562 | |
| 2563 | } // namespace debug |
| 2564 | } // namespace base |
| 2565 | |
| 2566 | namespace trace_event_internal { |
| 2567 | |
| 2568 | ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient( |
| 2569 | const char* category_group, const char* name) { |
| 2570 | // The single atom works because for now the category_group can only be "gpu". |
| 2571 | DCHECK(strcmp(category_group, "gpu") == 0); |
| 2572 | static TRACE_EVENT_API_ATOMIC_WORD atomic = 0; |
| 2573 | INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( |
| 2574 | category_group, atomic, category_group_enabled_); |
| 2575 | name_ = name; |
| 2576 | if (*category_group_enabled_) { |
| 2577 | event_handle_ = |
| 2578 | TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP( |
| 2579 | TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name, |
| 2580 | trace_event_internal::kNoEventId, |
| 2581 | static_cast<int>(base::PlatformThread::CurrentId()), |
| 2582 | base::TimeTicks::NowFromSystemTraceTime(), |
| 2583 | 0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE); |
| 2584 | } |
| 2585 | } |
| 2586 | |
| 2587 | ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { |
| 2588 | if (*category_group_enabled_) { |
| 2589 | TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, |
| 2590 | name_, event_handle_); |
| 2591 | } |
| 2592 | } |
| 2593 | |
| 2594 | } // namespace trace_event_internal |