Merge cherrypicks of ['googleplex-android-review.googlesource.com/26314276'] into sparse-11501734-L84300030002205709.
SPARSE_CHANGE: Ie53ee2b854cf2da488d115623dbdcf959fbacade
Change-Id: I892aaede606e95700e6a1ae891b9180ed995da62
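
This change makes TraceBuffer track a high watermark (|used_size_|) of how
many bytes have ever been written into the buffer, so that CloneReadOnly()
only has to commit and copy that many bytes rather than the full, possibly
mostly-untouched, buffer. Below is a minimal standalone sketch of the
pattern, using illustrative names (Buffer, Write, Clone) that are not
Perfetto's actual API; plain zeroed heap memory stands in for the
lazily-committed base::PagedMemory.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>

class Buffer {
 public:
  explicit Buffer(size_t size) : size_(size), data_(new uint8_t[size]()) {}

  // Mirrors the new TraceBuffer::EnsureCommitted(): besides committing pages
  // (elided in this sketch), record the furthest byte ever touched.
  void EnsureCommitted(size_t bytes) {
    assert(bytes <= size_);
    used_size_ = std::max(used_size_, bytes);
  }

  void Write(size_t offset, const void* src, size_t len) {
    EnsureCommitted(offset + len);
    memcpy(data_.get() + offset, src, len);
  }

  // Mirrors the CloneReadOnly() change: commit and copy |used_size_| bytes,
  // not |size_|.
  std::unique_ptr<Buffer> Clone() const {
    auto copy = std::make_unique<Buffer>(size_);
    copy->EnsureCommitted(used_size_);
    memcpy(copy->data_.get(), data_.get(), used_size_);
    return copy;
  }

  size_t used_size() const { return used_size_; }

 private:
  size_t size_;
  size_t used_size_ = 0;  // High watermark; saturates at |size_|.
  std::unique_ptr<uint8_t[]> data_;
};

int main() {
  Buffer buf(1 << 20);       // 1 MiB buffer, mostly never written.
  buf.Write(0, "hello", 5);  // Only the first 5 bytes are ever touched.
  std::unique_ptr<Buffer> snap = buf.Clone();  // Copies 5 bytes, not 1 MiB.
  assert(snap->used_size() == 5);
}

In the real patch every write-side commit is funneled through the new
TraceBuffer::EnsureCommitted() wrapper (see the trace_buffer.h hunks below),
so the watermark advances on the same path that commits pages.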
diff --git a/src/tracing/core/trace_buffer.cc b/src/tracing/core/trace_buffer.cc
index baa6a34..e25aaf6 100644
--- a/src/tracing/core/trace_buffer.cc
+++ b/src/tracing/core/trace_buffer.cc
@@ -75,6 +75,7 @@
return false;
}
size_ = size;
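+ // Start with an empty high watermark; it grows again in EnsureCommitted().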
+ used_size_ = 0;
stats_.set_buffer_size(size);
max_chunk_size_ = std::min(size, ChunkRecord::kMaxSize);
wptr_ = begin();
@@ -914,8 +915,8 @@
// The assignments below must be done after Initialize().
- data_.EnsureCommitted(data_.size());
- memcpy(data_.Get(), src.data_.Get(), src.data_.size());
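+ // Commit and copy only the bytes actually written into |src|; pages past
+ // |used_size_| were never touched and can stay uncommitted in the clone.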
+ EnsureCommitted(src.used_size_);
+ memcpy(data_.Get(), src.data_.Get(), src.used_size_);
last_chunk_id_written_ = src.last_chunk_id_written_;
stats_ = src.stats_;
diff --git a/src/tracing/core/trace_buffer.h b/src/tracing/core/trace_buffer.h
index f6994ba..ca58957 100644
--- a/src/tracing/core/trace_buffer.h
+++ b/src/tracing/core/trace_buffer.h
@@ -284,6 +284,7 @@
const WriterStatsMap& writer_stats() const { return writer_stats_; }
const TraceStats::BufferStats& stats() const { return stats_; }
size_t size() const { return size_; }
+ size_t used_size() const { return used_size_; }
private:
friend class TraceBufferTest;
@@ -609,11 +610,16 @@
ChunkRecord* GetChunkRecordAt(uint8_t* ptr) {
DcheckIsAlignedAndWithinBounds(ptr);
// We may be accessing a new (empty) record.
- data_.EnsureCommitted(
- static_cast<size_t>(ptr + sizeof(ChunkRecord) - begin()));
+ EnsureCommitted(static_cast<size_t>(ptr + sizeof(ChunkRecord) - begin()));
return reinterpret_cast<ChunkRecord*>(ptr);
}
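+ // Wraps data_.EnsureCommitted() and additionally records the high watermark
+ // of committed bytes in |used_size_|.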
+ void EnsureCommitted(size_t size) {
+ PERFETTO_DCHECK(size <= size_);
+ data_.EnsureCommitted(size);
+ used_size_ = std::max(used_size_, size);
+ }
+
void DiscardWrite();
// |src| can be nullptr (in which case |size| must be ==
@@ -635,7 +641,7 @@
DcheckIsAlignedAndWithinBounds(wptr);
// We may be writing to this area for the first time.
- data_.EnsureCommitted(static_cast<size_t>(wptr + record.size - begin()));
+ EnsureCommitted(static_cast<size_t>(wptr + record.size - begin()));
// Deliberately not a *D*CHECK.
PERFETTO_CHECK(wptr + sizeof(record) + size <= end());
@@ -672,6 +678,12 @@
base::PagedMemory data_;
size_t size_ = 0; // Size in bytes of |data_|.
+
+ // High watermark. The number of bytes (<= |size_|) written into the buffer
+ // before the first wraparound. This increases as data is written into the
+ // buffer and then saturates at |size_|. Used for CloneReadOnly().
+ size_t used_size_ = 0;
+
size_t max_chunk_size_ = 0; // Max size in bytes allowed for a chunk.
uint8_t* wptr_ = nullptr; // Write pointer.
diff --git a/src/tracing/core/trace_buffer_unittest.cc b/src/tracing/core/trace_buffer_unittest.cc
index 38d343d..44847dd 100644
--- a/src/tracing/core/trace_buffer_unittest.cc
+++ b/src/tracing/core/trace_buffer_unittest.cc
@@ -26,6 +26,7 @@
#include "perfetto/ext/tracing/core/shared_memory_abi.h"
#include "perfetto/ext/tracing/core/trace_packet.h"
#include "perfetto/protozero/proto_utils.h"
+#include "src/base/test/vm_test_utils.h"
#include "src/tracing/core/trace_buffer.h"
#include "src/tracing/test/fake_packet.h"
#include "test/gtest_and_gmock.h"
@@ -49,6 +50,22 @@
static constexpr uint8_t kChunkNeedsPatching =
SharedMemoryABI::ChunkHeader::kChunkNeedsPatching;
+ void TearDown() override {
+ // Check that the used_size() accounting is correct and that all the data
+ // past that offset is zero-filled.
+ if (trace_buffer_) {
+ const size_t used_size = trace_buffer_->used_size();
+ ASSERT_LE(used_size, trace_buffer_->size());
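+ // Commit the full buffer so the zero-check below can safely read past
+ // used_size.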
+ trace_buffer_->EnsureCommitted(trace_buffer_->size());
+ bool zero_padded = true;
+ for (size_t i = used_size; i < trace_buffer_->size(); ++i) {
+ bool is_zero = static_cast<char*>(trace_buffer_->data_.Get())[i] == 0;
+ zero_padded = zero_padded && is_zero;
+ }
+ ASSERT_TRUE(zero_padded);
+ }
+ }
+
FakeChunk CreateChunk(ProducerID p, WriterID w, ChunkID c) {
return FakeChunk(trace_buffer_.get(), p, w, c);
}
@@ -144,6 +161,7 @@
return keys;
}
+ uint8_t* GetBufData(const TraceBuffer& buf) { return buf.begin(); }
TraceBuffer* trace_buffer() { return trace_buffer_.get(); }
size_t size_to_end() { return trace_buffer_->size_to_end(); }
@@ -1854,6 +1872,8 @@
.CopyIntoTraceBuffer());
}
+ ASSERT_EQ(trace_buffer()->used_size(), 32u * kNumWriters);
+
// Make some reads *before* cloning. This is to check that the behaviour of
// CloneReadOnly() is not affected by reads made before cloning.
// On every round (|num_pre_reads|), read a different number of packets.
@@ -1866,6 +1886,7 @@
// Now create a snapshot and make sure we always read all the packets.
std::unique_ptr<TraceBuffer> snap = trace_buffer()->CloneReadOnly();
+ ASSERT_EQ(snap->used_size(), 32u * kNumWriters);
snap->BeginRead();
for (char i = 'A'; i < 'A' + kNumWriters; i++) {
auto frags = ReadPacket(snap);
@@ -1929,4 +1950,69 @@
ASSERT_THAT(ReadPacket(snap), IsEmpty());
}
+TEST_F(TraceBufferTest, Clone_Wrapping) {
+ ResetBuffer(4096);
+ // 16 bytes of chunk-record overhead make each chunk exactly 1 KB, so four
+ // chunks wrap the 4 KB buffer perfectly.
+ const size_t kFrgSize = 1024 - 16;
+ for (WriterID i = 0; i < 6; i++) {
+ CreateChunk(ProducerID(1), WriterID(i), ChunkID(0))
+ .AddPacket(kFrgSize, static_cast<char>('a' + i))
+ .CopyIntoTraceBuffer();
+ }
+ std::unique_ptr<TraceBuffer> snap = trace_buffer()->CloneReadOnly();
+ ASSERT_EQ(snap->used_size(), snap->size());
+ snap->BeginRead();
+ ASSERT_THAT(ReadPacket(snap), ElementsAre(FakePacketFragment(kFrgSize, 'c')));
+ ASSERT_THAT(ReadPacket(snap), ElementsAre(FakePacketFragment(kFrgSize, 'd')));
+ ASSERT_THAT(ReadPacket(snap), ElementsAre(FakePacketFragment(kFrgSize, 'e')));
+ ASSERT_THAT(ReadPacket(snap), ElementsAre(FakePacketFragment(kFrgSize, 'f')));
+ ASSERT_THAT(ReadPacket(snap), IsEmpty());
+}
+
+TEST_F(TraceBufferTest, Clone_WrappingWithPadding) {
+ ResetBuffer(4096);
+ // First create one 2KB chunk, so the contents are [aaaaaaaa00000000].
+ CreateChunk(ProducerID(1), WriterID(0), ChunkID(0))
+ .AddPacket(2048, static_cast<char>('a'))
+ .CopyIntoTraceBuffer();
+
+ // Then write a ~3KB chunk (3192 bytes) that fits in the buffer but requires
+ // zero-padding the tail and restarting from the beginning, so the contents
+ // are [bbbbbbbbbbbb0000].
+ CreateChunk(ProducerID(1), WriterID(1), ChunkID(0))
+ .AddPacket(3192, static_cast<char>('b'))
+ .CopyIntoTraceBuffer();
+
+ ASSERT_EQ(trace_buffer()->used_size(), trace_buffer()->size());
+ std::unique_ptr<TraceBuffer> snap = trace_buffer()->CloneReadOnly();
+ ASSERT_EQ(snap->used_size(), snap->size());
+ snap->BeginRead();
+ ASSERT_THAT(ReadPacket(snap), ElementsAre(FakePacketFragment(3192, 'b')));
+ ASSERT_THAT(ReadPacket(snap), IsEmpty());
+}
+
+TEST_F(TraceBufferTest, Clone_CommitOnlyUsedSize) {
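+ // IsMapped() works at page granularity, so size the buffer in whole pages.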
+ const size_t kPages = 32;
+ const size_t kPageSize = base::GetSysPageSize();
+ ResetBuffer(kPageSize * kPages);
+ CreateChunk(ProducerID(1), WriterID(0), ChunkID(0))
+ .AddPacket(1024, static_cast<char>('a'))
+ .CopyIntoTraceBuffer();
+
+ using base::vm_test_utils::IsMapped;
+ auto is_only_first_page_mapped = [&](const TraceBuffer& buf) {
+ bool first_mapped = IsMapped(GetBufData(buf), kPageSize);
+ bool rest_mapped = IsMapped(GetBufData(buf) + kPageSize, kPageSize * (kPages - 1));
+ return first_mapped && !rest_mapped;
+ };
+
+ // If the mapping assumption doesn't hold up to this point, there is no
+ // point checking that it holds on the cloned buffer: various platforms (and
+ // sanitizers such as ASAN) can legitimately pre-commit memory even when we
+ // never page-fault it.
+ if (!is_only_first_page_mapped(*trace_buffer()))
+ GTEST_SKIP() << "VM commit detection not supported";
+
+ std::unique_ptr<TraceBuffer> snap = trace_buffer()->CloneReadOnly();
+ ASSERT_EQ(snap->used_size(), trace_buffer()->used_size());
+ ASSERT_TRUE(is_only_first_page_mapped(*snap));
+}
+
} // namespace perfetto