[ios] Bring up first draft thread and memory snapshot.

Gather most of the necessary information for the thread snapshot.

Note that:
 - The 'capture' portion of this CL will be moved out of the snapshot
   interface and into a separate in-process dump-to-disk location.
 - All of the pointer dereferences need to be wrapped in vm_read (see
   the sketch below).
 - The read-fast-and-dump logic in thread_snapshot may end up in a
   completely different file, but until we pick a
   serialization/deserialization method, keep it as-is.
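
For illustration, the vm_read wrapping mentioned in the second note
might look like the following sketch (ReadSafely is a hypothetical
helper, not part of this CL):

    // Hypothetical sketch only; not part of this CL.
    bool ReadSafely(vm_address_t address, vm_size_t size, void* out) {
      vm_offset_t data;
      mach_msg_type_number_t data_count;
      kern_return_t kr =
          vm_read(mach_task_self(), address, size, &data, &data_count);
      if (kr != KERN_SUCCESS)
        return false;
      memcpy(out, reinterpret_cast<const void*>(data), data_count);
      vm_deallocate(mach_task_self(), data, data_count);
      return true;
    }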

Change-Id: Ic892227d169bb43c6b7fd14f0875c3c5dc8cad25
Reviewed-on: https://chromium-review.googlesource.com/c/crashpad/crashpad/+/2085572
Reviewed-by: Mark Mentovai <mark@chromium.org>
Reviewed-by: Justin Cohen <justincohen@chromium.org>
GitOrigin-RevId: 4e2a190ad6e6674c1dafed858f603b32a2344e52
diff --git a/snapshot/BUILD.gn b/snapshot/BUILD.gn
index bfee6f5..60cf1df 100644
--- a/snapshot/BUILD.gn
+++ b/snapshot/BUILD.gn
@@ -110,10 +110,16 @@
 
   if (crashpad_is_ios) {
     sources += [
+      "ios/memory_snapshot_ios.cc",
+      "ios/memory_snapshot_ios.h",
       "ios/module_snapshot_ios.cc",
       "ios/module_snapshot_ios.h",
       "ios/process_snapshot_ios.cc",
       "ios/process_snapshot_ios.h",
+      "ios/thread_snapshot_ios.cc",
+      "ios/thread_snapshot_ios.h",
+      "mac/cpu_context_mac.cc",
+      "mac/cpu_context_mac.h",
     ]
   }
 
diff --git a/snapshot/ios/memory_snapshot_ios.cc b/snapshot/ios/memory_snapshot_ios.cc
new file mode 100644
index 0000000..e760465
--- /dev/null
+++ b/snapshot/ios/memory_snapshot_ios.cc
@@ -0,0 +1,65 @@
+// Copyright 2020 The Crashpad Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "snapshot/ios/memory_snapshot_ios.h"
+
+namespace crashpad {
+namespace internal {
+
+void MemorySnapshotIOS::Initialize(vm_address_t address, vm_size_t size) {
+  INITIALIZATION_STATE_SET_INITIALIZING(initialized_);
+  address_ = address;
+  size_ = base::checked_cast<size_t>(size);
+
+  // TODO(justincohen): This is temporary, as MemorySnapshotIOS will likely be
+  // able to point directly to the deserialized data dump rather than copying
+  // data around.
+  buffer_ = std::unique_ptr<uint8_t[]>(new uint8_t[size_]);
+  memcpy(buffer_.get(), reinterpret_cast<void*>(address_), size_);
+  INITIALIZATION_STATE_SET_VALID(initialized_);
+}
+
+uint64_t MemorySnapshotIOS::Address() const {
+  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
+  return address_;
+}
+
+size_t MemorySnapshotIOS::Size() const {
+  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
+  return size_;
+}
+
+bool MemorySnapshotIOS::Read(Delegate* delegate) const {
+  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
+
+  if (size_ == 0) {
+    return delegate->MemorySnapshotDelegateRead(nullptr, size_);
+  }
+
+  return delegate->MemorySnapshotDelegateRead(buffer_.get(), size_);
+}
+
+const MemorySnapshot* MemorySnapshotIOS::MergeWithOtherSnapshot(
+    const MemorySnapshot* other) const {
+  CheckedRange<uint64_t, size_t> merged(0, 0);
+  if (!LoggingDetermineMergedRange(this, other, &merged))
+    return nullptr;
+
+  auto result = std::make_unique<MemorySnapshotIOS>();
+  result->Initialize(merged.base(), merged.size());
+  return result.release();
+}
+
+}  // namespace internal
+}  // namespace crashpad
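
A note on consuming Read(): the bytes are handed to a
MemorySnapshot::Delegate. A minimal sketch of a delegate that copies
them into a vector (hypothetical, for illustration only):

    #include <stdint.h>
    #include <vector>
    #include "snapshot/memory_snapshot.h"

    // Hypothetical delegate; copies the snapshot's bytes into a vector.
    class VectorReadDelegate final
        : public crashpad::MemorySnapshot::Delegate {
     public:
      bool MemorySnapshotDelegateRead(void* data, size_t size) override {
        if (size) {
          bytes_.assign(static_cast<const uint8_t*>(data),
                        static_cast<const uint8_t*>(data) + size);
        }
        return true;
      }
      const std::vector<uint8_t>& bytes() const { return bytes_; }

     private:
      std::vector<uint8_t> bytes_;
    };

After snapshot->Read(&delegate) returns true, delegate.bytes() holds a
copy of the captured region.
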
diff --git a/snapshot/ios/memory_snapshot_ios.h b/snapshot/ios/memory_snapshot_ios.h
new file mode 100644
index 0000000..be99105
--- /dev/null
+++ b/snapshot/ios/memory_snapshot_ios.h
@@ -0,0 +1,63 @@
+// Copyright 2020 The Crashpad Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef CRASHPAD_SNAPSHOT_IOS_MEMORY_SNAPSHOT_IOS_H_
+#define CRASHPAD_SNAPSHOT_IOS_MEMORY_SNAPSHOT_IOS_H_
+
+#include "base/macros.h"
+#include "snapshot/memory_snapshot.h"
+#include "util/misc/address_types.h"
+#include "util/misc/initialization_state_dcheck.h"
+
+namespace crashpad {
+namespace internal {
+
+//! \brief A MemorySnapshot of a memory region.
+class MemorySnapshotIOS final : public MemorySnapshot {
+ public:
+  MemorySnapshotIOS() = default;
+  ~MemorySnapshotIOS() = default;
+
+  //! \brief Initializes the object.
+  //!
+  //! \param[in] address The base address of the memory region to snapshot.
+  //! \param[in] size The size of the memory region to snapshot.
+  void Initialize(vm_address_t address, vm_size_t size);
+
+  // MemorySnapshot:
+  uint64_t Address() const override;
+  size_t Size() const override;
+  bool Read(Delegate* delegate) const override;
+  const MemorySnapshot* MergeWithOtherSnapshot(
+      const MemorySnapshot* other) const override;
+
+ private:
+  template <class T>
+  friend const MemorySnapshot* MergeWithOtherSnapshotImpl(
+      const T* self,
+      const MemorySnapshot* other);
+
+  // TODO(justincohen): This is temporary until deserialization is worked out.
+  std::unique_ptr<uint8_t[]> buffer_;
+  vm_address_t address_;
+  vm_size_t size_;
+  InitializationStateDcheck initialized_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemorySnapshotIOS);
+};
+
+}  // namespace internal
+}  // namespace crashpad
+
+#endif  // CRASHPAD_SNAPSHOT_IOS_MEMORY_SNAPSHOT_IOS_H_
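
MergeWithOtherSnapshot() returns a caller-owned snapshot covering the
union of two abutting or overlapping regions, or nullptr if the ranges
can't be merged. A hedged usage sketch against a local buffer:

    // Hypothetical: merge two abutting 16-byte snapshots of one buffer.
    uint8_t data[32] = {};
    vm_address_t base = reinterpret_cast<vm_address_t>(data);
    crashpad::internal::MemorySnapshotIOS low, high;
    low.Initialize(base, 16);
    high.Initialize(base + 16, 16);
    std::unique_ptr<const crashpad::MemorySnapshot> merged(
        low.MergeWithOtherSnapshot(&high));  // Covers all 32 bytes.
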
diff --git a/snapshot/ios/process_snapshot_ios.cc b/snapshot/ios/process_snapshot_ios.cc
index 666fc19..27f9de4 100644
--- a/snapshot/ios/process_snapshot_ios.cc
+++ b/snapshot/ios/process_snapshot_ios.cc
@@ -26,6 +26,7 @@
 
 ProcessSnapshotIOS::ProcessSnapshotIOS()
     : ProcessSnapshot(),
+      threads_(),
       modules_(),
       report_id_(),
       client_id_(),
@@ -43,6 +44,7 @@
     return false;
   }
 
+  InitializeThreads();
   InitializeModules();
 
   INITIALIZATION_STATE_SET_VALID(initialized_);
@@ -96,7 +98,11 @@
 
 std::vector<const ThreadSnapshot*> ProcessSnapshotIOS::Threads() const {
   INITIALIZATION_STATE_DCHECK_VALID(initialized_);
-  return std::vector<const ThreadSnapshot*>();
+  std::vector<const ThreadSnapshot*> threads;
+  for (const auto& thread : threads_) {
+    threads.push_back(thread.get());
+  }
+  return threads;
 }
 
 std::vector<const ModuleSnapshot*> ProcessSnapshotIOS::Modules() const {
@@ -140,6 +146,25 @@
   return nullptr;
 }
 
+void ProcessSnapshotIOS::InitializeThreads() {
+  mach_msg_type_number_t thread_count = 0;
+  const thread_act_array_t threads =
+      internal::ThreadSnapshotIOS::GetThreads(&thread_count);
+  for (uint32_t thread_index = 0; thread_index < thread_count; ++thread_index) {
+    thread_t thread = threads[thread_index];
+    auto thread_snapshot = std::make_unique<internal::ThreadSnapshotIOS>();
+    if (thread_snapshot->Initialize(thread)) {
+      threads_.push_back(std::move(thread_snapshot));
+    }
+    mach_port_deallocate(mach_task_self(), thread);
+  }
+  // TODO(justincohen): The mach_port_deallocate above and the vm_deallocate
+  // below need to move, together with the call to task_threads, into
+  // internal::ThreadSnapshotIOS::GetThreads.
+  vm_deallocate(mach_task_self(),
+                reinterpret_cast<vm_address_t>(threads),
+                sizeof(thread_t) * thread_count);
+}
+
 void ProcessSnapshotIOS::InitializeModules() {
   const dyld_all_image_infos* image_infos =
       internal::ModuleSnapshotIOS::DyldAllImageInfo();
diff --git a/snapshot/ios/process_snapshot_ios.h b/snapshot/ios/process_snapshot_ios.h
index 4ebcaf1..19d43b2 100644
--- a/snapshot/ios/process_snapshot_ios.h
+++ b/snapshot/ios/process_snapshot_ios.h
@@ -18,7 +18,9 @@
 #include <vector>
 
 #include "snapshot/ios/module_snapshot_ios.h"
+#include "snapshot/ios/thread_snapshot_ios.h"
 #include "snapshot/process_snapshot.h"
+#include "snapshot/thread_snapshot.h"
 #include "snapshot/unloaded_module_snapshot.h"
 
 namespace crashpad {
@@ -60,6 +62,10 @@
   // Initializes modules_ on behalf of Initialize().
   void InitializeModules();
 
+  // Initializes threads_ on behalf of Initialize().
+  void InitializeThreads();
+
+  std::vector<std::unique_ptr<internal::ThreadSnapshotIOS>> threads_;
   std::vector<std::unique_ptr<internal::ModuleSnapshotIOS>> modules_;
   UUID report_id_;
   UUID client_id_;
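
With threads_ wired into Threads(), a consumer can walk the captured
threads. A hedged sketch, assuming an already-initialized
ProcessSnapshotIOS named snapshot (includes and error handling elided):

    // Hypothetical consumer of the new thread accessors.
    for (const crashpad::ThreadSnapshot* thread : snapshot.Threads()) {
      printf("thread %llu suspend_count %d priority %d\n",
             static_cast<unsigned long long>(thread->ThreadID()),
             thread->SuspendCount(),
             thread->Priority());
    }
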
diff --git a/snapshot/ios/thread_snapshot_ios.cc b/snapshot/ios/thread_snapshot_ios.cc
new file mode 100644
index 0000000..a5e9696
--- /dev/null
+++ b/snapshot/ios/thread_snapshot_ios.cc
@@ -0,0 +1,472 @@
+// Copyright 2020 The Crashpad Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "snapshot/ios/thread_snapshot_ios.h"
+
+#include "base/mac/mach_logging.h"
+#include "snapshot/mac/cpu_context_mac.h"
+
+namespace {
+
+#if defined(ARCH_CPU_X86_64)
+const thread_state_flavor_t kThreadStateFlavor = x86_THREAD_STATE64;
+const thread_state_flavor_t kFloatStateFlavor = x86_FLOAT_STATE64;
+const thread_state_flavor_t kDebugStateFlavor = x86_DEBUG_STATE64;
+#elif defined(ARCH_CPU_ARM64)
+const thread_state_flavor_t kThreadStateFlavor = ARM_THREAD_STATE64;
+const thread_state_flavor_t kFloatStateFlavor = ARM_NEON_STATE64;
+#endif
+
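+// Calls vm_region_recurse_64() in a loop, descending into submaps until the
+// deepest non-submap region containing *address is found, and reports that
+// region's bounds, protection, and user tag through the out parameters.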
+kern_return_t MachVMRegionRecurseDeepest(task_t task,
+                                         vm_address_t* address,
+                                         vm_size_t* size,
+                                         natural_t* depth,
+                                         vm_prot_t* protection,
+                                         unsigned int* user_tag) {
+  vm_region_submap_short_info_64 submap_info;
+  mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
+  while (true) {
+    kern_return_t kr = vm_region_recurse_64(
+        task,
+        address,
+        size,
+        depth,
+        reinterpret_cast<vm_region_recurse_info_t>(&submap_info),
+        &count);
+    if (kr != KERN_SUCCESS) {
+      return kr;
+    }
+
+    if (!submap_info.is_submap) {
+      *protection = submap_info.protection;
+      *user_tag = submap_info.user_tag;
+      return KERN_SUCCESS;
+    }
+
+    ++*depth;
+  }
+}
+
+//! \brief Adjusts the region for the red zone, if the ABI requires one.
+//!
+//! This method performs red zone calculation for CalculateStackRegion(). Its
+//! parameters are local variables used within that method, and may be
+//! modified as needed.
+//!
+//! Where a red zone is required, the region of memory captured for a thread’s
+//! stack will be extended to include the red zone below the stack pointer,
+//! provided that such memory is mapped, readable, and has the correct user
+//! tag value. If these conditions cannot be met fully, as much of the red
+//! zone will be captured as is possible while meeting these conditions.
+//!
+//! \param[in,out] start_address The base address of the region to begin
+//!     capturing stack memory from. On entry, \a start_address is the stack
+//!     pointer. On return, \a start_address may be decreased to encompass a
+//!     red zone.
+//! \param[in,out] region_base The base address of the region that contains
+//!     stack memory. This is distinct from \a start_address in that \a
+//!     region_base will be page-aligned. On entry, \a region_base is the
+//!     base address of a region that contains \a start_address. On return,
+//!     if \a start_address is decremented and is outside of the region
+//!     originally described by \a region_base, \a region_base will also be
+//!     decremented appropriately.
+//! \param[in,out] region_size The size of the region that contains stack
+//!     memory. This region begins at \a region_base. On return, if \a
+//!     region_base is decremented, \a region_size will be incremented
+//!     appropriately.
+//! \param[in] user_tag The Mach VM system’s user tag for the region described
+//!     by the initial values of \a region_base and \a region_size. The red
+//!     zone will only be allowed to extend out of the region described by
+//!     these initial values if the user tag is appropriate for stack memory
+//!     and the expanded region has the same user tag value.
+void LocateRedZone(vm_address_t* const start_address,
+                   vm_address_t* const region_base,
+                   vm_size_t* const region_size,
+                   const unsigned int user_tag) {
+  // x86_64 has a red zone. See AMD64 ABI 0.99.8,
+  // https://raw.githubusercontent.com/wiki/hjl-tools/x86-psABI/x86-64-psABI-r252.pdf#page=19,
+  // section 3.2.2, “The Stack Frame”.
+  // So does ARM64,
+  // https://developer.apple.com/library/archive/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html
+  // section "Red Zone".
+  constexpr vm_size_t kRedZoneSize = 128;
+  vm_address_t red_zone_base =
+      *start_address >= kRedZoneSize ? *start_address - kRedZoneSize : 0;
+  bool red_zone_ok = false;
+  if (red_zone_base >= *region_base) {
+    // The red zone is within the region already discovered.
+    red_zone_ok = true;
+  } else if (red_zone_base < *region_base && user_tag == VM_MEMORY_STACK) {
+    // Probe to see if there’s a region immediately below the one already
+    // discovered.
+    vm_address_t red_zone_region_base = red_zone_base;
+    vm_size_t red_zone_region_size;
+    natural_t red_zone_depth = 0;
+    vm_prot_t red_zone_protection;
+    unsigned int red_zone_user_tag;
+    kern_return_t kr = MachVMRegionRecurseDeepest(mach_task_self(),
+                                                  &red_zone_region_base,
+                                                  &red_zone_region_size,
+                                                  &red_zone_depth,
+                                                  &red_zone_protection,
+                                                  &red_zone_user_tag);
+    if (kr != KERN_SUCCESS) {
+      MACH_LOG(INFO, kr) << "vm_region_recurse";
+      *start_address = *region_base;
+    } else if (red_zone_region_base + red_zone_region_size == *region_base &&
+               (red_zone_protection & VM_PROT_READ) != 0 &&
+               red_zone_user_tag == user_tag) {
+      // The region containing the red zone is immediately below the region
+      // already found, it’s readable (not the guard region), and it has the
+      // same user tag as the region already found, so merge them.
+      red_zone_ok = true;
+      *region_base -= red_zone_region_size;
+      *region_size += red_zone_region_size;
+    }
+  }
+
+  if (red_zone_ok) {
+    // Begin capturing from the base of the red zone (but not the entire
+    // region that encompasses the red zone).
+    *start_address = red_zone_base;
+  } else {
+    // The red zone would go lower into another region in memory, but no
+    // region was found. Memory can only be captured to an address as low as
+    // the base address of the region already found.
+    *start_address = *region_base;
+  }
+}
+
+//! \brief Calculates the base address and size of the region used as a
+//!     thread’s stack.
+//!
+//! The region returned by this method may be formed by merging multiple
+//! adjacent regions in a process’ memory map if appropriate. The base address
+//! of the returned region may be lower than the \a stack_pointer passed in
+//! when the ABI mandates a red zone below the stack pointer.
+//!
+//! \param[in] stack_pointer The stack pointer, referring to the top (lowest
+//!     address) of a thread’s stack.
+//! \param[out] stack_region_size The size of the memory region used as the
+//!     thread’s stack.
+//!
+//! \return The base address (lowest address) of the memory region used as the
+//!     thread’s stack.
+vm_address_t CalculateStackRegion(vm_address_t stack_pointer,
+                                  vm_size_t* stack_region_size) {
+  // For pthreads, it may be possible to compute the stack region based on the
+  // internal _pthread::stackaddr and _pthread::stacksize. The _pthread struct
+  // for a thread can be located at TSD slot 0, or the known offsets of
+  // stackaddr and stacksize from the TSD area could be used.
+  vm_address_t region_base = stack_pointer;
+  vm_size_t region_size;
+  natural_t depth = 0;
+  vm_prot_t protection;
+  unsigned int user_tag;
+  kern_return_t kr = MachVMRegionRecurseDeepest(mach_task_self(),
+                                                &region_base,
+                                                &region_size,
+                                                &depth,
+                                                &protection,
+                                                &user_tag);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(INFO, kr) << "mach_vm_region_recurse";
+    *stack_region_size = 0;
+    return 0;
+  }
+
+  if (region_base > stack_pointer) {
+    // There’s nothing mapped at the stack pointer’s address. Something may have
+    // trashed the stack pointer. Note that this shouldn’t happen for a normal
+    // stack guard region violation because the guard region is mapped but has
+    // VM_PROT_NONE protection.
+    *stack_region_size = 0;
+    return 0;
+  }
+
+  vm_address_t start_address = stack_pointer;
+
+  if ((protection & VM_PROT_READ) == 0) {
+    // If the region isn’t readable, the stack pointer probably points to the
+    // guard region. Don’t include it as part of the stack, and don’t include
+    // anything at any lower memory address. The code below may still possibly
+    // find the real stack region at a memory address higher than this region.
+    start_address = region_base + region_size;
+  } else {
+    // If the ABI requires a red zone, adjust the region to include it if
+    // possible.
+    LocateRedZone(&start_address, &region_base, &region_size, user_tag);
+
+    // Regardless of whether the ABI requires a red zone, capture up to
+    // kExtraCaptureSize additional bytes of stack, but only if present in the
+    // region that was already found.
+    constexpr vm_size_t kExtraCaptureSize = 128;
+    start_address = std::max(start_address >= kExtraCaptureSize
+                                 ? start_address - kExtraCaptureSize
+                                 : start_address,
+                             region_base);
+
+    // Align start_address to a 16-byte boundary, which can help readers by
+    // ensuring that data is aligned properly. This could page-align instead,
+    // but that might be wasteful.
+    constexpr vm_size_t kDesiredAlignment = 16;
+    start_address &= ~(kDesiredAlignment - 1);
+    DCHECK_GE(start_address, region_base);
+  }
+
+  region_size -= (start_address - region_base);
+  region_base = start_address;
+
+  vm_size_t total_region_size = region_size;
+
+  // The stack region may have gotten split up into multiple abutting regions.
+  // Try to coalesce them. This frequently happens for the main thread’s stack
+  // when setrlimit(RLIMIT_STACK, …) is called. It may also happen if a region
+  // is split up due to an mprotect() or vm_protect() call.
+  //
+  // Stack regions created by the kernel and the pthreads library will be marked
+  // with the VM_MEMORY_STACK user tag. Scanning for multiple adjacent regions
+  // with the same tag should find an entire stack region. Checking that the
+  // protection on individual regions is not VM_PROT_NONE should guarantee that
+  // this algorithm doesn’t collect map entries belonging to another thread’s
+  // stack: well-behaved stacks (such as those created by the kernel and the
+  // pthreads library) have VM_PROT_NONE guard regions at their low-address
+  // ends.
+  //
+  // Other stack regions may not be so well-behaved and thus if user_tag is not
+  // VM_MEMORY_STACK, the single region that was found is used as-is without
+  // trying to merge it with other adjacent regions.
+  if (user_tag == VM_MEMORY_STACK) {
+    vm_address_t try_address = region_base;
+    vm_address_t original_try_address;
+
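+    // The comma expressions in the condition advance try_address past the
+    // region just measured and remember where the probe started; the loop
+    // continues only while each newly probed region begins exactly at
+    // try_address (abutting the previous one), is readable, and is also
+    // tagged VM_MEMORY_STACK.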
+    while (try_address += region_size,
+           original_try_address = try_address,
+           (kr = MachVMRegionRecurseDeepest(mach_task_self(),
+                                            &try_address,
+                                            &region_size,
+                                            &depth,
+                                            &protection,
+                                            &user_tag) == KERN_SUCCESS) &&
+               try_address == original_try_address &&
+               (protection & VM_PROT_READ) != 0 &&
+               user_tag == VM_MEMORY_STACK) {
+      total_region_size += region_size;
+    }
+
+    if (kr != KERN_SUCCESS && kr != KERN_INVALID_ADDRESS) {
+      // Tolerate KERN_INVALID_ADDRESS because it will be returned when there
+      // are no more regions in the map at or above the specified |try_address|.
+      MACH_LOG(INFO, kr) << "vm_region_recurse";
+    }
+  }
+
+  *stack_region_size = total_region_size;
+  return region_base;
+}
+
+}  // namespace
+
+namespace crashpad {
+namespace internal {
+
+ThreadSnapshotIOS::ThreadSnapshotIOS()
+    : ThreadSnapshot(),
+      context_(),
+      stack_(),
+      thread_id_(0),
+      thread_specific_data_address_(0),
+      suspend_count_(0),
+      priority_(0),
+      initialized_() {}
+
+ThreadSnapshotIOS::~ThreadSnapshotIOS() {}
+
+// static
+thread_act_array_t ThreadSnapshotIOS::GetThreads(
+    mach_msg_type_number_t* count) {
+  thread_act_array_t threads = nullptr;
+  kern_return_t kr = task_threads(mach_task_self(), &threads, count);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(WARNING, kr) << "task_threads";
+  }
+  return threads;
+}
+
+bool ThreadSnapshotIOS::Initialize(thread_t thread) {
+  INITIALIZATION_STATE_SET_INITIALIZING(initialized_);
+
+  // TODO(justincohen): Move the following thread_get_state, thread_get_info,
+  // thread_policy_get and CalculateStackRegion to the serialize-on-read
+  // section.
+  thread_basic_info basic_info;
+  thread_precedence_policy precedence;
+  vm_size_t stack_region_size;
+  vm_address_t stack_region_address;
+#if defined(ARCH_CPU_X86_64)
+  x86_thread_state64_t thread_state;
+  x86_float_state64_t float_state;
+  x86_debug_state64_t debug_state;
+  mach_msg_type_number_t thread_state_count = x86_THREAD_STATE64_COUNT;
+  mach_msg_type_number_t float_state_count = x86_FLOAT_STATE64_COUNT;
+  mach_msg_type_number_t debug_state_count = x86_DEBUG_STATE64_COUNT;
+#elif defined(ARCH_CPU_ARM64)
+  arm_thread_state64_t thread_state;
+  arm_neon_state64_t float_state;
+  mach_msg_type_number_t thread_state_count = ARM_THREAD_STATE64_COUNT;
+  mach_msg_type_number_t float_state_count = ARM_NEON_STATE64_COUNT;
+#endif
+
+  kern_return_t kr =
+      thread_get_state(thread,
+                       kThreadStateFlavor,
+                       reinterpret_cast<thread_state_t>(&thread_state),
+                       &thread_state_count);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "thread_get_state(" << kThreadStateFlavor << ")";
+  }
+
+  kr = thread_get_state(thread,
+                        kFloatStateFlavor,
+                        reinterpret_cast<thread_state_t>(&float_state),
+                        &float_state_count);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "thread_get_state(" << kFloatStateFlavor << ")";
+  }
+
+#if defined(ARCH_CPU_X86_64)
+  kr = thread_get_state(thread,
+                        kDebugStateFlavor,
+                        reinterpret_cast<thread_state_t>(&debug_state),
+                        &debug_state_count);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "thread_get_state(" << kDebugStateFlavor << ")";
+  }
+#endif
+
+  mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
+  kr = thread_info(thread,
+                   THREAD_BASIC_INFO,
+                   reinterpret_cast<thread_info_t>(&basic_info),
+                   &count);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(WARNING, kr) << "thread_info(THREAD_BASIC_INFO)";
+  }
+
+  thread_identifier_info identifier_info;
+  count = THREAD_IDENTIFIER_INFO_COUNT;
+  kr = thread_info(thread,
+                   THREAD_IDENTIFIER_INFO,
+                   reinterpret_cast<thread_info_t>(&identifier_info),
+                   &count);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(WARNING, kr) << "thread_info(THREAD_IDENTIFIER_INFO)";
+  }
+
+  count = THREAD_PRECEDENCE_POLICY_COUNT;
+  boolean_t get_default = FALSE;
+  kr = thread_policy_get(thread,
+                         THREAD_PRECEDENCE_POLICY,
+                         reinterpret_cast<thread_policy_t>(&precedence),
+                         &count,
+                         &get_default);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "thread_policy_get";
+  }
+
+#if defined(ARCH_CPU_X86_64)
+  vm_address_t stack_pointer = thread_state.__rsp;
+#elif defined(ARCH_CPU_ARM64)
+  vm_address_t stack_pointer = thread_state.__sp;
+#endif
+  stack_region_address =
+      CalculateStackRegion(stack_pointer, &stack_region_size);
+
+  // TODO(justincohen): Assume the following will fill in snapshot data from
+  // a deserialized object.
+  thread_id_ = identifier_info.thread_id;
+  suspend_count_ = basic_info.suspend_count;
+  priority_ = precedence.importance;
+
+  // thread_identifier_info::thread_handle contains the base of the
+  // thread-specific data area, which on x86 and x86_64 is the thread’s base
+  // address of the %gs segment. 10.9.2 xnu-2422.90.20/osfmk/kern/thread.c
+  // thread_info_internal() gets the value from
+  // machine_thread::cthread_self, which is the same value used to set the
+  // %gs base in xnu-2422.90.20/osfmk/i386/pcb_native.c
+  // act_machine_switch_pcb().
+  //
+  // On ARM64 10.15.0 xnu-6153.11.26/osfmk/kern/thread.c, it sets
+  // thread_identifier_info_t::thread_handle to
+  // thread->machine.cthread_self, which is set to tsd_base in
+  // osfmk/arm64/pcb.c.
+  thread_specific_data_address_ = identifier_info.thread_handle;
+  stack_.Initialize(stack_region_address, stack_region_size);
+
+#if defined(ARCH_CPU_X86_64)
+  context_.architecture = kCPUArchitectureX86_64;
+  context_.x86_64 = &context_x86_64_;
+  InitializeCPUContextX86_64(&context_x86_64_,
+                             THREAD_STATE_NONE,
+                             nullptr,
+                             0,
+                             &thread_state,
+                             &float_state,
+                             &debug_state);
+#elif defined(ARCH_CPU_ARM64)
+  context_.architecture = kCPUArchitectureARM64;
+  context_.arm64 = &context_arm64_;
+  InitializeCPUContextARM64(&context_arm64_, &thread_state, &float_state);
+#endif
+
+  INITIALIZATION_STATE_SET_VALID(initialized_);
+  return true;
+}
+
+const CPUContext* ThreadSnapshotIOS::Context() const {
+  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
+  return &context_;
+}
+
+const MemorySnapshot* ThreadSnapshotIOS::Stack() const {
+  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
+  return &stack_;
+}
+
+uint64_t ThreadSnapshotIOS::ThreadID() const {
+  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
+  return thread_id_;
+}
+
+int ThreadSnapshotIOS::SuspendCount() const {
+  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
+  return suspend_count_;
+}
+
+int ThreadSnapshotIOS::Priority() const {
+  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
+  return priority_;
+}
+
+uint64_t ThreadSnapshotIOS::ThreadSpecificDataAddress() const {
+  INITIALIZATION_STATE_DCHECK_VALID(initialized_);
+  return thread_specific_data_address_;
+}
+
+std::vector<const MemorySnapshot*> ThreadSnapshotIOS::ExtraMemory() const {
+  return std::vector<const MemorySnapshot*>();
+}
+
+}  // namespace internal
+}  // namespace crashpad
diff --git a/snapshot/ios/thread_snapshot_ios.h b/snapshot/ios/thread_snapshot_ios.h
new file mode 100644
index 0000000..c6dde7e
--- /dev/null
+++ b/snapshot/ios/thread_snapshot_ios.h
@@ -0,0 +1,77 @@
+// Copyright 2020 The Crashpad Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef CRASHPAD_SNAPSHOT_IOS_THREAD_SNAPSHOT_IOS_H_
+#define CRASHPAD_SNAPSHOT_IOS_THREAD_SNAPSHOT_IOS_H_
+
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "snapshot/cpu_context.h"
+#include "snapshot/ios/memory_snapshot_ios.h"
+#include "snapshot/thread_snapshot.h"
+#include "util/misc/initialization_state_dcheck.h"
+
+namespace crashpad {
+namespace internal {
+
+//! \brief A ThreadSnapshot of a thread on an iOS system.
+class ThreadSnapshotIOS final : public ThreadSnapshot {
+ public:
+  ThreadSnapshotIOS();
+  ~ThreadSnapshotIOS() override;
+
+  //! \brief Initializes the object.
+  //!
+  //! \param[in] thread The Mach thread used to initialize this object.
+  bool Initialize(thread_t thread);
+
+  //! \brief Returns an array of thread_t threads.
+  //!
+  //! \param[out] count The number of threads returned.
+  //!
+  //! \return An array of \a count threads.
+  static thread_act_array_t GetThreads(mach_msg_type_number_t* count);
+
+  // ThreadSnapshot:
+  const CPUContext* Context() const override;
+  const MemorySnapshot* Stack() const override;
+  uint64_t ThreadID() const override;
+  int SuspendCount() const override;
+  int Priority() const override;
+  uint64_t ThreadSpecificDataAddress() const override;
+  std::vector<const MemorySnapshot*> ExtraMemory() const override;
+
+ private:
+#if defined(ARCH_CPU_X86_64)
+  CPUContextX86_64 context_x86_64_;
+#elif defined(ARCH_CPU_ARM64)
+  CPUContextARM64 context_arm64_;
+#else
+#error Port.
+#endif  // ARCH_CPU_X86_64
+  CPUContext context_;
+  MemorySnapshotIOS stack_;
+  uint64_t thread_id_;
+  uint64_t thread_specific_data_address_;
+  int suspend_count_;
+  int priority_;
+  InitializationStateDcheck initialized_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadSnapshotIOS);
+};
+
+}  // namespace internal
+}  // namespace crashpad
+
+#endif  // CRASHPAD_SNAPSHOT_IOS_THREAD_SNAPSHOT_IOS_H_
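
GetThreads() returns kernel-owned resources: each thread port and the
containing array must be released by the caller, as InitializeThreads()
does above. A minimal hedged sketch of standalone use:

    // Hypothetical standalone use of GetThreads().
    mach_msg_type_number_t count = 0;
    thread_act_array_t threads =
        crashpad::internal::ThreadSnapshotIOS::GetThreads(&count);
    for (mach_msg_type_number_t i = 0; i < count; ++i) {
      // ... inspect threads[i] here ...
      mach_port_deallocate(mach_task_self(), threads[i]);
    }
    vm_deallocate(mach_task_self(),
                  reinterpret_cast<vm_address_t>(threads),
                  sizeof(thread_t) * count);
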
diff --git a/snapshot/mac/cpu_context_mac.cc b/snapshot/mac/cpu_context_mac.cc
index c91e331..acec60e 100644
--- a/snapshot/mac/cpu_context_mac.cc
+++ b/snapshot/mac/cpu_context_mac.cc
@@ -436,6 +436,32 @@
 
 }  // namespace internal
 
+#elif defined(ARCH_CPU_ARM_FAMILY)
+
+namespace internal {
+
+void InitializeCPUContextARM64(CPUContextARM64* context,
+                               const arm_thread_state64_t* arm_thread_state64,
+                               const arm_neon_state64_t* arm_neon_state64) {
+  // The structures of context->regs and arm_thread_state64->__x are laid out
+  // identically for this copy, even though the members are organized
+  // differently.  Because of this difference, there can't be a static assert
+  // similar to the one below for fpsimd.
+  memcpy(context->regs, arm_thread_state64->__x, sizeof(context->regs));
+  context->sp = arm_thread_state64->__sp;
+  context->pc = arm_thread_state64->__pc;
+  context->spsr =
+      static_cast<decltype(context->spsr)>(arm_thread_state64->__cpsr);
+
+  static_assert(sizeof(context->fpsimd) == sizeof(arm_neon_state64->__v),
+                "fpsimd context size mismatch");
+  memcpy(context->fpsimd, arm_neon_state64->__v, sizeof(arm_neon_state64->__v));
+  context->fpsr = arm_neon_state64->__fpsr;
+  context->fpcr = arm_neon_state64->__fpcr;
+}
+
+}  // namespace internal
+
 #endif
 
 }  // namespace crashpad
diff --git a/snapshot/mac/cpu_context_mac.h b/snapshot/mac/cpu_context_mac.h
index 30281c1..77aedb6 100644
--- a/snapshot/mac/cpu_context_mac.h
+++ b/snapshot/mac/cpu_context_mac.h
@@ -108,6 +108,17 @@
                                 const x86_float_state64_t* x86_float_state64,
                                 const x86_debug_state64_t* x86_debug_state64);
 
+#elif defined(ARCH_CPU_ARM_FAMILY) || DOXYGEN
+//! \brief Initializes a CPUContextARM64 structure from native context
+//!     structures.
+//!
+//! \param[out] context The CPUContextARM64 structure to initialize.
+//! \param[in] arm_thread_state64 The state of the thread’s integer registers.
+//! \param[in] arm_neon_state64 The state of the thread’s floating-point
+//!     registers.
+void InitializeCPUContextARM64(CPUContextARM64* context,
+                               const arm_thread_state64_t* arm_thread_state64,
+                               const arm_neon_state64_t* arm_neon_state64);
 #endif
 
 }  // namespace internal