Sync address_space_shared_slots_host_memory_allocator.cpp to external/qemu
Bug: 322026591
Test: presubmit
Change-Id: I0bb8a525c28523afb14d9a29d3709e1bf2431ea1
Signed-off-by: Roman Kiryanov <rkir@google.com>
GitOrigin-RevId: cc1f4f0d839bde42534e97991cd60c605a0be6e3
diff --git a/host-common/address_space_shared_slots_host_memory_allocator.cpp b/host-common/address_space_shared_slots_host_memory_allocator.cpp
index 31032b3..7136048 100644
--- a/host-common/address_space_shared_slots_host_memory_allocator.cpp
+++ b/host-common/address_space_shared_slots_host_memory_allocator.cpp
@@ -49,6 +49,23 @@
}
}
+uint64_t allocateAddressSpaceBlockFixed(uint64_t gpa, const AddressSpaceHwFuncs* hw, uint32_t size) {
+ uint64_t offset = gpa - hw->getPhysAddrStartLocked();
+ if (hw->allocSharedHostRegionFixedLocked(size, offset)) {
+ // Note: even if we do not succeed in allocSharedHostRegionFixedLocked,
+ // assume this is because we're doing a snapshot load, and the VMSTATE
+ // description of memory slots in hw/pci/goldfish_address_space.c
+ // already contains the entry we wanted. TODO: Consider always
+        // allowing allocSharedHostRegionFixedLocked to succeed if it encounters
+ // an unavailable block at the same offset and size, and/or add a
+ // "forSnapshotLoad" flag to allocSharedHostRegionFixedLocked in order
+ // to specifically account for this case.
+ return hw->getPhysAddrStartLocked() + offset;
+ } else {
+ return hw->getPhysAddrStartLocked() + offset;
+ }
+}
+
int freeAddressBlock(const AddressSpaceHwFuncs* hw, uint64_t phys) {
const uint64_t start = hw->getPhysAddrStartLocked();
if (phys < start) { return -1; }
@@ -238,7 +255,7 @@
android::aligned_buf_free(bits);
return false;
}
- const uint64_t physBase = allocateAddressSpaceBlock(hw, bitsSize);
+ const uint64_t physBase = allocateAddressSpaceBlockFixed(physBaseLoaded, hw, bitsSize);
if (!physBase) {
android::aligned_buf_free(bits);
return false;