blob: a5c04fc0c70ea9a5b69bb965e7915fa62e63a846 [file] [log] [blame]
// Copyright (C) 2018 The Android Open Source Project
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ResourceTracker.h"
#include "../OpenglSystemCommon/EmulatorFeatureInfo.h"
#include "../OpenglSystemCommon/HostConnection.h"
#include "CommandBufferStagingStream.h"
#include "DescriptorSetVirtualization.h"
#include "Resources.h"
#include "aemu/base/Optional.h"
#include "aemu/base/Tracing.h"
#include "aemu/base/threads/AndroidWorkPool.h"
#include "goldfish_vk_private_defs.h"
#include "vulkan/vulkan_core.h"
/// Use installed headers or locally defined Fuchsia-specific bits
#ifdef VK_USE_PLATFORM_FUCHSIA
#include <cutils/native_handle.h>
#include <fidl/fuchsia.hardware.goldfish/cpp/wire.h>
#include <fidl/fuchsia.sysmem/cpp/wire.h>
#include <lib/zx/channel.h>
#include <lib/zx/vmo.h>
#include <optional>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/rights.h>
#include <zircon/syscalls.h>
#include <zircon/syscalls/object.h>
#include "services/service_connector.h"
#ifndef FUCHSIA_NO_TRACE
#include <lib/trace/event.h>
#endif
#define GET_STATUS_SAFE(result, member) \
((result).ok() ? ((result)->member) : ZX_OK)
#else
typedef uint32_t zx_handle_t;
typedef uint64_t zx_koid_t;
#define ZX_HANDLE_INVALID ((zx_handle_t)0)
#define ZX_KOID_INVALID ((zx_koid_t)0)
void zx_handle_close(zx_handle_t) { }
void zx_event_create(int, zx_handle_t*) { }
#endif // VK_USE_PLATFORM_FUCHSIA
/// Use installed headers or locally defined Android-specific bits
#ifdef VK_USE_PLATFORM_ANDROID_KHR
/// Goldfish sync only used for AEMU -- should replace in virtio-gpu when possibe
#include "../egl/goldfish_sync.h"
#include "AndroidHardwareBuffer.h"
#else
#if defined(__linux__)
#include "../egl/goldfish_sync.h"
#endif
#include <android/hardware_buffer.h>
#endif // VK_USE_PLATFORM_ANDROID_KHR
#include "HostVisibleMemoryVirtualization.h"
#include "Resources.h"
#include "VkEncoder.h"
#include "aemu/base/AlignedBuf.h"
#include "aemu/base/synchronization/AndroidLock.h"
#include "virtgpu_gfxstream_protocol.h"
#include "goldfish_address_space.h"
#include "goldfish_vk_private_defs.h"
#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include "vk_format_info.h"
#endif
#include "vk_struct_id.h"
#include "vk_util.h"
#include <stdlib.h>

#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>

#include <log/log.h>
#include <sync/sync.h>
#include <vndk/hardware_buffer.h>
#if defined(__ANDROID__) || defined(__linux__) || defined(__APPLE__)
#include <sys/mman.h>
#include <unistd.h>
#include <sys/syscall.h>
#ifdef HOST_BUILD
#include "android/utils/tempfile.h"
#endif
// memfd_create shim. Host builds have no memfd syscall available, so the
// "memfd" is backed by a temp file instead; on device we invoke the raw
// syscall directly.
static inline int
inline_memfd_create(const char *name, unsigned int flags) {
#ifdef HOST_BUILD
    TempFile* tmpFile = tempfile_create();
    return open(tempfile_path(tmpFile), O_RDWR);
    // TODO: Windows is not suppose to support VkSemaphoreGetFdInfoKHR
#else
    return syscall(SYS_memfd_create, name, flags);
#endif
}
#define memfd_create inline_memfd_create
#endif
#define RESOURCE_TRACKER_DEBUG 0
#if RESOURCE_TRACKER_DEBUG
#undef D
#define D(fmt,...) ALOGD("%s: " fmt, __func__, ##__VA_ARGS__);
#else
#ifndef D
#define D(fmt,...)
#endif
#endif
using android::base::Optional;
using android::base::guest::AutoLock;
using android::base::guest::RecursiveLock;
using android::base::guest::Lock;
using android::base::guest::WorkPool;
namespace goldfish_vk {
// Expands to the three mapHandles_* overrides (in-place, handle->u64,
// u64->handle) that a VulkanHandleMapping subclass provides for one handle
// type. Each *_impl argument is a statement executed once per element.
#define MAKE_HANDLE_MAPPING_FOREACH(type_name, map_impl, map_to_u64_impl, map_from_u64_impl) \
    void mapHandles_##type_name(type_name* handles, size_t count) override { \
        for (size_t i = 0; i < count; ++i) { \
            map_impl; \
        } \
    } \
    void mapHandles_##type_name##_u64(const type_name* handles, uint64_t* handle_u64s, size_t count) override { \
        for (size_t i = 0; i < count; ++i) { \
            map_to_u64_impl; \
        } \
    } \
    void mapHandles_u64_##type_name(const uint64_t* handle_u64s, type_name* handles, size_t count) override { \
        for (size_t i = 0; i < count; ++i) { \
            map_from_u64_impl; \
        } \
    } \

// Declares a VulkanHandleMapping subclass that applies |impl| to every
// Vulkan handle type.
#define DEFINE_RESOURCE_TRACKING_CLASS(class_name, impl) \
class class_name : public VulkanHandleMapping { \
public: \
    virtual ~class_name() { } \
    GOLDFISH_VK_LIST_HANDLE_TYPES(impl) \
}; \

// Create policy: wrap host handles in new goldfish wrappers and register
// them with the tracker.
#define CREATE_MAPPING_IMPL_FOR_TYPE(type_name) \
    MAKE_HANDLE_MAPPING_FOREACH(type_name, \
        handles[i] = new_from_host_##type_name(handles[i]); ResourceTracker::get()->register_##type_name(handles[i]);, \
        handle_u64s[i] = (uint64_t)new_from_host_##type_name(handles[i]), \
        handles[i] = (type_name)new_from_host_u64_##type_name(handle_u64s[i]); ResourceTracker::get()->register_##type_name(handles[i]);)

// Unwrap policy: translate wrapped guest handles back to raw host handles.
#define UNWRAP_MAPPING_IMPL_FOR_TYPE(type_name) \
    MAKE_HANDLE_MAPPING_FOREACH(type_name, \
        handles[i] = get_host_##type_name(handles[i]), \
        handle_u64s[i] = (uint64_t)get_host_u64_##type_name(handles[i]), \
        handles[i] = (type_name)get_host_##type_name((type_name)handle_u64s[i]))

// Destroy policy: unregister from the tracker and delete the wrappers.
#define DESTROY_MAPPING_IMPL_FOR_TYPE(type_name) \
    MAKE_HANDLE_MAPPING_FOREACH(type_name, \
        ResourceTracker::get()->unregister_##type_name(handles[i]); delete_goldfish_##type_name(handles[i]), \
        (void)handle_u64s[i]; delete_goldfish_##type_name(handles[i]), \
        (void)handles[i]; delete_goldfish_##type_name((type_name)handle_u64s[i]))

DEFINE_RESOURCE_TRACKING_CLASS(CreateMapping, CREATE_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(UnwrapMapping, UNWRAP_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(DestroyMapping, DESTROY_MAPPING_IMPL_FOR_TYPE)
// Sequence-number pointer for the stream.
// NOTE(review): assigned outside this chunk — confirm where it is set.
static uint32_t* sSeqnoPtr = nullptr;

// static — definitions for ResourceTracker's class-level statics.
uint32_t ResourceTracker::streamFeatureBits = 0;
ResourceTracker::ThreadingCallbacks ResourceTracker::threadingCallbacks;
// Pool of reusable CommandBufferStagingStream/VkEncoder pairs.
//
// Pairs are parked here via pushStaging() instead of being destroyed, and
// handed back out via popStaging(), avoiding repeated construction. All
// access to the two vectors is serialized by mLock.
struct StagingInfo {
    Lock mLock;
    std::vector<CommandBufferStagingStream*> streams;
    std::vector<VkEncoder*> encoders;

    /// \brief sets alloc and free callbacks for memory allocation for CommandBufferStagingStream(s)
    /// \param allocFn is the callback to allocate memory
    /// \param freeFn is the callback to free memory
    void setAllocFree(CommandBufferStagingStream::Alloc&& allocFn,
                      CommandBufferStagingStream::Free&& freeFn) {
        // The parameters are rvalue references: the caller has handed over
        // ownership, so move instead of copying the callables (the old code
        // copied, defeating the point of taking rvalue refs).
        mAlloc = std::move(allocFn);
        mFree = std::move(freeFn);
    }

    ~StagingInfo() {
        for (auto stream : streams) {
            delete stream;
        }
        for (auto encoder : encoders) {
            delete encoder;
        }
    }

    // Return a stream/encoder pair to the pool; the stream is reset so the
    // next consumer starts clean.
    void pushStaging(CommandBufferStagingStream* stream, VkEncoder* encoder) {
        AutoLock<Lock> lock(mLock);
        stream->reset();
        streams.push_back(stream);
        encoders.push_back(encoder);
    }

    // Hand out a pooled stream/encoder pair, creating a fresh pair when the
    // pool is empty.
    void popStaging(CommandBufferStagingStream** streamOut, VkEncoder** encoderOut) {
        AutoLock<Lock> lock(mLock);
        CommandBufferStagingStream* stream;
        VkEncoder* encoder;
        if (streams.empty()) {
            if (mAlloc && mFree) {
                // if custom allocators are provided, forward them to CommandBufferStagingStream
                stream = new CommandBufferStagingStream(mAlloc, mFree);
            } else {
                stream = new CommandBufferStagingStream;
            }
            encoder = new VkEncoder(stream);
        } else {
            stream = streams.back();
            encoder = encoders.back();
            streams.pop_back();
            encoders.pop_back();
        }
        *streamOut = stream;
        *encoderOut = encoder;
    }

   private:
    CommandBufferStagingStream::Alloc mAlloc = nullptr;
    CommandBufferStagingStream::Free mFree = nullptr;
};

static StagingInfo sStaging;
// Hidden implementation of ResourceTracker (pimpl). Holds per-handle-type
// info tables plus the handle-mapping strategies used by the encoder.
class ResourceTracker::Impl {
public:
    Impl() = default;

    // Handle-mapping strategies: wrap host handles in goldfish wrappers,
    // unwrap back to host handles, destroy wrappers, or pass through.
    CreateMapping createMapping;
    UnwrapMapping unwrapMapping;
    DestroyMapping destroyMapping;
    DefaultHandleMapping defaultMapping;
// Most handle types carry no extra guest-side state; give them a trivial
// one-field info struct via this macro.
#define HANDLE_DEFINE_TRIVIAL_INFO_STRUCT(type) \
    struct type##_Info { \
        uint32_t unused; \
    }; \

GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_DEFINE_TRIVIAL_INFO_STRUCT)

// Guest-side bookkeeping for a VkInstance.
struct VkInstance_Info {
    uint32_t highestApiVersion;
    std::set<std::string> enabledExtensions;
    // Fodder for vkEnumeratePhysicalDevices.
    std::vector<VkPhysicalDevice> physicalDevices;
};

// Guest-side bookkeeping for a VkDevice, including any
// VK_EXT_device_memory_report callbacks registered at creation time.
struct VkDevice_Info {
    VkPhysicalDevice physdev;
    VkPhysicalDeviceProperties props;
    VkPhysicalDeviceMemoryProperties memProps;
    uint32_t apiVersion;
    std::set<std::string> enabledExtensions;
    std::vector<std::pair<PFN_vkDeviceMemoryReportCallbackEXT, void *>> deviceMemoryReportCallbacks;
};

// Guest-side bookkeeping for a VkDeviceMemory allocation: mapped pointer,
// size/type, platform backing (AHardwareBuffer on Android, VMO handle on
// Fuchsia), and coherent-memory suballocation info.
struct VkDeviceMemory_Info {
    bool dedicated = false;
    bool imported = false;

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    AHardwareBuffer* ahw = nullptr;
#endif
    zx_handle_t vmoHandle = ZX_HANDLE_INVALID;
    VkDevice device;

    uint8_t* ptr = nullptr;

    uint64_t allocationSize = 0;
    uint32_t memoryTypeIndex = 0;
    uint64_t coherentMemorySize = 0;
    uint64_t coherentMemoryOffset = 0;

    GoldfishAddressSpaceBlockPtr goldfishBlock = nullptr;
    CoherentMemoryPtr coherentMemory = nullptr;
};

struct VkCommandBuffer_Info {
    uint32_t placeholder;
};

struct VkQueue_Info {
    VkDevice device;
};

// custom guest-side structs for images/buffers because of AHardwareBuffer :((
struct VkImage_Info {
    VkDevice device;
    VkImageCreateInfo createInfo;
    bool external = false;
    VkExternalMemoryImageCreateInfo externalCreateInfo;
    VkDeviceMemory currentBacking = VK_NULL_HANDLE;
    VkDeviceSize currentBackingOffset = 0;
    VkDeviceSize currentBackingSize = 0;
    bool baseRequirementsKnown = false;
    VkMemoryRequirements baseRequirements;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    bool hasExternalFormat = false;
    unsigned androidFormat = 0;
    std::vector<int> pendingQsriSyncFds;
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
    bool isSysmemBackedMemory = false;
#endif
};

struct VkBuffer_Info {
    VkDevice device;
    VkBufferCreateInfo createInfo;
    bool external = false;
    VkExternalMemoryBufferCreateInfo externalCreateInfo;
    VkDeviceMemory currentBacking = VK_NULL_HANDLE;
    VkDeviceSize currentBackingOffset = 0;
    VkDeviceSize currentBackingSize = 0;
    bool baseRequirementsKnown = false;
    VkMemoryRequirements baseRequirements;
#ifdef VK_USE_PLATFORM_FUCHSIA
    bool isSysmemBackedMemory = false;
#endif
};

// Guest-side bookkeeping for a VkSemaphore and its exportable payloads
// (zircon event handle/koid on Fuchsia, sync fd on Android/Linux).
struct VkSemaphore_Info {
    VkDevice device;
    zx_handle_t eventHandle = ZX_HANDLE_INVALID;
    zx_koid_t eventKoid = ZX_KOID_INVALID;
    std::optional<int> syncFd = {};
};
// Flattened copy of a VkDescriptorUpdateTemplate: the template entries plus
// index/payload arrays used to apply batched descriptor updates. The heap
// arrays are released in unregister_VkDescriptorUpdateTemplate, which
// deletes them only when the matching count is non-zero — so initialize all
// pointers to nullptr (the original left them indeterminate, making a
// never-populated info struct UB to tear down).
struct VkDescriptorUpdateTemplate_Info {
    uint32_t templateEntryCount = 0;
    VkDescriptorUpdateTemplateEntry* templateEntries = nullptr;

    uint32_t imageInfoCount = 0;
    uint32_t bufferInfoCount = 0;
    uint32_t bufferViewCount = 0;
    uint32_t* imageInfoIndices = nullptr;
    uint32_t* bufferInfoIndices = nullptr;
    uint32_t* bufferViewIndices = nullptr;
    VkDescriptorImageInfo* imageInfos = nullptr;
    VkDescriptorBufferInfo* bufferInfos = nullptr;
    VkBufferView* bufferViews = nullptr;
};
// Guest-side bookkeeping for a VkFence and its exported sync fd.
struct VkFence_Info {
    VkDevice device;
    bool external = false;
    VkExportFenceCreateInfo exportFenceCreateInfo;
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    // fd of the exported sync file; -1 when not exported. Closed in
    // unregister_VkFence.
    int syncFd = -1;
#endif
};

struct VkDescriptorPool_Info {
    uint32_t unused;
};

struct VkDescriptorSet_Info {
    uint32_t unused;
};

struct VkDescriptorSetLayout_Info {
    uint32_t unused;
};

struct VkCommandPool_Info {
    uint32_t unused;
};

struct VkSampler_Info {
    uint32_t unused;
};

// Sysmem buffer-collection state; only populated on Fuchsia.
struct VkBufferCollectionFUCHSIA_Info {
#ifdef VK_USE_PLATFORM_FUCHSIA
    android::base::Optional<
        fuchsia_sysmem::wire::BufferCollectionConstraints>
        constraints;
    android::base::Optional<VkBufferCollectionPropertiesFUCHSIA> properties;

    // the index of corresponding createInfo for each image format
    // constraints in |constraints|.
    std::vector<uint32_t> createInfoIndex;
#endif // VK_USE_PLATFORM_FUCHSIA
};
// Generate, per handle type, the info table plus a register_* helper.
// Types with non-trivial cleanup get hand-written unregister_* overloads
// below; trivial types use the macro-generated eraser.
#define HANDLE_REGISTER_IMPL_IMPL(type) \
    std::unordered_map<type, type##_Info> info_##type; \
    void register_##type(type obj) { \
        AutoLock<RecursiveLock> lock(mLock); \
        info_##type[obj] = type##_Info(); \
    } \

// Default unregister: just erase the tracking entry under the lock.
#define HANDLE_UNREGISTER_IMPL_IMPL(type) \
    void unregister_##type(type obj) { \
        AutoLock<RecursiveLock> lock(mLock); \
        info_##type.erase(obj); \
    } \

GOLDFISH_VK_LIST_HANDLE_TYPES(HANDLE_REGISTER_IMPL_IMPL)
GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_UNREGISTER_IMPL_IMPL)
// Drop all tracking info for an instance. No-op for unknown handles.
void unregister_VkInstance(VkInstance instance) {
    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkInstance.find(instance);
    if (it == info_VkInstance.end()) return;
    // Erase via the iterator already in hand: the original made a dead
    // copy of the info struct and then did a second lookup to erase by
    // key, followed by a redundant manual unlock.
    info_VkInstance.erase(it);
}
// Drop all tracking info for a device. No-op for unknown handles.
void unregister_VkDevice(VkDevice device) {
    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return;
    // Erase via the iterator already in hand: the original made a dead
    // copy of the (vector/set-carrying) info struct, re-looked-up the key
    // to erase it, then manually unlocked for no reason.
    info_VkDevice.erase(it);
}
// Forget a command pool. Its staging state is cleared first (outside the
// tracker lock), then the tracking entry is erased.
void unregister_VkCommandPool(VkCommandPool pool) {
    if (pool) {
        clearCommandPool(pool);
        AutoLock<RecursiveLock> lock(mLock);
        info_VkCommandPool.erase(pool);
    }
}
// Forget a sampler; null handles are ignored.
void unregister_VkSampler(VkSampler sampler) {
    if (sampler) {
        AutoLock<RecursiveLock> lock(mLock);
        info_VkSampler.erase(sampler);
    }
}
// Tear down all guest-side state hanging off a command buffer: its staging
// stream/encoder, the last-used encoder reference, linked sub-objects, the
// back-references held by its pools, and any pending descriptor-set
// bookkeeping stashed in userPtr. Finally drops the tracking entry.
void unregister_VkCommandBuffer(VkCommandBuffer commandBuffer) {
    resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */, true /* also clear pending descriptor sets */);

    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) return;
    if (cb->lastUsedEncoder) { cb->lastUsedEncoder->decRef(); }
    eraseObjects(&cb->subObjects);
    // Unlink this command buffer from every pool that still references it.
    forAllObjects(cb->poolObjects, [cb](void* commandPool) {
        struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool((VkCommandPool)commandPool);
        eraseObject(&p->subObjects, (void*)cb);
    });
    eraseObjects(&cb->poolObjects);

    if (cb->userPtr) {
        CommandBufferPendingDescriptorSets* pendingSets = (CommandBufferPendingDescriptorSets*)cb->userPtr;
        delete pendingSets;
    }

    AutoLock<RecursiveLock> lock(mLock);
    info_VkCommandBuffer.erase(commandBuffer);
}
// Forget a queue, releasing the reference it held on its last-used
// encoder.
void unregister_VkQueue(VkQueue queue) {
    auto* wrapper = as_goldfish_VkQueue(queue);
    if (!wrapper) return;

    VkEncoder* lastEncoder = wrapper->lastUsedEncoder;
    if (lastEncoder) {
        lastEncoder->decRef();
    }

    AutoLock<RecursiveLock> lock(mLock);
    info_VkQueue.erase(queue);
}
// Release guest-side resources attached to a VkDeviceMemory — the
// AHardwareBuffer reference (Android builds) and the VMO handle (a no-op
// stub on non-Fuchsia builds) — then drop the tracking entry.
void unregister_VkDeviceMemory(VkDeviceMemory mem) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDeviceMemory.find(mem);
    if (it == info_VkDeviceMemory.end()) return;

    auto& memInfo = it->second;

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    if (memInfo.ahw) {
        AHardwareBuffer_release(memInfo.ahw);
    }
#endif

    if (memInfo.vmoHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(memInfo.vmoHandle);
    }

    info_VkDeviceMemory.erase(mem);
}
// Drop tracking info for an image. No-op for unknown handles. Nothing in
// the visible VkImage_Info requires explicit cleanup here, so this is a
// plain erase (the original also bound an unused reference to the info).
void unregister_VkImage(VkImage img) {
    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkImage.find(img);
    if (it == info_VkImage.end()) return;
    info_VkImage.erase(it);
}
// Drop tracking info for a buffer; unknown handles are ignored.
void unregister_VkBuffer(VkBuffer buf) {
    AutoLock<RecursiveLock> lock(mLock);
    auto entry = info_VkBuffer.find(buf);
    if (entry != info_VkBuffer.end()) {
        info_VkBuffer.erase(entry);
    }
}
// Drop tracking info for a semaphore, closing its zircon event handle
// (no-op stub off-Fuchsia) and its exported sync fd (Android/Linux) when
// present.
void unregister_VkSemaphore(VkSemaphore sem) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkSemaphore.find(sem);
    if (it == info_VkSemaphore.end()) return;

    auto& semInfo = it->second;

    if (semInfo.eventHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(semInfo.eventHandle);
    }

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    // value_or(-1) treats "no fd recorded" the same as an invalid fd.
    if (semInfo.syncFd.value_or(-1) >= 0) {
        close(semInfo.syncFd.value());
    }
#endif

    info_VkSemaphore.erase(sem);
}
// Free the heap arrays owned by a descriptor-update-template's info
// (entries plus image/buffer/buffer-view index and payload arrays), then
// drop the tracking entry. Each array group is freed only when its count
// is non-zero.
void unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ) {
    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkDescriptorUpdateTemplate.find(templ);
    if (it == info_VkDescriptorUpdateTemplate.end())
        return;

    auto& info = it->second;
    if (info.templateEntryCount) delete [] info.templateEntries;
    if (info.imageInfoCount) {
        delete [] info.imageInfoIndices;
        delete [] info.imageInfos;
    }
    if (info.bufferInfoCount) {
        delete [] info.bufferInfoIndices;
        delete [] info.bufferInfos;
    }
    if (info.bufferViewCount) {
        delete [] info.bufferViewIndices;
        delete [] info.bufferViews;
    }

    info_VkDescriptorUpdateTemplate.erase(it);
}
// Drop tracking info for a fence, closing its exported sync fd when one
// exists (Android/Linux builds only).
void unregister_VkFence(VkFence fence) {
    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkFence.find(fence);
    if (it == info_VkFence.end()) return;

    auto& fenceInfo = it->second;
    (void)fenceInfo; // silences unused warning on builds without syncFd

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    if (fenceInfo.syncFd >= 0) {
        close(fenceInfo.syncFd);
    }
#endif

    info_VkFence.erase(fence);
}
#ifdef VK_USE_PLATFORM_FUCHSIA
// Drop tracking info for a sysmem buffer collection (Fuchsia only).
void unregister_VkBufferCollectionFUCHSIA(
    VkBufferCollectionFUCHSIA collection) {
    AutoLock<RecursiveLock> lock(mLock);
    info_VkBufferCollectionFUCHSIA.erase(collection);
}
#endif
// Caller must hold mLock. Frees the reified descriptor-set bookkeeping
// attached to the goldfish wrapper and drops the tracking entry.
void unregister_VkDescriptorSet_locked(VkDescriptorSet set) {
    struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(set);
    delete ds->reified;
    info_VkDescriptorSet.erase(set);
}
// Locking wrapper around unregister_VkDescriptorSet_locked; ignores null
// handles.
void unregister_VkDescriptorSet(VkDescriptorSet set) {
    if (set) {
        AutoLock<RecursiveLock> lock(mLock);
        unregister_VkDescriptorSet_locked(set);
    }
}
// Free the layout info owned by the goldfish wrapper and drop the
// tracking entry; ignores null handles.
void unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout) {
    if (!setLayout) return;

    AutoLock<RecursiveLock> lock(mLock);
    auto* wrapper = as_goldfish_VkDescriptorSetLayout(setLayout);
    delete wrapper->layoutInfo;
    info_VkDescriptorSetLayout.erase(setLayout);
}
// Allocate descriptor sets either virtually (guest-side pool bookkeeping,
// when the host supports batched descriptor-set updates) or by passing
// through to the host allocator via the encoder.
// Returns VK_SUCCESS, or the failure code from pool validation / host
// allocation.
VkResult allocAndInitializeDescriptorSets(
    void* context,
    VkDevice device,
    const VkDescriptorSetAllocateInfo* ci,
    VkDescriptorSet* sets) {
    if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
        // Using the pool ID's we collected earlier from the host
        VkResult poolAllocResult = validateAndApplyVirtualDescriptorSetAllocation(ci, sets);

        if (poolAllocResult != VK_SUCCESS) return poolAllocResult;

        for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
            register_VkDescriptorSet(sets[i]);
            VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(sets[i])->reified->setLayout;

            // Need to add ref to the set layout in the virtual case
            // because the set itself might not be realized on host at the
            // same time
            struct goldfish_VkDescriptorSetLayout* dsl = as_goldfish_VkDescriptorSetLayout(setLayout);
            ++dsl->layoutInfo->refcount;
        }
    } else {
        // Pass through and use host allocation
        VkEncoder* enc = (VkEncoder*)context;
        VkResult allocRes = enc->vkAllocateDescriptorSets(device, ci, sets, true /* do lock */);

        if (allocRes != VK_SUCCESS) return allocRes;

        for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
            applyDescriptorSetAllocation(ci->descriptorPool, ci->pSetLayouts[i]);
            fillDescriptorSetInfoForPool(ci->descriptorPool, ci->pSetLayouts[i], sets[i]);
        }
    }

    return VK_SUCCESS;
}
// Copy |*pImageInfo|, zeroing the sampler handle when the destination
// binding is marked as using immutable samplers (only relevant for
// SAMPLER / COMBINED_IMAGE_SAMPLER descriptor types).
VkDescriptorImageInfo createImmutableSamplersFilteredImageInfo(
    VkDescriptorType descType,
    VkDescriptorSet descSet,
    uint32_t binding,
    const VkDescriptorImageInfo* pImageInfo) {
    VkDescriptorImageInfo filtered = *pImageInfo;

    const bool samplerRelevant =
        descType == VK_DESCRIPTOR_TYPE_SAMPLER ||
        descType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;

    if (samplerRelevant &&
        as_goldfish_VkDescriptorSet(descSet)->reified->bindingIsImmutableSampler[binding]) {
        filtered.sampler = 0;
    }

    return filtered;
}
// Whether |dstBinding| of |dstSet| was created with immutable samplers.
bool descriptorBindingIsImmutableSampler(
    VkDescriptorSet dstSet,
    uint32_t dstBinding) {
    const auto* reified = as_goldfish_VkDescriptorSet(dstSet)->reified;
    return reified->bindingIsImmutableSampler[dstBinding];
}
// Return a copy of |inputInfo| whose sampler is zeroed out if it no
// longer refers to a tracked (live) VkSampler.
VkDescriptorImageInfo
filterNonexistentSampler(
    const VkDescriptorImageInfo& inputInfo) {
    VkDescriptorImageInfo out = inputInfo;

    if (out.sampler &&
        info_VkSampler.find(out.sampler) == info_VkSampler.end()) {
        out.sampler = 0;
    }

    return out;
}
// Free a batch of descriptor sets. Sets still marked allocationPending
// (never realized on the host) are torn down guest-side only; the rest
// are freed on the host through the encoder.
void freeDescriptorSetsIfHostAllocated(VkEncoder* enc, VkDevice device, uint32_t descriptorSetCount, const VkDescriptorSet* sets) {
    for (uint32_t i = 0; i < descriptorSetCount; ++i) {
        struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(sets[i]);
        if (ds->reified->allocationPending) {
            unregister_VkDescriptorSet(sets[i]);
            delete_goldfish_VkDescriptorSet(sets[i]);
        } else {
            enc->vkFreeDescriptorSets(device, ds->reified->pool, 1, &sets[i], false /* no lock */);
        }
    }
}
// Empty a descriptor pool: clear its guest-side bookkeeping and
// unregister/destroy every set allocated from it. In the batched-update
// path, also drop the set-layout reference each set acquired at
// allocation time.
void clearDescriptorPoolAndUnregisterDescriptorSets(void* context, VkDevice device, VkDescriptorPool pool) {
    std::vector<VkDescriptorSet> toClear =
        clearDescriptorPool(pool, mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate);

    for (auto set : toClear) {
        if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
            VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(set)->reified->setLayout;
            decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
        }
        unregister_VkDescriptorSet(set);
        delete_goldfish_VkDescriptorSet(set);
    }
}
// Free the allocation info owned by the goldfish pool wrapper and drop
// the tracking entry; ignores null handles.
void unregister_VkDescriptorPool(VkDescriptorPool pool) {
    if (!pool) return;

    AutoLock<RecursiveLock> lock(mLock);
    delete as_goldfish_VkDescriptorPool(pool)->allocInfo;
    info_VkDescriptorPool.erase(pool);
}
// True when |pool| was created with FREE_DESCRIPTOR_SET_BIT, i.e.
// individual sets may be freed back to it. Caller must hold mLock.
bool descriptorPoolSupportsIndividualFreeLocked(VkDescriptorPool pool) {
    const auto* allocInfo = as_goldfish_VkDescriptorPool(pool)->allocInfo;
    return (allocInfo->createFlags &
            VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT) != 0;
}
// Vulkan API version assumed when the application does not specify one.
static constexpr uint32_t kDefaultApiVersion = VK_MAKE_VERSION(1, 1, 0);
// Record the API version and enabled extension set for an instance.
// A null extension-name array means "no extensions enabled".
void setInstanceInfo(VkInstance instance,
                     uint32_t enabledExtensionCount,
                     const char* const* ppEnabledExtensionNames,
                     uint32_t apiVersion) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& instanceInfo = info_VkInstance[instance];
    instanceInfo.highestApiVersion = apiVersion;

    if (!ppEnabledExtensionNames) return;

    for (uint32_t idx = 0; idx < enabledExtensionCount; ++idx) {
        instanceInfo.enabledExtensions.insert(ppEnabledExtensionNames[idx]);
    }
}
// Record per-device state: physical device, properties, memory
// properties, API version, enabled extensions, and any
// VK_EXT_device_memory_report callbacks found in the create-info pNext
// chain.
void setDeviceInfo(VkDevice device,
                   VkPhysicalDevice physdev,
                   VkPhysicalDeviceProperties props,
                   VkPhysicalDeviceMemoryProperties memProps,
                   uint32_t enabledExtensionCount,
                   const char* const* ppEnabledExtensionNames,
                   const void* pNext) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& info = info_VkDevice[device];
    info.physdev = physdev;
    info.props = props;
    info.memProps = memProps;
    info.apiVersion = props.apiVersion;

    // Walk the pNext chain collecting device-memory-report callbacks.
    const VkBaseInStructure *extensionCreateInfo =
        reinterpret_cast<const VkBaseInStructure *>(pNext);
    while(extensionCreateInfo) {
        if(extensionCreateInfo->sType
            == VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
            auto deviceMemoryReportCreateInfo =
                reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT *>(
                    extensionCreateInfo);
            if(deviceMemoryReportCreateInfo->pfnUserCallback != nullptr) {
                info.deviceMemoryReportCallbacks.emplace_back(
                    deviceMemoryReportCreateInfo->pfnUserCallback,
                    deviceMemoryReportCreateInfo->pUserData);
            }
        }
        extensionCreateInfo = extensionCreateInfo->pNext;
    }

    if (!ppEnabledExtensionNames) return;

    for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
        info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
    }
}
void emitDeviceMemoryReport(VkDevice_Info info,
VkDeviceMemoryReportEventTypeEXT type,
uint64_t memoryObjectId,
VkDeviceSize size,
VkObjectType objectType,
uint64_t objectHandle,
uint32_t heapIndex = 0) {
if(info.deviceMemoryReportCallbacks.empty()) return;
const VkDeviceMemoryReportCallbackDataEXT callbackData = {
VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT, // sType
nullptr, // pNext
0, // flags
type, // type
memoryObjectId, // memoryObjectId
size, // size
objectType, // objectType
objectHandle, // objectHandle
heapIndex, // heapIndex
};
for(const auto &callback : info.deviceMemoryReportCallbacks) {
callback.first(&callbackData, callback.second);
}
}
// Record guest-side info for an allocated VkDeviceMemory: owning device,
// size, mapped pointer, memory type index, import flag, and platform
// backing handles. |ahw| is only stored on Android builds; |vmoHandle|
// is meaningful on Fuchsia (elsewhere it stays ZX_HANDLE_INVALID).
void setDeviceMemoryInfo(VkDevice device,
                         VkDeviceMemory memory,
                         VkDeviceSize allocationSize,
                         uint8_t* ptr,
                         uint32_t memoryTypeIndex,
                         AHardwareBuffer* ahw = nullptr,
                         bool imported = false,
                         zx_handle_t vmoHandle = ZX_HANDLE_INVALID) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& info = info_VkDeviceMemory[memory];

    info.device = device;
    info.allocationSize = allocationSize;
    info.ptr = ptr;
    info.memoryTypeIndex = memoryTypeIndex;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    info.ahw = ahw;
#endif
    info.imported = imported;
    info.vmoHandle = vmoHandle;
}
// Record the owning device and full create-info for an image.
void setImageInfo(VkImage image,
                  VkDevice device,
                  const VkImageCreateInfo *pCreateInfo) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& imageInfo = info_VkImage[image];

    imageInfo.device = device;
    imageInfo.createInfo = *pCreateInfo;
}
// The host-visible mapping for |memory|, or nullptr for untracked
// handles / unmapped allocations.
uint8_t* getMappedPointer(VkDeviceMemory memory) {
    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkDeviceMemory.find(memory);
    return (it == info_VkDeviceMemory.end()) ? nullptr : it->second.ptr;
}
// The allocation size recorded for |memory|, or 0 for untracked handles.
VkDeviceSize getMappedSize(VkDeviceMemory memory) {
    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkDeviceMemory.find(memory);
    return (it == info_VkDeviceMemory.end())
               ? 0
               : it->second.allocationSize;
}
// Whether |range| lies entirely within a tracked, host-mapped allocation.
// VK_WHOLE_SIZE means "from offset to the end of the allocation".
// Rewritten to avoid the unsigned overflow in the original
// `offset + size <= allocationSize` check, which could wrongly accept a
// huge offset/size pair that wraps around.
bool isValidMemoryRange(const VkMappedMemoryRange& range) const {
    AutoLock<RecursiveLock> lock(mLock);
    const auto it = info_VkDeviceMemory.find(range.memory);
    if (it == info_VkDeviceMemory.end()) return false;
    const auto& info = it->second;

    if (!info.ptr) return false;

    VkDeviceSize offset = range.offset;
    VkDeviceSize size = range.size;

    if (offset > info.allocationSize) return false;
    if (size == VK_WHOLE_SIZE) return true;
    // Overflow-safe form of offset + size <= allocationSize.
    return size <= info.allocationSize - offset;
}
// One-time capture of host emulator feature flags. Creates the address
// space block provider when direct memory is available, connects the
// goldfish control and sysmem services on Fuchsia, and accumulates the
// Vulkan stream feature bits matching the advertised host features.
// Subsequent calls are no-ops (guarded by mFeatureInfo already set).
void setupFeatures(const EmulatorFeatureInfo* features) {
    if (!features || mFeatureInfo) return;
    mFeatureInfo.reset(new EmulatorFeatureInfo);
    *mFeatureInfo = *features;
    if (mFeatureInfo->hasDirectMem) {
        mGoldfishAddressSpaceBlockProvider.reset(
            new GoldfishAddressSpaceBlockProvider(
                GoldfishAddressSpaceSubdeviceType::NoSubdevice));
    }

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (mFeatureInfo->hasVulkan) {
        // Control device is mandatory: abort when it cannot be opened.
        fidl::ClientEnd<fuchsia_hardware_goldfish::ControlDevice> channel{
            zx::channel(GetConnectToServiceFunction()("/loader-gpu-devices/class/goldfish-control/000"))};
        if (!channel) {
            ALOGE("failed to open control device");
            abort();
        }
        mControlDevice =
            fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>(
                std::move(channel));

        // Sysmem is best-effort: only an error is logged on failure.
        fidl::ClientEnd<fuchsia_sysmem::Allocator> sysmem_channel{
            zx::channel(GetConnectToServiceFunction()("/svc/fuchsia.sysmem.Allocator"))};
        if (!sysmem_channel) {
            ALOGE("failed to open sysmem connection");
        }
        mSysmemAllocator =
            fidl::WireSyncClient<fuchsia_sysmem::Allocator>(
                std::move(sysmem_channel));

        // Tag the sysmem connection with "<process-name>-goldfish" and the
        // process koid for debugging on the sysmem side.
        char name[ZX_MAX_NAME_LEN] = {};
        zx_object_get_property(zx_process_self(), ZX_PROP_NAME, name, sizeof(name));
        std::string client_name(name);
        client_name += "-goldfish";
        zx_info_handle_basic_t info;
        zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &info, sizeof(info),
                           nullptr, nullptr);
        mSysmemAllocator->SetDebugClientInfo(fidl::StringView::FromExternal(client_name),
                                             info.koid);
    }
#endif

    // Translate host feature flags into stream feature bits.
    if (mFeatureInfo->hasVulkanNullOptionalStrings) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
    }
    if (mFeatureInfo->hasVulkanIgnoredHandles) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
    }
    if (mFeatureInfo->hasVulkanShaderFloat16Int8) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
    }
    if (mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
    }
}
// Install the process-wide threading callbacks (stored in a class-level
// static, so the most recent caller wins).
void setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
    ResourceTracker::threadingCallbacks = callbacks;
}
// Whether the host advertises Vulkan support (false before
// setupFeatures() has run).
bool hostSupportsVulkan() const {
    return mFeatureInfo && mFeatureInfo->hasVulkan;
}
// Direct mapping is unconditionally enabled in this implementation.
bool usingDirectMapping() const {
    return true;
}
// Stream feature bits accumulated by setupFeatures().
uint32_t getStreamFeatures() const {
    return ResourceTracker::streamFeatureBits;
}
// Whether the host supports deferred Vulkan commands (false before
// setupFeatures() has run).
bool supportsDeferredCommands() const {
    return mFeatureInfo && mFeatureInfo->hasDeferredVulkanCommands;
}
// Whether the host supports asynchronous queue submission (false before
// setupFeatures() has run).
bool supportsAsyncQueueSubmit() const {
    return mFeatureInfo && mFeatureInfo->hasVulkanAsyncQueueSubmit;
}
// Whether the host supports creating resources together with their
// memory requirements in one call (false before setupFeatures()).
bool supportsCreateResourcesWithRequirements() const {
    return mFeatureInfo && mFeatureInfo->hasVulkanCreateResourcesWithRequirements;
}
// Index of |extName| in the cached host instance-extension list, or -1
// when the host does not advertise it.
int getHostInstanceExtensionIndex(const std::string& extName) const {
    for (size_t idx = 0; idx < mHostInstanceExtensions.size(); ++idx) {
        if (extName == mHostInstanceExtensions[idx].extensionName) {
            return static_cast<int>(idx);
        }
    }
    return -1;
}
// Index of |extName| in the cached host device-extension list, or -1
// when the host does not advertise it.
int getHostDeviceExtensionIndex(const std::string& extName) const {
    for (size_t idx = 0; idx < mHostDeviceExtensions.size(); ++idx) {
        if (extName == mHostDeviceExtensions[idx].extensionName) {
            return static_cast<int>(idx);
        }
    }
    return -1;
}
// Guest->host transform: rewrite suballocated VkDeviceMemory handles (and
// their offsets/sizes) into the backing coherent-memory handles before
// encoding a call. Entries without a coherent backing are left untouched.
// The typeIndex/typeBits arrays are currently unused.
void deviceMemoryTransform_tohost(
    VkDeviceMemory* memory, uint32_t memoryCount,
    VkDeviceSize* offset, uint32_t offsetCount,
    VkDeviceSize* size, uint32_t sizeCount,
    uint32_t* typeIndex, uint32_t typeIndexCount,
    uint32_t* typeBits, uint32_t typeBitsCount) {
    (void)memoryCount;
    (void)offsetCount;
    (void)sizeCount;
    (void)typeIndex;
    (void)typeIndexCount;
    (void)typeBits;
    (void)typeBitsCount;

    if (memory) {
        AutoLock<RecursiveLock> lock (mLock);

        for (uint32_t i = 0; i < memoryCount; ++i) {
            VkDeviceMemory mem = memory[i];

            auto it = info_VkDeviceMemory.find(mem);
            // NOTE(review): bails out of the whole loop on the first
            // unknown handle rather than skipping it — confirm intended.
            if (it == info_VkDeviceMemory.end())
                return;

            const auto& info = it->second;

            if (!info.coherentMemory)
                continue;

            // Replace the suballocated handle with the backing device
            // memory and shift the offset by the suballocation base.
            memory[i] = info.coherentMemory->getDeviceMemory();

            if (offset) {
                offset[i] = info.coherentMemoryOffset + offset[i];
            }

            // VK_WHOLE_SIZE must be resolved to the actual suballocation
            // size, since the backing allocation is larger.
            if (size && size[i] == VK_WHOLE_SIZE) {
                size[i] = info.allocationSize;
            }

            // TODO
            (void)memory;
            (void)offset;
            (void)size;
        }
    }
}
// Host->guest direction: no transformation is performed; every parameter
// is deliberately ignored.
void deviceMemoryTransform_fromhost(
    VkDeviceMemory* memory, uint32_t memoryCount,
    VkDeviceSize* offset, uint32_t offsetCount,
    VkDeviceSize* size, uint32_t sizeCount,
    uint32_t* typeIndex, uint32_t typeIndexCount,
    uint32_t* typeBits, uint32_t typeBitsCount) {
    (void)memory;
    (void)memoryCount;
    (void)offset;
    (void)offsetCount;
    (void)size;
    (void)sizeCount;
    (void)typeIndex;
    (void)typeIndexCount;
    (void)typeBits;
    (void)typeBitsCount;
}
// Mask external-memory handle types reported by the host down to the
// types this guest platform handles (VMO on Fuchsia; opaque fd and
// AHardwareBuffer on Android). On other platforms no mask is applied.
void transformImpl_VkExternalMemoryProperties_fromhost(
    VkExternalMemoryProperties* pProperties,
    uint32_t) {
    VkExternalMemoryHandleTypeFlags supportedHandleType = 0u;
#ifdef VK_USE_PLATFORM_FUCHSIA
    supportedHandleType |=
        VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
#endif // VK_USE_PLATFORM_FUCHSIA
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    supportedHandleType |=
        VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
        VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
#endif // VK_USE_PLATFORM_ANDROID_KHR
    if (supportedHandleType) {
        pProperties->compatibleHandleTypes &= supportedHandleType;
        pProperties->exportFromImportedHandleTypes &= supportedHandleType;
    }
}
// Guest-side vkEnumerateInstanceExtensionProperties. Advertises only a
// curated subset of the host's instance extensions, plus entries that are
// always exposed on Fuchsia. Implements the standard two-call
// count/fill protocol, including VK_INCOMPLETE on short buffers.
VkResult on_vkEnumerateInstanceExtensionProperties(
    void* context,
    VkResult,
    const char*,
    uint32_t* pPropertyCount,
    VkExtensionProperties* pProperties) {
    std::vector<const char*> allowedExtensionNames = {
        "VK_KHR_get_physical_device_properties2",
        "VK_KHR_sampler_ycbcr_conversion",
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        "VK_KHR_external_semaphore_capabilities",
        "VK_KHR_external_memory_capabilities",
        "VK_KHR_external_fence_capabilities",
#endif
    };

    VkEncoder* enc = (VkEncoder*)context;

    // Only advertise a select set of extensions.
    if (mHostInstanceExtensions.empty()) {
        // Lazily cache the host extension list (count call, then fill call).
        uint32_t hostPropCount = 0;
        enc->vkEnumerateInstanceExtensionProperties(nullptr, &hostPropCount, nullptr, true /* do lock */);
        mHostInstanceExtensions.resize(hostPropCount);

        VkResult hostRes =
            enc->vkEnumerateInstanceExtensionProperties(
                nullptr, &hostPropCount, mHostInstanceExtensions.data(), true /* do lock */);

        if (hostRes != VK_SUCCESS) {
            return hostRes;
        }
    }

    // Keep only the allowed extensions the host actually supports.
    std::vector<VkExtensionProperties> filteredExts;

    for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
        auto extIndex = getHostInstanceExtensionIndex(allowedExtensionNames[i]);
        if (extIndex != -1) {
            filteredExts.push_back(mHostInstanceExtensions[extIndex]);
        }
    }

    // Extensions advertised unconditionally on this platform.
    VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_FUCHSIA
        { "VK_KHR_external_memory_capabilities", 1},
        { "VK_KHR_external_semaphore_capabilities", 1},
#endif
    };

    for (auto& anbExtProp: anbExtProps) {
        filteredExts.push_back(anbExtProp);
    }

    // Spec:
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
    //
    // If pProperties is NULL, then the number of extensions properties
    // available is returned in pPropertyCount. Otherwise, pPropertyCount
    // must point to a variable set by the user to the number of elements
    // in the pProperties array, and on return the variable is overwritten
    // with the number of structures actually written to pProperties. If
    // pPropertyCount is less than the number of extension properties
    // available, at most pPropertyCount structures will be written. If
    // pPropertyCount is smaller than the number of extensions available,
    // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
    // that not all the available properties were returned.
    //
    // pPropertyCount must be a valid pointer to a uint32_t value
    if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pProperties) {
        // Count-only query.
        *pPropertyCount = (uint32_t)filteredExts.size();
        return VK_SUCCESS;
    } else {
        auto actualExtensionCount = (uint32_t)filteredExts.size();
        if (*pPropertyCount > actualExtensionCount) {
            *pPropertyCount = actualExtensionCount;
        }

        for (uint32_t i = 0; i < *pPropertyCount; ++i) {
            pProperties[i] = filteredExts[i];
        }

        if (actualExtensionCount > *pPropertyCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}
// Guest-side implementation of vkEnumerateDeviceExtensionProperties.
// Reports only an allow-listed subset of the host's device extensions,
// plus a handful of extensions that this translation layer emulates or
// translates per platform (ANDROID / Fuchsia / Linux).
VkResult on_vkEnumerateDeviceExtensionProperties(
void* context,
VkResult,
VkPhysicalDevice physdev,
const char*,
uint32_t* pPropertyCount,
VkExtensionProperties* pProperties) {
// Host extensions that may be passed straight through to the guest
// if (and only if) the host actually exposes them.
std::vector<const char*> allowedExtensionNames = {
"VK_KHR_vulkan_memory_model",
"VK_KHR_buffer_device_address",
"VK_KHR_maintenance1",
"VK_KHR_maintenance2",
"VK_KHR_maintenance3",
"VK_KHR_bind_memory2",
"VK_KHR_dedicated_allocation",
"VK_KHR_get_memory_requirements2",
"VK_KHR_sampler_ycbcr_conversion",
"VK_KHR_shader_float16_int8",
// Timeline semaphores buggy in newer NVIDIA drivers
// (vkWaitSemaphoresKHR causes further vkCommandBuffer dispatches to deadlock)
#ifndef VK_USE_PLATFORM_ANDROID_KHR
"VK_KHR_timeline_semaphore",
#endif
"VK_AMD_gpu_shader_half_float",
"VK_NV_shader_subgroup_partitioned",
"VK_KHR_shader_subgroup_extended_types",
"VK_EXT_subgroup_size_control",
"VK_EXT_provoking_vertex",
"VK_EXT_line_rasterization",
"VK_KHR_shader_terminate_invocation",
"VK_EXT_transform_feedback",
"VK_EXT_primitive_topology_list_restart",
"VK_EXT_index_type_uint8",
"VK_EXT_load_store_op_none",
"VK_EXT_swapchain_colorspace",
"VK_EXT_image_robustness",
"VK_EXT_custom_border_color",
"VK_EXT_shader_stencil_export",
"VK_KHR_image_format_list",
"VK_KHR_incremental_present",
"VK_KHR_pipeline_executable_properties",
"VK_EXT_queue_family_foreign",
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
"VK_KHR_external_semaphore",
"VK_KHR_external_semaphore_fd",
// "VK_KHR_external_semaphore_win32", not exposed because it's translated to fd
"VK_KHR_external_memory",
"VK_KHR_external_fence",
"VK_KHR_external_fence_fd",
"VK_EXT_device_memory_report",
#endif
#if !defined(VK_USE_PLATFORM_ANDROID_KHR) && defined(__linux__)
"VK_KHR_create_renderpass2",
"VK_KHR_imageless_framebuffer",
#endif
};
VkEncoder* enc = (VkEncoder*)context;
// Lazily fetch and cache the host's device extension list; this costs
// two host round trips on first use only.
if (mHostDeviceExtensions.empty()) {
uint32_t hostPropCount = 0;
enc->vkEnumerateDeviceExtensionProperties(physdev, nullptr, &hostPropCount, nullptr, true /* do lock */);
mHostDeviceExtensions.resize(hostPropCount);
VkResult hostRes =
enc->vkEnumerateDeviceExtensionProperties(
physdev, nullptr, &hostPropCount, mHostDeviceExtensions.data(), true /* do lock */);
if (hostRes != VK_SUCCESS) {
return hostRes;
}
}
// Probe what forms of external semaphore the host supports; win32
// support can be translated to fd-based semaphores below.
bool hostHasWin32ExternalSemaphore =
getHostDeviceExtensionIndex(
"VK_KHR_external_semaphore_win32") != -1;
bool hostHasPosixExternalSemaphore =
getHostDeviceExtensionIndex(
"VK_KHR_external_semaphore_fd") != -1;
D("%s: host has ext semaphore? win32 %d posix %d\n", __func__,
hostHasWin32ExternalSemaphore,
hostHasPosixExternalSemaphore);
bool hostSupportsExternalSemaphore =
hostHasWin32ExternalSemaphore ||
hostHasPosixExternalSemaphore;
// Pass through every allow-listed extension the host exposes,
// preserving the host's reported spec versions.
std::vector<VkExtensionProperties> filteredExts;
for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
auto extIndex = getHostDeviceExtensionIndex(allowedExtensionNames[i]);
if (extIndex != -1) {
filteredExts.push_back(mHostDeviceExtensions[extIndex]);
}
}
// Extensions implemented by this layer itself and therefore advertised
// unconditionally for the given platform.
VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
{ "VK_ANDROID_native_buffer", 7 },
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
{ "VK_KHR_external_memory", 1 },
{ "VK_KHR_external_semaphore", 1 },
{ "VK_FUCHSIA_external_semaphore", 1 },
#endif
};
for (auto& anbExtProp: anbExtProps) {
filteredExts.push_back(anbExtProp);
}
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
// If the host lacks fd-based external fences, the guest still gets the
// extension: it is emulated by this layer.
bool hostSupportsExternalFenceFd =
getHostDeviceExtensionIndex(
"VK_KHR_external_fence_fd") != -1;
if (!hostSupportsExternalFenceFd) {
filteredExts.push_back(
VkExtensionProperties { "VK_KHR_external_fence_fd", 1});
}
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
// Host supports external semaphores only via win32: advertise the fd
// flavor anyway, since the layer translates fd <-> win32.
if (hostSupportsExternalSemaphore &&
!hostHasPosixExternalSemaphore) {
filteredExts.push_back(
VkExtensionProperties { "VK_KHR_external_semaphore_fd", 1});
}
#endif
// External-memory-dependent extensions (AHB, Fuchsia collections,
// dma-buf) are only advertised when the host can export memory in some
// form (win32, fd, or MoltenVK).
bool win32ExtMemAvailable =
getHostDeviceExtensionIndex(
"VK_KHR_external_memory_win32") != -1;
bool posixExtMemAvailable =
getHostDeviceExtensionIndex(
"VK_KHR_external_memory_fd") != -1;
bool moltenVkExtAvailable =
getHostDeviceExtensionIndex(
"VK_MVK_moltenvk") != -1;
bool hostHasExternalMemorySupport =
win32ExtMemAvailable || posixExtMemAvailable || moltenVkExtAvailable;
if (hostHasExternalMemorySupport) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
filteredExts.push_back(
VkExtensionProperties {
"VK_ANDROID_external_memory_android_hardware_buffer", 7
});
filteredExts.push_back(
VkExtensionProperties { "VK_EXT_queue_family_foreign", 1 });
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
filteredExts.push_back(
VkExtensionProperties { "VK_FUCHSIA_external_memory", 1});
filteredExts.push_back(
VkExtensionProperties { "VK_FUCHSIA_buffer_collection", 1 });
#endif
#if !defined(VK_USE_PLATFORM_ANDROID_KHR) && defined(__linux__)
filteredExts.push_back(
VkExtensionProperties {
"VK_KHR_external_memory_fd", 1
});
filteredExts.push_back(
VkExtensionProperties { "VK_EXT_external_memory_dma_buf", 1 });
#endif
}
// Spec:
//
// https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateDeviceExtensionProperties.html
//
// pPropertyCount is a pointer to an integer related to the number of
// extension properties available or queried, and is treated in the
// same fashion as the
// vkEnumerateInstanceExtensionProperties::pPropertyCount parameter.
//
// https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
//
// If pProperties is NULL, then the number of extensions properties
// available is returned in pPropertyCount. Otherwise, pPropertyCount
// must point to a variable set by the user to the number of elements
// in the pProperties array, and on return the variable is overwritten
// with the number of structures actually written to pProperties. If
// pPropertyCount is less than the number of extension properties
// available, at most pPropertyCount structures will be written. If
// pPropertyCount is smaller than the number of extensions available,
// VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
// that not all the available properties were returned.
//
// pPropertyCount must be a valid pointer to a uint32_t value
if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;
if (!pProperties) {
*pPropertyCount = (uint32_t)filteredExts.size();
return VK_SUCCESS;
} else {
auto actualExtensionCount = (uint32_t)filteredExts.size();
if (*pPropertyCount > actualExtensionCount) {
*pPropertyCount = actualExtensionCount;
}
for (uint32_t i = 0; i < *pPropertyCount; ++i) {
pProperties[i] = filteredExts[i];
}
if (actualExtensionCount > *pPropertyCount) {
return VK_INCOMPLETE;
}
return VK_SUCCESS;
}
}
// Guest-side implementation of vkEnumeratePhysicalDevices.
// On first use the complete set of host physical devices is fetched and
// cached on the instance info; subsequent calls are answered from the
// cache according to the spec's count/pointer protocol.
VkResult on_vkEnumeratePhysicalDevices(
    void* context, VkResult,
    VkInstance instance, uint32_t* pPhysicalDeviceCount,
    VkPhysicalDevice* pPhysicalDevices) {
    VkEncoder* enc = (VkEncoder*)context;

    // Reject calls with invalid required arguments.
    if (!instance) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pPhysicalDeviceCount) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    auto instanceIt = info_VkInstance.find(instance);
    if (instanceIt == info_VkInstance.end()) return VK_ERROR_INITIALIZATION_FAILED;
    auto& instanceInfo = instanceIt->second;

    // Lazily fetch and cache full information about the host's physical
    // devices, even if the guest asked only for a count. The tracker
    // lock is released around each encoder call.
    if (instanceInfo.physicalDevices.empty()) {
        uint32_t hostCount = 0;

        lock.unlock();
        VkResult res = enc->vkEnumeratePhysicalDevices(
            instance, &hostCount, nullptr, false /* no lock */);
        lock.lock();

        if (res != VK_SUCCESS) {
            ALOGE("%s: failed: could not count host physical devices. "
                  "Error %d\n", __func__, res);
            return res;
        }

        instanceInfo.physicalDevices.resize(hostCount);

        lock.unlock();
        res = enc->vkEnumeratePhysicalDevices(
            instance, &hostCount, instanceInfo.physicalDevices.data(),
            false /* no lock */);
        lock.lock();

        if (res != VK_SUCCESS) {
            ALOGE("%s: failed: could not retrieve host physical devices. "
                  "Error %d\n", __func__, res);
            return res;
        }
    }

    // Serve the guest query according to the spec:
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
    //
    // NULL pPhysicalDevices means "return the count"; otherwise write at
    // most *pPhysicalDeviceCount handles and signal truncation with
    // VK_INCOMPLETE.
    const uint32_t available = (uint32_t)instanceInfo.physicalDevices.size();

    if (!pPhysicalDevices) {
        *pPhysicalDeviceCount = available;
        return VK_SUCCESS;
    }

    uint32_t written = *pPhysicalDeviceCount;
    if (available < written) written = available;

    for (uint32_t i = 0; i < written; ++i) {
        pPhysicalDevices[i] = instanceInfo.physicalDevices[i];
    }

    *pPhysicalDeviceCount = written;

    return (available > written) ? VK_INCOMPLETE : VK_SUCCESS;
}
// Post-process vkGetPhysicalDeviceProperties: the emulated device is
// always presented to the guest as a virtual GPU.
void on_vkGetPhysicalDeviceProperties(
    void*,
    VkPhysicalDevice,
    VkPhysicalDeviceProperties* pProperties) {
    if (!pProperties) return;
    pProperties->deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
}
// Post-process vkGetPhysicalDeviceFeatures2: if the caller chained a
// VkPhysicalDeviceDeviceMemoryReportFeaturesEXT struct, report the
// deviceMemoryReport feature as supported.
void on_vkGetPhysicalDeviceFeatures2(
    void*,
    VkPhysicalDevice,
    VkPhysicalDeviceFeatures2* pFeatures) {
    if (!pFeatures) return;

    auto* memReportFeatures =
        vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pFeatures);
    if (memReportFeatures) {
        memReportFeatures->deviceMemoryReport = VK_TRUE;
    }
}
// Post-process vkGetPhysicalDeviceProperties2: force the virtual-GPU
// device type and set the device-memory-report feature bit if that
// struct happens to be in the chain.
void on_vkGetPhysicalDeviceProperties2(
void*,
VkPhysicalDevice,
VkPhysicalDeviceProperties2* pProperties) {
if (pProperties) {
pProperties->properties.deviceType =
VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
// NOTE(review): this searches the *properties* pNext chain for a
// *features* struct (VkPhysicalDeviceDeviceMemoryReportFeaturesEXT),
// which callers normally chain to VkPhysicalDeviceFeatures2 instead.
// It is likely dead code here -- confirm whether this was meant for
// on_vkGetPhysicalDeviceFeatures2.
VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pProperties);
if (memoryReportFeaturesEXT) {
memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
}
}
}
// Answers vkGetPhysicalDeviceMemoryProperties from the tracker's cached
// memory properties rather than a host round trip.
void on_vkGetPhysicalDeviceMemoryProperties(
void* context,
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceMemoryProperties* out) {
// gfxstream decides which physical device to expose to the guest on startup.
// Otherwise, we would need a physical device to properties mapping.
*out = getPhysicalDeviceMemoryProperties(context, VK_NULL_HANDLE, physicalDevice);
}
// VkPhysicalDeviceMemoryProperties2 variant: fills only the core
// memoryProperties member; any structs chained on pNext are untouched.
void on_vkGetPhysicalDeviceMemoryProperties2(
void*,
VkPhysicalDevice physdev,
VkPhysicalDeviceMemoryProperties2* out) {
// NOTE(review): context is forwarded as nullptr -- this assumes
// getPhysicalDeviceMemoryProperties() does not use the context on this
// path; confirm against its implementation.
on_vkGetPhysicalDeviceMemoryProperties(nullptr, physdev, &out->memoryProperties);
}
// Record which VkDevice owns the queue the host just handed back, so
// later per-queue operations can find their device.
void on_vkGetDeviceQueue(void*,
                         VkDevice device,
                         uint32_t,
                         uint32_t,
                         VkQueue* pQueue) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& queueInfo = info_VkQueue[*pQueue];
    queueInfo.device = device;
}
// VkDeviceQueueInfo2 variant: same bookkeeping as on_vkGetDeviceQueue.
void on_vkGetDeviceQueue2(void*,
                          VkDevice device,
                          const VkDeviceQueueInfo2*,
                          VkQueue* pQueue) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& queueInfo = info_VkQueue[*pQueue];
    queueInfo.device = device;
}
// Records guest-side info (enabled extensions, API version) for a newly
// created instance. The host has already created the instance; this hook
// only runs on success.
VkResult on_vkCreateInstance(
    void* context,
    VkResult input_result,
    const VkInstanceCreateInfo* createInfo,
    const VkAllocationCallbacks*,
    VkInstance* pInstance) {
    if (input_result != VK_SUCCESS) return input_result;

    VkEncoder* enc = (VkEncoder*)context;

    // Fix: the result of vkEnumerateInstanceVersion was previously
    // ignored, leaving apiVersion uninitialized if the host call failed.
    // Default to Vulkan 1.0 and only trust the queried value on success.
    uint32_t apiVersion = VK_API_VERSION_1_0;
    VkResult enumInstanceVersionRes =
        enc->vkEnumerateInstanceVersion(&apiVersion, false /* no lock */);
    if (enumInstanceVersionRes != VK_SUCCESS) {
        apiVersion = VK_API_VERSION_1_0;
    }

    setInstanceInfo(
        *pInstance,
        createInfo->enabledExtensionCount,
        createInfo->ppEnabledExtensionNames,
        apiVersion);

    return input_result;
}
// Records guest-side info for a newly created device: a snapshot of the
// physical device's (memory) properties plus the enabled extensions,
// so later queries avoid host round trips. Only runs on success.
VkResult on_vkCreateDevice(
    void* context,
    VkResult input_result,
    VkPhysicalDevice physicalDevice,
    const VkDeviceCreateInfo* pCreateInfo,
    const VkAllocationCallbacks*,
    VkDevice* pDevice) {
    if (input_result != VK_SUCCESS) return input_result;

    VkEncoder* enc = (VkEncoder*)context;

    VkPhysicalDeviceProperties physdevProps;
    VkPhysicalDeviceMemoryProperties physdevMemProps;
    enc->vkGetPhysicalDeviceProperties(physicalDevice, &physdevProps, false /* no lock */);
    enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &physdevMemProps, false /* no lock */);

    setDeviceInfo(
        *pDevice, physicalDevice, physdevProps, physdevMemProps,
        pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames,
        pCreateInfo->pNext);

    return input_result;
}
// Runs before the host destroys a device: drops tracking info for every
// memory object owned by that device. (The device's own info entry is
// cleaned up elsewhere.)
void on_vkDestroyDevice_pre(
    void* context,
    VkDevice device,
    const VkAllocationCallbacks*) {
    (void)context;
    AutoLock<RecursiveLock> lock(mLock);

    if (info_VkDevice.find(device) == info_VkDevice.end()) return;

    auto memIt = info_VkDeviceMemory.cbegin();
    while (memIt != info_VkDeviceMemory.cend()) {
        if (memIt->second.device == device) {
            memIt = info_VkDeviceMemory.erase(memIt);
        } else {
            ++memIt;
        }
    }
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Fills VkAndroidHardwareBufferPropertiesANDROID for an AHardwareBuffer
// using this device's cached memory properties and the gralloc helper
// of the current host connection.
VkResult on_vkGetAndroidHardwareBufferPropertiesANDROID(
void* context, VkResult,
VkDevice device,
const AHardwareBuffer* buffer,
VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
const VkPhysicalDeviceMemoryProperties& memoryProperties =
getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);
auto grallocHelper =
ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
return getAndroidHardwareBufferPropertiesANDROID(
grallocHelper,
&memoryProperties,
device, buffer, pProperties);
}
// Exports the AHardwareBuffer backing a tracked VkDeviceMemory.
// Fails with VK_ERROR_INITIALIZATION_FAILED if the info struct, the
// device, or the memory object is unknown to the tracker.
VkResult on_vkGetMemoryAndroidHardwareBufferANDROID(
    void*, VkResult,
    VkDevice device,
    const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
    struct AHardwareBuffer** pBuffer) {
    if (!pInfo || !pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    if (info_VkDevice.find(device) == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto memIt = info_VkDeviceMemory.find(pInfo->memory);
    if (memIt == info_VkDeviceMemory.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    auto& memInfo = memIt->second;

    VkResult res = getMemoryAndroidHardwareBufferANDROID(&memInfo.ahw);
    if (res != VK_SUCCESS) return res;

    *pBuffer = memInfo.ahw;
    return VK_SUCCESS;
}
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
// Exports the VMO backing a tracked VkDeviceMemory as a duplicated
// Zircon handle in *pHandle.
VkResult on_vkGetMemoryZirconHandleFUCHSIA(
    void*, VkResult,
    VkDevice device,
    const VkMemoryGetZirconHandleInfoFUCHSIA* pInfo,
    uint32_t* pHandle) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);
    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);
    if (memoryIt == info_VkDeviceMemory.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = memoryIt->second;
    // Memory not backed by a VMO cannot be exported.
    if (info.vmoHandle == ZX_HANDLE_INVALID) {
        ALOGE("%s: memory cannot be exported", __func__);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    *pHandle = ZX_HANDLE_INVALID;
    // Fix: zx_handle_duplicate() can fail; the status was previously
    // ignored, so failure reported VK_SUCCESS with an invalid *pHandle.
    zx_status_t status =
        zx_handle_duplicate(info.vmoHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
    if (status != ZX_OK) {
        ALOGE("%s: zx_handle_duplicate failed: %d", __func__, status);
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    return VK_SUCCESS;
}
// Computes which of this device's memory types can import the given
// Zircon VMO handle, by asking the goldfish control device whether the
// VMO is device-local or host-visible.
VkResult on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
void*, VkResult,
VkDevice device,
VkExternalMemoryHandleTypeFlagBits handleType,
uint32_t handle,
VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
using fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal;
using fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
// Only VMO handles are supported.
if (handleType !=
VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
return VK_ERROR_INITIALIZATION_FAILED;
}
// Verify the handle really refers to a VMO before using it.
zx_info_handle_basic_t handleInfo;
zx_status_t status = zx::unowned_vmo(handle)->get_info(
ZX_INFO_HANDLE_BASIC, &handleInfo, sizeof(handleInfo), nullptr,
nullptr);
if (status != ZX_OK || handleInfo.type != ZX_OBJ_TYPE_VMO) {
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
}
AutoLock<RecursiveLock> lock(mLock);
auto deviceIt = info_VkDevice.find(device);
if (deviceIt == info_VkDevice.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto& info = deviceIt->second;
// The control device consumes the handle, so hand it a duplicate.
zx::vmo vmo_dup;
status =
zx::unowned_vmo(handle)->duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
if (status != ZX_OK) {
ALOGE("zx_handle_duplicate() error: %d", status);
return VK_ERROR_INITIALIZATION_FAILED;
}
uint32_t memoryProperty = 0u;
auto result = mControlDevice->GetBufferHandleInfo(std::move(vmo_dup));
if (!result.ok()) {
ALOGE(
"mControlDevice->GetBufferHandleInfo fatal error: epitaph: %d",
result.status());
return VK_ERROR_INITIALIZATION_FAILED;
}
if (result.value().is_ok()) {
memoryProperty = result.value().value()->info.memory_property();
} else if (result.value().error_value() == ZX_ERR_NOT_FOUND) {
// If an VMO is allocated while ColorBuffer/Buffer is not created,
// it must be a device-local buffer, since for host-visible buffers,
// ColorBuffer/Buffer is created at sysmem allocation time.
memoryProperty = kMemoryPropertyDeviceLocal;
} else {
// Importing read-only host memory into the Vulkan driver should not
// work, but it is not an error to try to do so. Returning a
// VkMemoryZirconHandlePropertiesFUCHSIA with no available
// memoryType bits should be enough for clients. See fxbug.dev/24225
// for other issues with this flow.
ALOGW("GetBufferHandleInfo failed: %d", result.value().error_value());
pProperties->memoryTypeBits = 0;
return VK_SUCCESS;
}
// Report every memory type whose property flags are compatible with
// the VMO's device-local / host-visible classification.
pProperties->memoryTypeBits = 0;
for (uint32_t i = 0; i < info.memProps.memoryTypeCount; ++i) {
if (((memoryProperty & kMemoryPropertyDeviceLocal) &&
(info.memProps.memoryTypes[i].propertyFlags &
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
((memoryProperty & kMemoryPropertyHostVisible) &&
(info.memProps.memoryTypes[i].propertyFlags &
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
pProperties->memoryTypeBits |= 1ull << i;
}
}
return VK_SUCCESS;
}
// Returns the kernel object id (koid) of |eventHandle|, or
// ZX_KOID_INVALID if the handle is invalid or cannot be queried.
zx_koid_t getEventKoid(zx_handle_t eventHandle) {
    if (eventHandle == ZX_HANDLE_INVALID) return ZX_KOID_INVALID;

    zx_info_handle_basic_t basicInfo;
    zx_status_t status = zx_object_get_info(
        eventHandle, ZX_INFO_HANDLE_BASIC, &basicInfo, sizeof(basicInfo),
        nullptr, nullptr);
    if (status != ZX_OK) {
        ALOGE("Cannot get object info of handle %u: %d", eventHandle,
              status);
        return ZX_KOID_INVALID;
    }
    return basicInfo.koid;
}
// Imports a Zircon event handle into a tracked semaphore, closing and
// replacing any previously imported event, and caching the event's koid
// for later identification.
VkResult on_vkImportSemaphoreZirconHandleFUCHSIA(
    void*, VkResult,
    VkDevice device,
    const VkImportSemaphoreZirconHandleInfoFUCHSIA* pInfo) {
    if (!pInfo || !pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    if (info_VkDevice.find(device) == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto semIt = info_VkSemaphore.find(pInfo->semaphore);
    if (semIt == info_VkSemaphore.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    auto& semInfo = semIt->second;

    // Replacing an existing import: release the old event first.
    if (semInfo.eventHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(semInfo.eventHandle);
    }
// The handle field was renamed to zirconHandle in Vulkan header 174.
#if VK_HEADER_VERSION < 174
    semInfo.eventHandle = pInfo->handle;
#else // VK_HEADER_VERSION >= 174
    semInfo.eventHandle = pInfo->zirconHandle;
#endif // VK_HEADER_VERSION < 174
    if (semInfo.eventHandle != ZX_HANDLE_INVALID) {
        semInfo.eventKoid = getEventKoid(semInfo.eventHandle);
    }
    return VK_SUCCESS;
}
// Exports the Zircon event backing a tracked semaphore as a duplicated
// handle in *pHandle.
VkResult on_vkGetSemaphoreZirconHandleFUCHSIA(
    void*, VkResult,
    VkDevice device,
    const VkSemaphoreGetZirconHandleInfoFUCHSIA* pInfo,
    uint32_t* pHandle) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);
    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
    if (semaphoreIt == info_VkSemaphore.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = semaphoreIt->second;
    // A semaphore with no imported/created event cannot be exported.
    if (info.eventHandle == ZX_HANDLE_INVALID) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    *pHandle = ZX_HANDLE_INVALID;
    // Fix: zx_handle_duplicate() can fail; the status was previously
    // ignored, so failure reported VK_SUCCESS with an invalid *pHandle.
    zx_status_t status =
        zx_handle_duplicate(info.eventHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
    if (status != ZX_OK) {
        ALOGE("%s: zx_handle_duplicate failed: %d", __func__, status);
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    return VK_SUCCESS;
}
// Creates a VkBufferCollectionFUCHSIA backed by a sysmem
// BufferCollection channel. If the caller supplied a collection token
// it is used directly; otherwise a fresh shared collection is allocated
// from sysmem. The returned handle is a heap-allocated FIDL client,
// reinterpret_cast to the opaque Vulkan handle type, and registered
// with the tracker.
VkResult on_vkCreateBufferCollectionFUCHSIA(
void*,
VkResult,
VkDevice,
const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
const VkAllocationCallbacks*,
VkBufferCollectionFUCHSIA* pCollection) {
fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken> token_client;
if (pInfo->collectionToken) {
// Adopt the caller-provided token channel.
token_client =
fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken>(
zx::channel(pInfo->collectionToken));
} else {
// No token given: allocate a new shared collection from sysmem.
auto endpoints = fidl::CreateEndpoints<
::fuchsia_sysmem::BufferCollectionToken>();
if (!endpoints.is_ok()) {
ALOGE("zx_channel_create failed: %d", endpoints.status_value());
return VK_ERROR_INITIALIZATION_FAILED;
}
auto result = mSysmemAllocator->AllocateSharedCollection(
std::move(endpoints->server));
if (!result.ok()) {
ALOGE("AllocateSharedCollection failed: %d", result.status());
return VK_ERROR_INITIALIZATION_FAILED;
}
token_client = std::move(endpoints->client);
}
// Exchange the token for a BufferCollection channel.
auto endpoints =
fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
if (!endpoints.is_ok()) {
ALOGE("zx_channel_create failed: %d", endpoints.status_value());
return VK_ERROR_INITIALIZATION_FAILED;
}
auto [collection_client, collection_server] =
std::move(endpoints.value());
auto result = mSysmemAllocator->BindSharedCollection(
std::move(token_client), std::move(collection_server));
if (!result.ok()) {
ALOGE("BindSharedCollection failed: %d", result.status());
return VK_ERROR_INITIALIZATION_FAILED;
}
// Ownership of the client is transferred to the returned handle;
// released in on_vkDestroyBufferCollectionFUCHSIA.
auto* sysmem_collection =
new fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>(
std::move(collection_client));
*pCollection =
reinterpret_cast<VkBufferCollectionFUCHSIA>(sysmem_collection);
register_VkBufferCollectionFUCHSIA(*pCollection);
return VK_SUCCESS;
}
// Destroys a buffer collection created by
// on_vkCreateBufferCollectionFUCHSIA: closes the sysmem channel, frees
// the FIDL client, and unregisters the handle.
void on_vkDestroyBufferCollectionFUCHSIA(
    void*,
    VkResult,
    VkDevice,
    VkBufferCollectionFUCHSIA collection,
    const VkAllocationCallbacks*) {
    auto* sysmemClient = reinterpret_cast<
        fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
    if (sysmemClient) {
        // Tell sysmem we are done with the collection before freeing.
        (*sysmemClient)->Close();
        delete sysmemClient;
    }
    unregister_VkBufferCollectionFUCHSIA(collection);
}
// Builds a baseline sysmem BufferCollectionConstraints: the given size
// and buffer-count limits, no contiguity/secure requirements, any
// coherency domain, and only the goldfish device-local / host-visible
// heaps permitted. Zero-valued optional counts are left unset.
inline fuchsia_sysmem::wire::BufferCollectionConstraints
defaultBufferCollectionConstraints(
    size_t minSizeBytes,
    size_t minBufferCount,
    size_t maxBufferCount = 0u,
    size_t minBufferCountForCamping = 0u,
    size_t minBufferCountForDedicatedSlack = 0u,
    size_t minBufferCountForSharedSlack = 0u) {
    fuchsia_sysmem::wire::BufferCollectionConstraints constraints = {};
    constraints.min_buffer_count = minBufferCount;
    if (maxBufferCount > 0) {
        constraints.max_buffer_count = maxBufferCount;
    }
    if (minBufferCountForCamping) {
        constraints.min_buffer_count_for_camping = minBufferCountForCamping;
    }
    // Fix: this parameter was previously accepted but silently dropped.
    if (minBufferCountForDedicatedSlack) {
        constraints.min_buffer_count_for_dedicated_slack =
            minBufferCountForDedicatedSlack;
    }
    if (minBufferCountForSharedSlack) {
        constraints.min_buffer_count_for_shared_slack =
            minBufferCountForSharedSlack;
    }
    constraints.has_buffer_memory_constraints = true;
    fuchsia_sysmem::wire::BufferMemoryConstraints& buffer_constraints =
        constraints.buffer_memory_constraints;

    buffer_constraints.min_size_bytes = minSizeBytes;
    buffer_constraints.max_size_bytes = 0xffffffff;
    buffer_constraints.physically_contiguous_required = false;
    buffer_constraints.secure_required = false;

    // No restrictions on coherency domain or Heaps.
    buffer_constraints.ram_domain_supported = true;
    buffer_constraints.cpu_domain_supported = true;
    buffer_constraints.inaccessible_domain_supported = true;
    buffer_constraints.heap_permitted_count = 2;
    buffer_constraints.heap_permitted[0] =
        fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
    buffer_constraints.heap_permitted[1] =
        fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;

    return constraints;
}
// Translates VkImageUsageFlags into sysmem's vulkan image usage bits.
// Only the usages sysmem understands are mapped; others are dropped.
uint32_t getBufferCollectionConstraintsVulkanImageUsage(
    const VkImageCreateInfo* pImageInfo) {
    const VkImageUsageFlags imageUsage = pImageInfo->usage;
    uint32_t usage = 0u;
    if (imageUsage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
        usage |= fuchsia_sysmem::wire::kVulkanImageUsageColorAttachment;
    }
    if (imageUsage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) {
        usage |= fuchsia_sysmem::wire::kVulkanImageUsageTransferSrc;
    }
    if (imageUsage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) {
        usage |= fuchsia_sysmem::wire::kVulkanImageUsageTransferDst;
    }
    if (imageUsage & VK_IMAGE_USAGE_SAMPLED_BIT) {
        usage |= fuchsia_sysmem::wire::kVulkanImageUsageSampled;
    }
    return usage;
}
// Translates VkBufferUsageFlags into sysmem's vulkan buffer usage bits.
uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
    VkBufferUsageFlags bufferUsage) {
    uint32_t usage = 0u;
    if (bufferUsage & VK_BUFFER_USAGE_TRANSFER_SRC_BIT) {
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsageTransferSrc;
    }
    if (bufferUsage & VK_BUFFER_USAGE_TRANSFER_DST_BIT) {
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsageTransferDst;
    }
    if (bufferUsage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) {
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsageUniformTexelBuffer;
    }
    if (bufferUsage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) {
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsageStorageTexelBuffer;
    }
    if (bufferUsage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) {
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsageUniformBuffer;
    }
    if (bufferUsage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsageStorageBuffer;
    }
    if (bufferUsage & VK_BUFFER_USAGE_INDEX_BUFFER_BIT) {
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsageIndexBuffer;
    }
    if (bufferUsage & VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) {
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsageVertexBuffer;
    }
    if (bufferUsage & VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT) {
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsageIndirectBuffer;
    }
    return usage;
}
// Convenience overload: extracts the usage flags from the buffer
// constraints info and delegates to the flags-based overload.
uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
    return getBufferCollectionConstraintsVulkanBufferUsage(
        pBufferConstraintsInfo->createInfo.usage);
}
// Maps a VkFormat to its canonical sysmem pixel format, or kInvalid for
// formats sysmem does not support.
static fuchsia_sysmem::wire::PixelFormatType vkFormatTypeToSysmem(
    VkFormat format) {
    switch (format) {
        // 32-bit BGRA variants.
        case VK_FORMAT_B8G8R8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_SNORM:
        case VK_FORMAT_B8G8R8A8_USCALED:
        case VK_FORMAT_B8G8R8A8_SSCALED:
        case VK_FORMAT_B8G8R8A8_SINT:
        case VK_FORMAT_B8G8R8A8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kBgra32;
        // 32-bit RGBA variants.
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_R8G8B8A8_SNORM:
        case VK_FORMAT_R8G8B8A8_USCALED:
        case VK_FORMAT_R8G8B8A8_SSCALED:
        case VK_FORMAT_R8G8B8A8_SINT:
        case VK_FORMAT_R8G8B8A8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
        // Single-channel 8-bit variants.
        case VK_FORMAT_R8_UNORM:
        case VK_FORMAT_R8_SNORM:
        case VK_FORMAT_R8_USCALED:
        case VK_FORMAT_R8_SSCALED:
        case VK_FORMAT_R8_UINT:
        case VK_FORMAT_R8_SINT:
        case VK_FORMAT_R8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8;
        // Two-channel 8-bit variants.
        case VK_FORMAT_R8G8_UNORM:
        case VK_FORMAT_R8G8_SNORM:
        case VK_FORMAT_R8G8_USCALED:
        case VK_FORMAT_R8G8_SSCALED:
        case VK_FORMAT_R8G8_UINT:
        case VK_FORMAT_R8G8_SINT:
        case VK_FORMAT_R8G8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8G8;
        default:
            return fuchsia_sysmem::wire::PixelFormatType::kInvalid;
    }
}
// Returns true if |vkFormat| is compatible with |sysmemFormat|.
// A Vulkan format matches the sysmem format it canonically maps to;
// additionally, the VK_FORMAT_R8_* family matches sysmem L8.
static bool vkFormatMatchesSysmemFormat(
    VkFormat vkFormat,
    fuchsia_sysmem::wire::PixelFormatType sysmemFormat) {
    const auto canonical = vkFormatTypeToSysmem(vkFormat);
    if (canonical == fuchsia_sysmem::wire::PixelFormatType::kInvalid) {
        // Unsupported Vulkan formats match nothing.
        return false;
    }
    if (sysmemFormat == canonical) {
        return true;
    }
    // R8 formats also accept L8 (same single-byte-per-pixel layout).
    return canonical == fuchsia_sysmem::wire::PixelFormatType::kR8 &&
           sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kL8;
}
// Maps a sysmem pixel format to a canonical VkFormat, or
// VK_FORMAT_UNDEFINED for unsupported formats.
static VkFormat sysmemPixelFormatTypeToVk(
    fuchsia_sysmem::wire::PixelFormatType format) {
    using PixelFormatType = fuchsia_sysmem::wire::PixelFormatType;
    switch (format) {
        case PixelFormatType::kBgra32:
            return VK_FORMAT_B8G8R8A8_SRGB;
        case PixelFormatType::kR8G8B8A8:
            return VK_FORMAT_R8G8B8A8_SRGB;
        // Both L8 and R8 are represented as single-channel R8 in Vulkan.
        case PixelFormatType::kL8:
        case PixelFormatType::kR8:
            return VK_FORMAT_R8_UNORM;
        case PixelFormatType::kR8G8:
            return VK_FORMAT_R8G8_UNORM;
        default:
            return VK_FORMAT_UNDEFINED;
    }
}
// TODO(fxbug.dev/90856): This is currently only used for allocating
// memory for dedicated external images. It should be migrated to use
// SetBufferCollectionImageConstraintsFUCHSIA.
//
// Applies image constraints derived from |pImageInfo| to a sysmem
// buffer collection. When the format is VK_FORMAT_UNDEFINED, both BGRA
// and RGBA sRGB candidates are offered; otherwise only the requested
// format is used. Delegates the actual FIDL work to
// setBufferCollectionImageConstraintsFUCHSIA.
VkResult setBufferCollectionConstraintsFUCHSIA(
VkEncoder* enc,
VkDevice device,
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
const VkImageCreateInfo* pImageInfo) {
if (pImageInfo == nullptr) {
ALOGE("setBufferCollectionConstraints: pImageInfo cannot be null.");
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
// All candidate formats are constrained to the sRGB color space.
const VkSysmemColorSpaceFUCHSIA kDefaultColorSpace = {
.sType = VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA,
.pNext = nullptr,
.colorSpace = static_cast<uint32_t>(
fuchsia_sysmem::wire::ColorSpaceType::kSrgb),
};
std::vector<VkImageFormatConstraintsInfoFUCHSIA> formatInfos;
if (pImageInfo->format == VK_FORMAT_UNDEFINED) {
const auto kFormats = {
VK_FORMAT_B8G8R8A8_SRGB,
VK_FORMAT_R8G8B8A8_SRGB,
};
for (auto format : kFormats) {
// shallow copy, using pNext from pImageInfo directly.
auto createInfo = *pImageInfo;
createInfo.format = format;
formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
.sType =
VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
.pNext = nullptr,
.imageCreateInfo = createInfo,
.colorSpaceCount = 1,
.pColorSpaces = &kDefaultColorSpace,
});
}
} else {
formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
.sType =
VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
.pNext = nullptr,
.imageCreateInfo = *pImageInfo,
.colorSpaceCount = 1,
.pColorSpaces = &kDefaultColorSpace,
});
}
// Dedicated allocation: exactly one buffer, no slack or camping.
VkImageConstraintsInfoFUCHSIA imageConstraints = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA,
.pNext = nullptr,
.formatConstraintsCount = static_cast<uint32_t>(formatInfos.size()),
.pFormatConstraints = formatInfos.data(),
.bufferCollectionConstraints =
VkBufferCollectionConstraintsInfoFUCHSIA{
.sType =
VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
.pNext = nullptr,
.minBufferCount = 1,
.maxBufferCount = 0,
.minBufferCountForCamping = 0,
.minBufferCountForDedicatedSlack = 0,
.minBufferCountForSharedSlack = 0,
},
.flags = 0u,
};
return setBufferCollectionImageConstraintsFUCHSIA(
enc, device, collection, &imageConstraints);
}
VkResult addImageBufferCollectionConstraintsFUCHSIA(
VkEncoder* enc,
VkDevice device,
VkPhysicalDevice physicalDevice,
const VkImageFormatConstraintsInfoFUCHSIA*
formatConstraints, // always non-zero
VkImageTiling tiling,
fuchsia_sysmem::wire::BufferCollectionConstraints* constraints) {
// First check if the format, tiling and usage is supported on host.
VkImageFormatProperties imageFormatProperties;
auto createInfo = &formatConstraints->imageCreateInfo;
auto result = enc->vkGetPhysicalDeviceImageFormatProperties(
physicalDevice, createInfo->format, createInfo->imageType, tiling,
createInfo->usage, createInfo->flags, &imageFormatProperties,
true /* do lock */);
if (result != VK_SUCCESS) {
ALOGD(
"%s: Image format (%u) type (%u) tiling (%u) "
"usage (%u) flags (%u) not supported by physical "
"device",
__func__, static_cast<uint32_t>(createInfo->format),
static_cast<uint32_t>(createInfo->imageType),
static_cast<uint32_t>(tiling),
static_cast<uint32_t>(createInfo->usage),
static_cast<uint32_t>(createInfo->flags));
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
// Check if format constraints contains unsupported format features.
{
VkFormatProperties formatProperties;
enc->vkGetPhysicalDeviceFormatProperties(
physicalDevice, createInfo->format, &formatProperties,
true /* do lock */);
auto supportedFeatures =
(tiling == VK_IMAGE_TILING_LINEAR)
? formatProperties.linearTilingFeatures
: formatProperties.optimalTilingFeatures;
auto requiredFeatures = formatConstraints->requiredFormatFeatures;
if ((~supportedFeatures) & requiredFeatures) {
ALOGD(
"%s: Host device support features for %s tiling: %08x, "
"required features: %08x, feature bits %08x missing",
__func__,
tiling == VK_IMAGE_TILING_LINEAR ? "LINEAR" : "OPTIMAL",
static_cast<uint32_t>(requiredFeatures),
static_cast<uint32_t>(supportedFeatures),
static_cast<uint32_t>((~supportedFeatures) &
requiredFeatures));
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
}
fuchsia_sysmem::wire::ImageFormatConstraints imageConstraints;
if (formatConstraints->sysmemPixelFormat != 0) {
auto pixelFormat =
static_cast<fuchsia_sysmem::wire::PixelFormatType>(
formatConstraints->sysmemPixelFormat);
if (createInfo->format != VK_FORMAT_UNDEFINED &&
!vkFormatMatchesSysmemFormat(createInfo->format, pixelFormat)) {
ALOGD("%s: VkFormat %u doesn't match sysmem pixelFormat %lu",
__func__, static_cast<uint32_t>(createInfo->format),
formatConstraints->sysmemPixelFormat);
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
imageConstraints.pixel_format.type = pixelFormat;
} else {
auto pixel_format = vkFormatTypeToSysmem(createInfo->format);
if (pixel_format ==
fuchsia_sysmem::wire::PixelFormatType::kInvalid) {
ALOGD("%s: Unsupported VkFormat %u", __func__,
static_cast<uint32_t>(createInfo->format));
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
imageConstraints.pixel_format.type = pixel_format;
}
imageConstraints.color_spaces_count =
formatConstraints->colorSpaceCount;
for (size_t i = 0; i < formatConstraints->colorSpaceCount; i++) {
imageConstraints.color_space[0].type =
static_cast<fuchsia_sysmem::wire::ColorSpaceType>(
formatConstraints->pColorSpaces[i].colorSpace);
}
// Get row alignment from host GPU.
VkDeviceSize offset = 0;
VkDeviceSize rowPitchAlignment = 1u;
if (tiling == VK_IMAGE_TILING_LINEAR) {
VkImageCreateInfo createInfoDup = *createInfo;
createInfoDup.pNext = nullptr;
enc->vkGetLinearImageLayout2GOOGLE(device, &createInfoDup, &offset,
&rowPitchAlignment,
true /* do lock */);
D("vkGetLinearImageLayout2GOOGLE: format %d offset %lu "
"rowPitchAlignment = %lu",
(int)createInfo->format, offset, rowPitchAlignment);
}
imageConstraints.min_coded_width = createInfo->extent.width;
imageConstraints.max_coded_width = 0xfffffff;
imageConstraints.min_coded_height = createInfo->extent.height;
imageConstraints.max_coded_height = 0xffffffff;
// The min_bytes_per_row can be calculated by sysmem using
// |min_coded_width|, |bytes_per_row_divisor| and color format.
imageConstraints.min_bytes_per_row = 0;
imageConstraints.max_bytes_per_row = 0xffffffff;
imageConstraints.max_coded_width_times_coded_height = 0xffffffff;
imageConstraints.layers = 1;
imageConstraints.coded_width_divisor = 1;
imageConstraints.coded_height_divisor = 1;
imageConstraints.bytes_per_row_divisor = rowPitchAlignment;
imageConstraints.start_offset_divisor = 1;
imageConstraints.display_width_divisor = 1;
imageConstraints.display_height_divisor = 1;
imageConstraints.pixel_format.has_format_modifier = true;
imageConstraints.pixel_format.format_modifier.value =
(tiling == VK_IMAGE_TILING_LINEAR)
? fuchsia_sysmem::wire::kFormatModifierLinear
: fuchsia_sysmem::wire::kFormatModifierGoogleGoldfishOptimal;
constraints->image_format_constraints
[constraints->image_format_constraints_count++] = imageConstraints;
return VK_SUCCESS;
}
    // Aggregate result of setBufferCollectionImageConstraintsImpl().
    // Aggregate-initialized at the return sites, so member order must stay
    // in sync with the brace-init lists there.
    struct SetBufferCollectionImageConstraintsResult {
        // VK_SUCCESS on success; on failure the other members are
        // default-constructed and must not be used.
        VkResult result;
        // The sysmem constraints that were sent to the buffer collection.
        fuchsia_sysmem::wire::BufferCollectionConstraints constraints;
        // Maps each accepted sysmem image-format-constraints entry back to
        // the index of the VkImageFormatConstraintsInfoFUCHSIA it came from.
        std::vector<uint32_t> createInfoIndex;
    };
    // Builds sysmem BufferCollectionConstraints from |pImageConstraintsInfo|
    // and sends them to |pCollection| via SetConstraints(). Each
    // format-constraints entry is tried with OPTIMAL tiling (when requested)
    // and always with LINEAR tiling; entries the host cannot support are
    // skipped. CPU usage bits and permitted heaps are derived from
    // |pImageConstraintsInfo->flags|.
    //
    // Returns {result, constraints sent, createInfoIndex} where
    // createInfoIndex[j] is the pFormatConstraints index that produced the
    // j-th accepted sysmem image-format-constraints entry.
    SetBufferCollectionImageConstraintsResult
    setBufferCollectionImageConstraintsImpl(
        VkEncoder* enc,
        VkDevice device,
        fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
        const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
        const auto& collection = *pCollection;
        // Validate the incoming structure before touching sysmem.
        if (!pImageConstraintsInfo ||
            pImageConstraintsInfo->sType !=
                VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA) {
            ALOGE("%s: invalid pImageConstraintsInfo", __func__);
            return {VK_ERROR_INITIALIZATION_FAILED};
        }
        // A zero format count is a valid-usage violation; deliberately fatal.
        if (pImageConstraintsInfo->formatConstraintsCount == 0) {
            ALOGE("%s: formatConstraintsCount must be greater than 0",
                  __func__);
            abort();
        }
        // Seed with defaults; min_size_bytes is 0 because the image size is
        // negotiated via the per-format constraints added below.
        fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
            defaultBufferCollectionConstraints(
                /* min_size_bytes */ 0,
                pImageConstraintsInfo->bufferCollectionConstraints
                    .minBufferCount,
                pImageConstraintsInfo->bufferCollectionConstraints
                    .maxBufferCount,
                pImageConstraintsInfo->bufferCollectionConstraints
                    .minBufferCountForCamping,
                pImageConstraintsInfo->bufferCollectionConstraints
                    .minBufferCountForDedicatedSlack,
                pImageConstraintsInfo->bufferCollectionConstraints
                    .minBufferCountForSharedSlack);
        // NOTE(review): |format_constraints| appears unused below —
        // candidate for removal.
        std::vector<fuchsia_sysmem::wire::ImageFormatConstraints>
            format_constraints;
        // Resolve the physical device for this VkDevice under the tracker
        // lock; needed by the host capability queries below.
        VkPhysicalDevice physicalDevice;
        {
            AutoLock<RecursiveLock> lock(mLock);
            auto deviceIt = info_VkDevice.find(device);
            if (deviceIt == info_VkDevice.end()) {
                return {VK_ERROR_INITIALIZATION_FAILED};
            }
            physicalDevice = deviceIt->second.physdev;
        }
        std::vector<uint32_t> createInfoIndex;
        bool hasOptimalTiling = false;
        for (uint32_t i = 0; i < pImageConstraintsInfo->formatConstraintsCount;
             i++) {
            const VkImageCreateInfo* createInfo =
                &pImageConstraintsInfo->pFormatConstraints[i].imageCreateInfo;
            const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints =
                &pImageConstraintsInfo->pFormatConstraints[i];
            // add ImageFormatConstraints for *optimal* tiling
            VkResult optimalResult = VK_ERROR_FORMAT_NOT_SUPPORTED;
            if (createInfo->tiling == VK_IMAGE_TILING_OPTIMAL) {
                optimalResult = addImageBufferCollectionConstraintsFUCHSIA(
                    enc, device, physicalDevice, formatConstraints,
                    VK_IMAGE_TILING_OPTIMAL, &constraints);
                if (optimalResult == VK_SUCCESS) {
                    createInfoIndex.push_back(i);
                    hasOptimalTiling = true;
                }
            }
            // Add ImageFormatConstraints for *linear* tiling
            VkResult linearResult = addImageBufferCollectionConstraintsFUCHSIA(
                enc, device, physicalDevice, formatConstraints,
                VK_IMAGE_TILING_LINEAR, &constraints);
            if (linearResult == VK_SUCCESS) {
                createInfoIndex.push_back(i);
            }
            // Update usage and BufferMemoryConstraints
            if (linearResult == VK_SUCCESS || optimalResult == VK_SUCCESS) {
                constraints.usage.vulkan |=
                    getBufferCollectionConstraintsVulkanImageUsage(createInfo);
                if (formatConstraints && formatConstraints->flags) {
                    ALOGW(
                        "%s: Non-zero flags (%08x) in image format "
                        "constraints; this is currently not supported, see "
                        "fxbug.dev/68833.",
                        __func__, formatConstraints->flags);
                }
            }
        }
        // Set buffer memory constraints based on optimal/linear tiling support
        // and flags.
        VkImageConstraintsInfoFlagsFUCHSIA flags = pImageConstraintsInfo->flags;
        if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA)
            constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageRead;
        if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA)
            constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageReadOften;
        if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA)
            constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWrite;
        if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA)
            constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWriteOften;
        constraints.has_buffer_memory_constraints = true;
        auto& memory_constraints = constraints.buffer_memory_constraints;
        memory_constraints.cpu_domain_supported = true;
        memory_constraints.ram_domain_supported = true;
        // The inaccessible domain is only offered when some format supports
        // optimal tiling and the CPU never touches the data.
        memory_constraints.inaccessible_domain_supported =
            hasOptimalTiling &&
            !(flags & (VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA |
                       VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA |
                       VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA |
                       VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA));
        if (memory_constraints.inaccessible_domain_supported) {
            memory_constraints.heap_permitted_count = 2;
            memory_constraints.heap_permitted[0] =
                fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
            memory_constraints.heap_permitted[1] =
                fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
        } else {
            memory_constraints.heap_permitted_count = 1;
            memory_constraints.heap_permitted[0] =
                fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
        }
        if (constraints.image_format_constraints_count == 0) {
            ALOGE("%s: none of the specified formats is supported by device",
                  __func__);
            return {VK_ERROR_FORMAT_NOT_SUPPORTED};
        }
        // Name the collection for debugging, then commit the constraints.
        constexpr uint32_t kVulkanPriority = 5;
        const char kName[] = "GoldfishSysmemShared";
        collection->SetName(kVulkanPriority, fidl::StringView(kName));
        auto result = collection->SetConstraints(true, constraints);
        if (!result.ok()) {
            ALOGE("setBufferCollectionConstraints: SetConstraints failed: %d",
                  result.status());
            return {VK_ERROR_INITIALIZATION_FAILED};
        }
        return {VK_SUCCESS, constraints, std::move(createInfoIndex)};
    }
VkResult setBufferCollectionImageConstraintsFUCHSIA(
VkEncoder* enc,
VkDevice device,
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
const auto& collection = *pCollection;
auto setConstraintsResult = setBufferCollectionImageConstraintsImpl(
enc, device, pCollection, pImageConstraintsInfo);
if (setConstraintsResult.result != VK_SUCCESS) {
return setConstraintsResult.result;
}
// copy constraints to info_VkBufferCollectionFUCHSIA if
// |collection| is a valid VkBufferCollectionFUCHSIA handle.
AutoLock<RecursiveLock> lock(mLock);
VkBufferCollectionFUCHSIA buffer_collection =
reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
info_VkBufferCollectionFUCHSIA.end()) {
info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
android::base::makeOptional(
std::move(setConstraintsResult.constraints));
info_VkBufferCollectionFUCHSIA[buffer_collection].createInfoIndex =
std::move(setConstraintsResult.createInfoIndex);
}
return VK_SUCCESS;
}
    // Aggregate result of setBufferCollectionBufferConstraintsImpl().
    // Aggregate-initialized at the return sites, so member order must stay
    // in sync with the brace-init lists there.
    struct SetBufferCollectionBufferConstraintsResult {
        // VK_SUCCESS on success; on failure |constraints| is
        // default-constructed and must not be used.
        VkResult result;
        // The sysmem constraints that were sent to the buffer collection.
        fuchsia_sysmem::wire::BufferCollectionConstraints constraints;
    };
SetBufferCollectionBufferConstraintsResult
setBufferCollectionBufferConstraintsImpl(
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
const auto& collection = *pCollection;
if (pBufferConstraintsInfo == nullptr) {
ALOGE(
"setBufferCollectionBufferConstraints: "
"pBufferConstraintsInfo cannot be null.");
return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
}
fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
defaultBufferCollectionConstraints(
/* min_size_bytes */ pBufferConstraintsInfo->createInfo.size,
/* buffer_count */ pBufferConstraintsInfo
->bufferCollectionConstraints.minBufferCount);
constraints.usage.vulkan =
getBufferCollectionConstraintsVulkanBufferUsage(
pBufferConstraintsInfo);
constexpr uint32_t kVulkanPriority = 5;
const char kName[] = "GoldfishBufferSysmemShared";
collection->SetName(kVulkanPriority, fidl::StringView(kName));
auto result = collection->SetConstraints(true, constraints);
if (!result.ok()) {
ALOGE("setBufferCollectionConstraints: SetConstraints failed: %d",
result.status());
return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
}
return {VK_SUCCESS, constraints};
}
VkResult setBufferCollectionBufferConstraintsFUCHSIA(
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
auto setConstraintsResult = setBufferCollectionBufferConstraintsImpl(
pCollection, pBufferConstraintsInfo);
if (setConstraintsResult.result != VK_SUCCESS) {
return setConstraintsResult.result;
}
// copy constraints to info_VkBufferCollectionFUCHSIA if
// |collection| is a valid VkBufferCollectionFUCHSIA handle.
AutoLock<RecursiveLock> lock(mLock);
VkBufferCollectionFUCHSIA buffer_collection =
reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
info_VkBufferCollectionFUCHSIA.end()) {
info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
android::base::makeOptional(setConstraintsResult.constraints);
}
return VK_SUCCESS;
}
VkResult on_vkSetBufferCollectionImageConstraintsFUCHSIA(
void* context,
VkResult,
VkDevice device,
VkBufferCollectionFUCHSIA collection,
const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
VkEncoder* enc = (VkEncoder*)context;
auto sysmem_collection = reinterpret_cast<
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
collection);
return setBufferCollectionImageConstraintsFUCHSIA(
enc, device, sysmem_collection, pImageConstraintsInfo);
}
VkResult on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
void*,
VkResult,
VkDevice,
VkBufferCollectionFUCHSIA collection,
const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
auto sysmem_collection = reinterpret_cast<
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
collection);
return setBufferCollectionBufferConstraintsFUCHSIA(
sysmem_collection, pBufferConstraintsInfo);
}
VkResult getBufferCollectionImageCreateInfoIndexLocked(
VkBufferCollectionFUCHSIA collection,
fuchsia_sysmem::wire::BufferCollectionInfo2& info,
uint32_t* outCreateInfoIndex) {
if (!info_VkBufferCollectionFUCHSIA[collection]
.constraints.hasValue()) {
ALOGE("%s: constraints not set", __func__);
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
if (!info.settings.has_image_format_constraints) {
// no image format constraints, skip getting createInfoIndex.
return VK_SUCCESS;
}
const auto& constraints =
*info_VkBufferCollectionFUCHSIA[collection].constraints;
const auto& createInfoIndices =
info_VkBufferCollectionFUCHSIA[collection].createInfoIndex;
const auto& out = info.settings.image_format_constraints;
bool foundCreateInfo = false;
for (size_t imageFormatIndex = 0;
imageFormatIndex < constraints.image_format_constraints_count;
imageFormatIndex++) {
const auto& in =
constraints.image_format_constraints[imageFormatIndex];
// These checks are sorted in order of how often they're expected to
// mismatch, from most likely to least likely. They aren't always
// equality comparisons, since sysmem may change some values in
// compatible ways on behalf of the other participants.
if ((out.pixel_format.type != in.pixel_format.type) ||
(out.pixel_format.has_format_modifier !=
in.pixel_format.has_format_modifier) ||
(out.pixel_format.format_modifier.value !=
in.pixel_format.format_modifier.value) ||
(out.min_bytes_per_row < in.min_bytes_per_row) ||
(out.required_max_coded_width < in.required_max_coded_width) ||
(out.required_max_coded_height <
in.required_max_coded_height) ||
(in.bytes_per_row_divisor != 0 &&
out.bytes_per_row_divisor % in.bytes_per_row_divisor != 0)) {
continue;
}
// Check if the out colorspaces are a subset of the in color spaces.
bool all_color_spaces_found = true;
for (uint32_t j = 0; j < out.color_spaces_count; j++) {
bool found_matching_color_space = false;
for (uint32_t k = 0; k < in.color_spaces_count; k++) {
if (out.color_space[j].type == in.color_space[k].type) {
found_matching_color_space = true;
break;
}
}
if (!found_matching_color_space) {
all_color_spaces_found = false;
break;
}
}
if (!all_color_spaces_found) {
continue;
}
// Choose the first valid format for now.
*outCreateInfoIndex = createInfoIndices[imageFormatIndex];
return VK_SUCCESS;
}
ALOGE("%s: cannot find a valid image format in constraints", __func__);
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    // Implements vkGetBufferCollectionPropertiesFUCHSIA: waits for sysmem
    // to finish allocating |collection|, then fills |pProperties| (memory
    // type bits, buffer count, sysmem pixel format / color space,
    // createInfoIndex, format features, identity YCbCr defaults) and
    // caches a shallow copy (pNext chains stripped) in
    // info_VkBufferCollectionFUCHSIA.
    VkResult on_vkGetBufferCollectionPropertiesFUCHSIA(
        void* context,
        VkResult,
        VkDevice device,
        VkBufferCollectionFUCHSIA collection,
        VkBufferCollectionPropertiesFUCHSIA* pProperties) {
        VkEncoder* enc = (VkEncoder*)context;
        // The handle value is a pointer to the sysmem FIDL client.
        const auto& sysmem_collection = *reinterpret_cast<
            fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
            collection);
        // Blocks until sysmem has allocated (or failed to allocate) the
        // collection's buffers.
        auto result = sysmem_collection->WaitForBuffersAllocated();
        if (!result.ok() || result->status != ZX_OK) {
            // GET_STATUS_SAFE avoids dereferencing |result| when the FIDL
            // call itself failed.
            ALOGE("Failed wait for allocation: %d %d", result.status(),
                  GET_STATUS_SAFE(result, status));
            return VK_ERROR_INITIALIZATION_FAILED;
        }
        fuchsia_sysmem::wire::BufferCollectionInfo2 info =
            std::move(result->buffer_collection_info);
        // Only the two goldfish heaps are meaningful to this ICD.
        bool is_host_visible =
            info.settings.buffer_settings.heap ==
            fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
        bool is_device_local =
            info.settings.buffer_settings.heap ==
            fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
        if (!is_host_visible && !is_device_local) {
            // NOTE(review): "0x%lu" prints the value in decimal despite the
            // "0x" prefix — consider %#lx / PRIx64.
            ALOGE("buffer collection uses a non-goldfish heap (type 0x%lu)",
                  static_cast<uint64_t>(info.settings.buffer_settings.heap));
            return VK_ERROR_INITIALIZATION_FAILED;
        }
        // memoryTypeBits
        // ====================================================================
        {
            AutoLock<RecursiveLock> lock(mLock);
            auto deviceIt = info_VkDevice.find(device);
            if (deviceIt == info_VkDevice.end()) {
                return VK_ERROR_INITIALIZATION_FAILED;
            }
            auto& deviceInfo = deviceIt->second;
            // Device local memory type supported.
            pProperties->memoryTypeBits = 0;
            for (uint32_t i = 0; i < deviceInfo.memProps.memoryTypeCount; ++i) {
                // Report every memory type whose property flags match the
                // heap this collection was allocated from.
                if ((is_device_local &&
                     (deviceInfo.memProps.memoryTypes[i].propertyFlags &
                      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
                    (is_host_visible &&
                     (deviceInfo.memProps.memoryTypes[i].propertyFlags &
                      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
                    pProperties->memoryTypeBits |= 1ull << i;
                }
            }
        }
        // bufferCount
        // ====================================================================
        pProperties->bufferCount = info.buffer_count;
        // Caches |*pProperties| into the tracker map; shared by the early
        // return below (no image constraints) and the end of the function.
        auto storeProperties = [this, collection, pProperties]() -> VkResult {
            // store properties to storage
            AutoLock<RecursiveLock> lock(mLock);
            if (info_VkBufferCollectionFUCHSIA.find(collection) ==
                info_VkBufferCollectionFUCHSIA.end()) {
                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }
            info_VkBufferCollectionFUCHSIA[collection].properties =
                android::base::makeOptional(*pProperties);
            // We only do a shallow copy so we should remove all pNext pointers.
            info_VkBufferCollectionFUCHSIA[collection].properties->pNext =
                nullptr;
            info_VkBufferCollectionFUCHSIA[collection]
                .properties->sysmemColorSpaceIndex.pNext = nullptr;
            return VK_SUCCESS;
        };
        // The fields below only apply to buffer collections with image formats.
        if (!info.settings.has_image_format_constraints) {
            ALOGD("%s: buffer collection doesn't have image format constraints",
                  __func__);
            return storeProperties();
        }
        // sysmemFormat
        // ====================================================================
        pProperties->sysmemPixelFormat = static_cast<uint64_t>(
            info.settings.image_format_constraints.pixel_format.type);
        // colorSpace
        // ====================================================================
        if (info.settings.image_format_constraints.color_spaces_count == 0) {
            ALOGE(
                "%s: color space missing from allocated buffer collection "
                "constraints",
                __func__);
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        // Only report first colorspace for now.
        pProperties->sysmemColorSpaceIndex.colorSpace = static_cast<uint32_t>(
            info.settings.image_format_constraints.color_space[0].type);
        // createInfoIndex
        // ====================================================================
        {
            AutoLock<RecursiveLock> lock(mLock);
            auto getIndexResult = getBufferCollectionImageCreateInfoIndexLocked(
                collection, info, &pProperties->createInfoIndex);
            if (getIndexResult != VK_SUCCESS) {
                return getIndexResult;
            }
        }
        // formatFeatures
        // ====================================================================
        VkPhysicalDevice physicalDevice;
        {
            AutoLock<RecursiveLock> lock(mLock);
            auto deviceIt = info_VkDevice.find(device);
            if (deviceIt == info_VkDevice.end()) {
                return VK_ERROR_INITIALIZATION_FAILED;
            }
            physicalDevice = deviceIt->second.physdev;
        }
        VkFormat vkFormat = sysmemPixelFormatTypeToVk(
            info.settings.image_format_constraints.pixel_format.type);
        VkFormatProperties formatProperties;
        enc->vkGetPhysicalDeviceFormatProperties(
            physicalDevice, vkFormat, &formatProperties, true /* do lock */);
        // Device-local allocations report optimal-tiling features;
        // host-visible allocations report linear-tiling features.
        if (is_device_local) {
            pProperties->formatFeatures =
                formatProperties.optimalTilingFeatures;
        }
        if (is_host_visible) {
            pProperties->formatFeatures = formatProperties.linearTilingFeatures;
        }
        // YCbCr properties
        // ====================================================================
        // TODO(59804): Implement this correctly when we support YUV pixel
        // formats in goldfish ICD.
        pProperties->samplerYcbcrConversionComponents.r =
            VK_COMPONENT_SWIZZLE_IDENTITY;
        pProperties->samplerYcbcrConversionComponents.g =
            VK_COMPONENT_SWIZZLE_IDENTITY;
        pProperties->samplerYcbcrConversionComponents.b =
            VK_COMPONENT_SWIZZLE_IDENTITY;
        pProperties->samplerYcbcrConversionComponents.a =
            VK_COMPONENT_SWIZZLE_IDENTITY;
        pProperties->suggestedYcbcrModel =
            VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
        pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
        pProperties->suggestedXChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
        pProperties->suggestedYChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
        return storeProperties();
    }
#endif
    // Wraps freshly host-allocated VkDeviceMemory |mem| in a CoherentMemory
    // mapping the guest can access directly. Two transports:
    //  - hasDirectMem: map the host allocation into the guest address space
    //    via vkMapMemoryIntoAddressSpaceGOOGLE and adopt the goldfish
    //    address-space block recorded at allocation time;
    //  - hasVirtioGpuNext: create a mappable host-3d virtio-gpu blob for
    //    the host allocation and map that.
    // On failure returns nullptr with |res| set to the error; aborts when
    // neither transport is available.
    CoherentMemoryPtr createCoherentMemory(VkDevice device,
                                           VkDeviceMemory mem,
                                           const VkMemoryAllocateInfo& hostAllocationInfo,
                                           VkEncoder* enc,
                                           VkResult& res)
    {
        CoherentMemoryPtr coherentMemory = nullptr;
        if (mFeatureInfo->hasDirectMem) {
            // Ask the host for the GPU address of |mem|.
            uint64_t gpuAddr = 0;
            GoldfishAddressSpaceBlockPtr block = nullptr;
            res = enc->vkMapMemoryIntoAddressSpaceGOOGLE(device, mem, &gpuAddr, true);
            if (res != VK_SUCCESS) {
                return coherentMemory;
            }
            {
                AutoLock<RecursiveLock> lock(mLock);
                auto it = info_VkDeviceMemory.find(mem);
                if (it == info_VkDeviceMemory.end()) {
                    res = VK_ERROR_OUT_OF_HOST_MEMORY;
                    return coherentMemory;
                }
                auto& info = it->second;
                // Transfer the address-space block from the tracker entry
                // to the CoherentMemory object (entry is nulled out so it
                // is not released twice).
                block = info.goldfishBlock;
                info.goldfishBlock = nullptr;
                coherentMemory =
                    std::make_shared<CoherentMemory>(block, gpuAddr, hostAllocationInfo.allocationSize, device, mem);
            }
        } else if (mFeatureInfo->hasVirtioGpuNext) {
            // Fetch the host address / size / blob-id triple for |mem|,
            // then create and map a host-3d blob of the allocation size.
            struct VirtGpuCreateBlob createBlob = { 0 };
            uint64_t hvaSizeId[3];
            res = enc->vkGetMemoryHostAddressInfoGOOGLE(device, mem,
                &hvaSizeId[0], &hvaSizeId[1], &hvaSizeId[2], true /* do lock */);
            if(res != VK_SUCCESS) {
                return coherentMemory;
            }
            {
                AutoLock<RecursiveLock> lock(mLock);
                VirtGpuDevice& instance = VirtGpuDevice::getInstance((enum VirtGpuCapset)3);
                createBlob.blobMem = kBlobMemHost3d;
                createBlob.flags = kBlobFlagMappable;
                // hvaSizeId[2] is the host-side blob id for this allocation.
                createBlob.blobId = hvaSizeId[2];
                createBlob.size = hostAllocationInfo.allocationSize;
                auto blob = instance.createBlob(createBlob);
                if (!blob) {
                    res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
                    return coherentMemory;
                }
                VirtGpuBlobMappingPtr mapping = blob->createMapping();
                if (!mapping) {
                    res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
                    return coherentMemory;
                }
                coherentMemory =
                    std::make_shared<CoherentMemory>(mapping, createBlob.size, device, mem);
            }
        } else {
            ALOGE("FATAL: Unsupported virtual memory feature");
            abort();
        }
        return coherentMemory;
    }
VkResult allocateCoherentMemory(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo,
VkEncoder* enc, VkDeviceMemory* pMemory) {
uint64_t offset = 0;
uint8_t *ptr = nullptr;
VkMemoryAllocateFlagsInfo allocFlagsInfo;
VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo);
bool deviceAddressMemoryAllocation =
allocFlagsInfoPtr &&
((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
(allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
bool dedicated = deviceAddressMemoryAllocation;
VkMemoryAllocateInfo hostAllocationInfo = vk_make_orphan_copy(*pAllocateInfo);
vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&hostAllocationInfo);
if (dedicated) {
hostAllocationInfo.allocationSize =
((pAllocateInfo