| /*************************************************************************** |
| * |
| * Copyright (c) 2015-2023 The Khronos Group Inc. |
| * Copyright (c) 2015-2023 Valve Corporation |
| * Copyright (c) 2015-2023 LunarG, Inc. |
| * |
| * Licensed under the Apache License, Version 2.0 (the "License"); |
| * you may not use this file except in compliance with the License. |
| * You may obtain a copy of the License at |
| * |
| * http://www.apache.org/licenses/LICENSE-2.0 |
| * |
| * Unless required by applicable law or agreed to in writing, software |
| * distributed under the License is distributed on an "AS IS" BASIS, |
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| * See the License for the specific language governing permissions and |
| * limitations under the License. |
| ****************************************************************************/ |
| |
| #include "generated/chassis.h" |
| #include "generated/layer_chassis_dispatch.h" |
| #include "generated/vk_safe_struct.h" |
| #include "state_tracker/pipeline_state.h" |
| |
| std::shared_mutex dispatch_lock; |
| |
| #ifdef VK_USE_PLATFORM_METAL_EXT |
| // The vkExportMetalObjects extension returns data from the driver -- we've created a copy of the pNext chain, so |
| // copy the returned data to the caller |
| void CopyExportMetalObjects(const void *src_chain, const void *dst_chain) { |
| while (src_chain && dst_chain) |
| { |
| const VkStructureType type = reinterpret_cast<const VkBaseOutStructure *>(src_chain)->sType; |
| switch (type) { |
| case VK_STRUCTURE_TYPE_EXPORT_METAL_DEVICE_INFO_EXT: |
| { |
| auto *pSrc = reinterpret_cast<const VkExportMetalDeviceInfoEXT*>(src_chain); |
| auto *pDstConst = reinterpret_cast<const VkExportMetalDeviceInfoEXT*>(dst_chain); |
| auto* pDst = const_cast<VkExportMetalDeviceInfoEXT*>(pDstConst); |
| pDst->mtlDevice = pSrc->mtlDevice; |
| break; |
| } |
| case VK_STRUCTURE_TYPE_EXPORT_METAL_COMMAND_QUEUE_INFO_EXT: |
| { |
| const auto*pSrc = reinterpret_cast<const VkExportMetalCommandQueueInfoEXT*>(src_chain); |
| auto *pDstConst = reinterpret_cast<const VkExportMetalCommandQueueInfoEXT*>(dst_chain); |
| auto* pDst = const_cast<VkExportMetalCommandQueueInfoEXT*>(pDstConst); |
| pDst->mtlCommandQueue = pSrc->mtlCommandQueue; |
| break; |
| } |
| case VK_STRUCTURE_TYPE_EXPORT_METAL_BUFFER_INFO_EXT: |
| { |
| const auto*pSrc = reinterpret_cast<const VkExportMetalBufferInfoEXT*>(src_chain); |
| auto *pDstConst = reinterpret_cast<const VkExportMetalBufferInfoEXT*>(dst_chain); |
| auto* pDst = const_cast<VkExportMetalBufferInfoEXT*>(pDstConst); |
| pDst->mtlBuffer = pSrc->mtlBuffer; |
| break; |
| } |
| case VK_STRUCTURE_TYPE_EXPORT_METAL_TEXTURE_INFO_EXT: |
| { |
| const auto*pSrc = reinterpret_cast<const VkExportMetalTextureInfoEXT*>(src_chain); |
| auto *pDstConst = reinterpret_cast<const VkExportMetalTextureInfoEXT*>(dst_chain); |
| auto* pDst = const_cast<VkExportMetalTextureInfoEXT*>(pDstConst); |
| pDst->mtlTexture = pSrc->mtlTexture; |
| break; |
| } |
| case VK_STRUCTURE_TYPE_EXPORT_METAL_IO_SURFACE_INFO_EXT: |
| { |
| const auto*pSrc = reinterpret_cast<const VkExportMetalIOSurfaceInfoEXT*>(src_chain); |
| auto *pDstConst = reinterpret_cast<const VkExportMetalIOSurfaceInfoEXT*>(dst_chain); |
| auto* pDst = const_cast<VkExportMetalIOSurfaceInfoEXT*>(pDstConst); |
| pDst->ioSurface = pSrc->ioSurface; |
| break; |
| } |
| case VK_STRUCTURE_TYPE_EXPORT_METAL_SHARED_EVENT_INFO_EXT: |
| { |
| const auto*pSrc = reinterpret_cast<const VkExportMetalSharedEventInfoEXT*>(src_chain); |
| auto *pDstConst = reinterpret_cast<const VkExportMetalSharedEventInfoEXT*>(dst_chain); |
| auto* pDst = const_cast<VkExportMetalSharedEventInfoEXT*>(pDstConst); |
| pDst->mtlSharedEvent = pSrc->mtlSharedEvent; |
| break; |
| } |
| default: |
| assert(false); |
| break; |
| } |
| |
| // Handle pNext chaining |
| src_chain = reinterpret_cast<const VkBaseOutStructure *>(src_chain)->pNext; |
| dst_chain = reinterpret_cast<const VkBaseOutStructure *>(dst_chain)->pNext; |
| } |
| } |
| |
| void DispatchExportMetalObjectsEXT( |
| VkDevice device, |
| VkExportMetalObjectsInfoEXT* pMetalObjectsInfo) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.ExportMetalObjectsEXT(device, pMetalObjectsInfo); |
| safe_VkExportMetalObjectsInfoEXT var_local_pMetalObjectsInfo; |
| safe_VkExportMetalObjectsInfoEXT *local_pMetalObjectsInfo = nullptr; { |
| if (pMetalObjectsInfo) { |
| local_pMetalObjectsInfo = &var_local_pMetalObjectsInfo; |
| local_pMetalObjectsInfo->initialize(pMetalObjectsInfo); |
| WrapPnextChainHandles(layer_data, local_pMetalObjectsInfo->pNext); |
| } |
| } |
| layer_data->device_dispatch_table.ExportMetalObjectsEXT(device, (VkExportMetalObjectsInfoEXT*)local_pMetalObjectsInfo); |
| if (pMetalObjectsInfo) { CopyExportMetalObjects(local_pMetalObjectsInfo->pNext, pMetalObjectsInfo->pNext); } |
| |
| } |
| |
| #endif // VK_USE_PLATFORM_METAL_EXT |
| |
| // The VK_EXT_pipeline_creation_feedback extension returns data from the driver -- we've created a copy of the pnext chain, so |
| // copy the returned data to the caller before freeing the copy's data. |
| void CopyCreatePipelineFeedbackData(const void *src_chain, const void *dst_chain) { |
| auto src_feedback_struct = vku::FindStructInPNextChain<VkPipelineCreationFeedbackCreateInfoEXT>(src_chain); |
| if (!src_feedback_struct) return; |
| auto dst_feedback_struct = const_cast<VkPipelineCreationFeedbackCreateInfoEXT *>( |
| vku::FindStructInPNextChain<VkPipelineCreationFeedbackCreateInfoEXT>(dst_chain)); |
| *dst_feedback_struct->pPipelineCreationFeedback = *src_feedback_struct->pPipelineCreationFeedback; |
| for (uint32_t i = 0; i < src_feedback_struct->pipelineStageCreationFeedbackCount; i++) { |
| dst_feedback_struct->pPipelineStageCreationFeedbacks[i] = src_feedback_struct->pPipelineStageCreationFeedbacks[i]; |
| } |
| } |
| |
// Unwraps every handle referenced by the pipeline create infos (layouts, render
// passes, shader modules, base pipelines, pipeline libraries), dispatches to the
// driver, then wraps the resulting pipeline handles for the caller.
VkResult DispatchCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                         const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                         const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // If handle wrapping is disabled there is nothing to translate -- pass through.
    if (!wrap_handles) return layer_data->device_dispatch_table.CreateGraphicsPipelines(device, pipelineCache, createInfoCount,
                                                                                        pCreateInfos, pAllocator, pPipelines);
    safe_VkGraphicsPipelineCreateInfo *local_pCreateInfos = nullptr;
    if (pCreateInfos) {
        local_pCreateInfos = new safe_VkGraphicsPipelineCreateInfo[createInfoCount];
        // Read lock protects renderpasses_states while we consult it below.
        ReadLockGuard lock(dispatch_lock);
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            bool uses_color_attachment = false;
            bool uses_depthstencil_attachment = false;
            {
                // Look up the attachment-usage record captured at render pass
                // creation time; the map is keyed by the driver (unwrapped) handle.
                const auto subpasses_uses_it = layer_data->renderpasses_states.find(layer_data->Unwrap(pCreateInfos[idx0].renderPass));
                if (subpasses_uses_it != layer_data->renderpasses_states.end()) {
                    const auto &subpasses_uses = subpasses_uses_it->second;
                    if (subpasses_uses.subpasses_using_color_attachment.count(pCreateInfos[idx0].subpass))
                        uses_color_attachment = true;
                    if (subpasses_uses.subpasses_using_depthstencil_attachment.count(pCreateInfos[idx0].subpass))
                        uses_depthstencil_attachment = true;
                }
            }

            // Dynamic rendering (VkPipelineRenderingCreateInfo) overrides the
            // render-pass-derived usage flags when present in the pNext chain.
            auto dynamic_rendering = vku::FindStructInPNextChain<VkPipelineRenderingCreateInfo>(pCreateInfos[idx0].pNext);
            if (dynamic_rendering) {
                uses_color_attachment = (dynamic_rendering->colorAttachmentCount > 0);
                uses_depthstencil_attachment = (dynamic_rendering->depthAttachmentFormat != VK_FORMAT_UNDEFINED ||
                                                dynamic_rendering->stencilAttachmentFormat != VK_FORMAT_UNDEFINED);
            }

            // Custom pNext copy hook so the safe-struct copy of the rendering
            // info can consult pipeline state during initialize() below.
            // NOTE(review): pnext_copy_state captures graphics_info by reference
            // and must not outlive this loop iteration -- it is only passed to
            // initialize(), which uses it synchronously.
            auto& graphics_info = pCreateInfos[idx0];
            auto state_info = dynamic_cast<ValidationStateTracker*>(layer_data);
            PNextCopyState pnext_copy_state = {
                [state_info, &graphics_info](VkBaseOutStructure* safe_struct, const VkBaseOutStructure *in_struct) -> bool {
                    return PIPELINE_STATE::PnextRenderingInfoCustomCopy(state_info, graphics_info, safe_struct, in_struct);
                }
            };
            local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0], uses_color_attachment, uses_depthstencil_attachment, &pnext_copy_state);

            // Unwrap every handle embedded in the create info so the driver
            // receives its own handles.
            if (pCreateInfos[idx0].basePipelineHandle) {
                local_pCreateInfos[idx0].basePipelineHandle = layer_data->Unwrap(pCreateInfos[idx0].basePipelineHandle);
            }
            if (pCreateInfos[idx0].layout) {
                local_pCreateInfos[idx0].layout = layer_data->Unwrap(pCreateInfos[idx0].layout);
            }
            if (pCreateInfos[idx0].pStages) {
                for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
                    if (pCreateInfos[idx0].pStages[idx1].module) {
                        local_pCreateInfos[idx0].pStages[idx1].module = layer_data->Unwrap(pCreateInfos[idx0].pStages[idx1].module);
                    }
                }
            }
            if (pCreateInfos[idx0].renderPass) {
                local_pCreateInfos[idx0].renderPass = layer_data->Unwrap(pCreateInfos[idx0].renderPass);
            }

            // Pipeline libraries (VK_KHR_pipeline_library) carry pipeline handles
            // in the pNext chain; unwrap them in the local copy in place.
            auto* link_info = vku::FindStructInPNextChain<VkPipelineLibraryCreateInfoKHR>(local_pCreateInfos[idx0].pNext);
            if (link_info) {
                auto* unwrapped_libs = const_cast<VkPipeline*>(link_info->pLibraries);
                for (uint32_t idx1 = 0; idx1 < link_info->libraryCount; ++idx1) {
                    unwrapped_libs[idx1] = layer_data->Unwrap(link_info->pLibraries[idx1]);
                }
            }
        }
    }
    if (pipelineCache) {
        pipelineCache = layer_data->Unwrap(pipelineCache);
    }

    // safe_* structs are layout-compatible with the Vk structs, so ->ptr() on
    // the first array element serves as the array pointer for the driver call.
    VkResult result = layer_data->device_dispatch_table.CreateGraphicsPipelines(device, pipelineCache, createInfoCount,
                                                                                local_pCreateInfos->ptr(), pAllocator, pPipelines);
    // Copy pipeline-creation-feedback output from the shadow chain back to the
    // caller's chain before the shadow copies are freed.
    for (uint32_t i = 0; i < createInfoCount; ++i) {
        if (pCreateInfos[i].pNext != VK_NULL_HANDLE) {
            CopyCreatePipelineFeedbackData(local_pCreateInfos[i].pNext, pCreateInfos[i].pNext);
        }
    }

    delete[] local_pCreateInfos;
    {
        // Wrap the successfully created pipelines; failed slots stay VK_NULL_HANDLE.
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            if (pPipelines[i] != VK_NULL_HANDLE) {
                pPipelines[i] = layer_data->WrapNew(pPipelines[i]);
            }
        }
    }
    return result;
}
| |
| template <typename T> |
| static void UpdateCreateRenderPassState(ValidationObject *layer_data, const T *pCreateInfo, VkRenderPass renderPass) { |
| auto &renderpass_state = layer_data->renderpasses_states[renderPass]; |
| |
| for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) { |
| bool uses_color = false; |
| for (uint32_t i = 0; i < pCreateInfo->pSubpasses[subpass].colorAttachmentCount && !uses_color; ++i) |
| if (pCreateInfo->pSubpasses[subpass].pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) uses_color = true; |
| |
| bool uses_depthstencil = false; |
| if (pCreateInfo->pSubpasses[subpass].pDepthStencilAttachment) |
| if (pCreateInfo->pSubpasses[subpass].pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) |
| uses_depthstencil = true; |
| |
| if (uses_color) renderpass_state.subpasses_using_color_attachment.insert(subpass); |
| if (uses_depthstencil) renderpass_state.subpasses_using_depthstencil_attachment.insert(subpass); |
| } |
| } |
| |
| VkResult DispatchCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, |
| const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| VkResult result = layer_data->device_dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass); |
| if (!wrap_handles) return result; |
| if (VK_SUCCESS == result) { |
| WriteLockGuard lock(dispatch_lock); |
| UpdateCreateRenderPassState(layer_data, pCreateInfo, *pRenderPass); |
| *pRenderPass = layer_data->WrapNew(*pRenderPass); |
| } |
| return result; |
| } |
| |
| VkResult DispatchCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo, |
| const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| VkResult result = layer_data->device_dispatch_table.CreateRenderPass2KHR(device, pCreateInfo, pAllocator, pRenderPass); |
| if (!wrap_handles) return result; |
| if (VK_SUCCESS == result) { |
| WriteLockGuard lock(dispatch_lock); |
| UpdateCreateRenderPassState(layer_data, pCreateInfo, *pRenderPass); |
| *pRenderPass = layer_data->WrapNew(*pRenderPass); |
| } |
| return result; |
| } |
| |
| VkResult DispatchCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo, |
| const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| VkResult result = layer_data->device_dispatch_table.CreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass); |
| if (!wrap_handles) return result; |
| if (VK_SUCCESS == result) { |
| WriteLockGuard lock(dispatch_lock); |
| UpdateCreateRenderPassState(layer_data, pCreateInfo, *pRenderPass); |
| *pRenderPass = layer_data->WrapNew(*pRenderPass); |
| } |
| return result; |
| } |
| |
| void DispatchDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.DestroyRenderPass(device, renderPass, pAllocator); |
| uint64_t renderPass_id = CastToUint64(renderPass); |
| |
| auto iter = unique_id_mapping.pop(renderPass_id); |
| if (iter != unique_id_mapping.end()) { |
| renderPass = (VkRenderPass)iter->second; |
| } else { |
| renderPass = (VkRenderPass)0; |
| } |
| |
| layer_data->device_dispatch_table.DestroyRenderPass(device, renderPass, pAllocator); |
| |
| WriteLockGuard lock(dispatch_lock); |
| layer_data->renderpasses_states.erase(renderPass); |
| } |
| |
| VkResult DispatchCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, |
| const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain); |
| safe_VkSwapchainCreateInfoKHR *local_pCreateInfo = nullptr; |
| if (pCreateInfo) { |
| local_pCreateInfo = new safe_VkSwapchainCreateInfoKHR(pCreateInfo); |
| local_pCreateInfo->oldSwapchain = layer_data->Unwrap(pCreateInfo->oldSwapchain); |
| // Surface is instance-level object |
| local_pCreateInfo->surface = layer_data->Unwrap(pCreateInfo->surface); |
| } |
| |
| VkResult result = layer_data->device_dispatch_table.CreateSwapchainKHR(device, local_pCreateInfo->ptr(), pAllocator, pSwapchain); |
| delete local_pCreateInfo; |
| |
| if (VK_SUCCESS == result) { |
| *pSwapchain = layer_data->WrapNew(*pSwapchain); |
| } |
| return result; |
| } |
| |
| VkResult DispatchCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR *pCreateInfos, |
| const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) |
| return layer_data->device_dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, |
| pSwapchains); |
| safe_VkSwapchainCreateInfoKHR *local_pCreateInfos = nullptr; |
| { |
| if (pCreateInfos) { |
| local_pCreateInfos = new safe_VkSwapchainCreateInfoKHR[swapchainCount]; |
| for (uint32_t i = 0; i < swapchainCount; ++i) { |
| local_pCreateInfos[i].initialize(&pCreateInfos[i]); |
| if (pCreateInfos[i].surface) { |
| // Surface is instance-level object |
| local_pCreateInfos[i].surface = layer_data->Unwrap(pCreateInfos[i].surface); |
| } |
| if (pCreateInfos[i].oldSwapchain) { |
| local_pCreateInfos[i].oldSwapchain = layer_data->Unwrap(pCreateInfos[i].oldSwapchain); |
| } |
| } |
| } |
| } |
| VkResult result = layer_data->device_dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, local_pCreateInfos->ptr(), |
| pAllocator, pSwapchains); |
| delete[] local_pCreateInfos; |
| if (VK_SUCCESS == result) { |
| for (uint32_t i = 0; i < swapchainCount; i++) { |
| pSwapchains[i] = layer_data->WrapNew(pSwapchains[i]); |
| } |
| } |
| return result; |
| } |
| |
| VkResult DispatchGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount, |
| VkImage *pSwapchainImages) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) |
| return layer_data->device_dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages); |
| VkSwapchainKHR wrapped_swapchain_handle = swapchain; |
| if (VK_NULL_HANDLE != swapchain) { |
| swapchain = layer_data->Unwrap(swapchain); |
| } |
| VkResult result = |
| layer_data->device_dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages); |
| if ((VK_SUCCESS == result) || (VK_INCOMPLETE == result)) { |
| if ((*pSwapchainImageCount > 0) && pSwapchainImages) { |
| WriteLockGuard lock(dispatch_lock); |
| auto &wrapped_swapchain_image_handles = layer_data->swapchain_wrapped_image_handle_map[wrapped_swapchain_handle]; |
| for (uint32_t i = static_cast<uint32_t>(wrapped_swapchain_image_handles.size()); i < *pSwapchainImageCount; i++) { |
| wrapped_swapchain_image_handles.emplace_back(layer_data->WrapNew(pSwapchainImages[i])); |
| } |
| for (uint32_t i = 0; i < *pSwapchainImageCount; i++) { |
| pSwapchainImages[i] = wrapped_swapchain_image_handles[i]; |
| } |
| } |
| } |
| return result; |
| } |
| |
| void DispatchDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator); |
| WriteLockGuard lock(dispatch_lock); |
| |
| auto &image_array = layer_data->swapchain_wrapped_image_handle_map[swapchain]; |
| for (auto &image_handle : image_array) { |
| unique_id_mapping.erase(HandleToUint64(image_handle)); |
| } |
| layer_data->swapchain_wrapped_image_handle_map.erase(swapchain); |
| lock.unlock(); |
| |
| uint64_t swapchain_id = HandleToUint64(swapchain); |
| |
| auto iter = unique_id_mapping.pop(swapchain_id); |
| if (iter != unique_id_mapping.end()) { |
| swapchain = (VkSwapchainKHR)iter->second; |
| } else { |
| swapchain = (VkSwapchainKHR)0; |
| } |
| |
| layer_data->device_dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator); |
| } |
| |
| VkResult DispatchQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.QueuePresentKHR(queue, pPresentInfo); |
| safe_VkPresentInfoKHR *local_pPresentInfo = nullptr; |
| { |
| if (pPresentInfo) { |
| local_pPresentInfo = new safe_VkPresentInfoKHR(pPresentInfo); |
| if (local_pPresentInfo->pWaitSemaphores) { |
| for (uint32_t index1 = 0; index1 < local_pPresentInfo->waitSemaphoreCount; ++index1) { |
| local_pPresentInfo->pWaitSemaphores[index1] = layer_data->Unwrap(pPresentInfo->pWaitSemaphores[index1]); |
| } |
| } |
| if (local_pPresentInfo->pSwapchains) { |
| for (uint32_t index1 = 0; index1 < local_pPresentInfo->swapchainCount; ++index1) { |
| local_pPresentInfo->pSwapchains[index1] = layer_data->Unwrap(pPresentInfo->pSwapchains[index1]); |
| } |
| } |
| WrapPnextChainHandles(layer_data, local_pPresentInfo->pNext); |
| } |
| } |
| VkResult result = layer_data->device_dispatch_table.QueuePresentKHR(queue, local_pPresentInfo->ptr()); |
| |
| // pResults is an output array embedded in a structure. The code generator neglects to copy back from the safe_* version, |
| // so handle it as a special case here: |
| if (pPresentInfo && pPresentInfo->pResults) { |
| for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) { |
| pPresentInfo->pResults[i] = local_pPresentInfo->pResults[i]; |
| } |
| } |
| delete local_pPresentInfo; |
| return result; |
| } |
| |
| void DispatchDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator); |
| WriteLockGuard lock(dispatch_lock); |
| |
| // remove references to implicitly freed descriptor sets |
| for(auto descriptor_set : layer_data->pool_descriptor_sets_map[descriptorPool]) { |
| unique_id_mapping.erase(CastToUint64(descriptor_set)); |
| } |
| layer_data->pool_descriptor_sets_map.erase(descriptorPool); |
| lock.unlock(); |
| |
| uint64_t descriptorPool_id = CastToUint64(descriptorPool); |
| |
| auto iter = unique_id_mapping.pop(descriptorPool_id); |
| if (iter != unique_id_mapping.end()) { |
| descriptorPool = (VkDescriptorPool)iter->second; |
| } else { |
| descriptorPool = (VkDescriptorPool)0; |
| } |
| |
| layer_data->device_dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator); |
| } |
| |
| VkResult DispatchResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.ResetDescriptorPool(device, descriptorPool, flags); |
| VkDescriptorPool local_descriptor_pool = VK_NULL_HANDLE; |
| { |
| local_descriptor_pool = layer_data->Unwrap(descriptorPool); |
| } |
| VkResult result = layer_data->device_dispatch_table.ResetDescriptorPool(device, local_descriptor_pool, flags); |
| if (VK_SUCCESS == result) { |
| WriteLockGuard lock(dispatch_lock); |
| // remove references to implicitly freed descriptor sets |
| for(auto descriptor_set : layer_data->pool_descriptor_sets_map[descriptorPool]) { |
| unique_id_mapping.erase(CastToUint64(descriptor_set)); |
| } |
| layer_data->pool_descriptor_sets_map[descriptorPool].clear(); |
| } |
| |
| return result; |
| } |
| |
| VkResult DispatchAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, |
| VkDescriptorSet *pDescriptorSets) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets); |
| safe_VkDescriptorSetAllocateInfo *local_pAllocateInfo = nullptr; |
| { |
| if (pAllocateInfo) { |
| local_pAllocateInfo = new safe_VkDescriptorSetAllocateInfo(pAllocateInfo); |
| if (pAllocateInfo->descriptorPool) { |
| local_pAllocateInfo->descriptorPool = layer_data->Unwrap(pAllocateInfo->descriptorPool); |
| } |
| if (local_pAllocateInfo->pSetLayouts) { |
| for (uint32_t index1 = 0; index1 < local_pAllocateInfo->descriptorSetCount; ++index1) { |
| local_pAllocateInfo->pSetLayouts[index1] = layer_data->Unwrap(local_pAllocateInfo->pSetLayouts[index1]); |
| } |
| } |
| } |
| } |
| VkResult result = layer_data->device_dispatch_table.AllocateDescriptorSets( |
| device, (const VkDescriptorSetAllocateInfo *)local_pAllocateInfo, pDescriptorSets); |
| if (local_pAllocateInfo) { |
| delete local_pAllocateInfo; |
| } |
| if (VK_SUCCESS == result) { |
| WriteLockGuard lock(dispatch_lock); |
| auto &pool_descriptor_sets = layer_data->pool_descriptor_sets_map[pAllocateInfo->descriptorPool]; |
| for (uint32_t index0 = 0; index0 < pAllocateInfo->descriptorSetCount; index0++) { |
| pDescriptorSets[index0] = layer_data->WrapNew(pDescriptorSets[index0]); |
| pool_descriptor_sets.insert(pDescriptorSets[index0]); |
| } |
| } |
| return result; |
| } |
| |
| VkResult DispatchFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, |
| const VkDescriptorSet *pDescriptorSets) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) |
| return layer_data->device_dispatch_table.FreeDescriptorSets(device, descriptorPool, descriptorSetCount, pDescriptorSets); |
| VkDescriptorSet *local_pDescriptorSets = nullptr; |
| VkDescriptorPool local_descriptor_pool = VK_NULL_HANDLE; |
| { |
| local_descriptor_pool = layer_data->Unwrap(descriptorPool); |
| if (pDescriptorSets) { |
| local_pDescriptorSets = new VkDescriptorSet[descriptorSetCount]; |
| for (uint32_t index0 = 0; index0 < descriptorSetCount; ++index0) { |
| local_pDescriptorSets[index0] = layer_data->Unwrap(pDescriptorSets[index0]); |
| } |
| } |
| } |
| VkResult result = layer_data->device_dispatch_table.FreeDescriptorSets(device, local_descriptor_pool, descriptorSetCount, |
| (const VkDescriptorSet *)local_pDescriptorSets); |
| if (local_pDescriptorSets) delete[] local_pDescriptorSets; |
| if ((VK_SUCCESS == result) && (pDescriptorSets)) { |
| WriteLockGuard lock(dispatch_lock); |
| auto &pool_descriptor_sets = layer_data->pool_descriptor_sets_map[descriptorPool]; |
| for (uint32_t index0 = 0; index0 < descriptorSetCount; index0++) { |
| VkDescriptorSet handle = pDescriptorSets[index0]; |
| pool_descriptor_sets.erase(handle); |
| uint64_t unique_id = CastToUint64(handle); |
| unique_id_mapping.erase(unique_id); |
| } |
| } |
| return result; |
| } |
| |
| // This is the core version of this routine. The extension version is below. |
| VkResult DispatchCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo, |
| const VkAllocationCallbacks *pAllocator, |
| VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) |
| return layer_data->device_dispatch_table.CreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, |
| pDescriptorUpdateTemplate); |
| safe_VkDescriptorUpdateTemplateCreateInfo var_local_pCreateInfo; |
| safe_VkDescriptorUpdateTemplateCreateInfo *local_pCreateInfo = nullptr; |
| if (pCreateInfo) { |
| local_pCreateInfo = &var_local_pCreateInfo; |
| local_pCreateInfo->initialize(pCreateInfo); |
| if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) { |
| local_pCreateInfo->descriptorSetLayout = layer_data->Unwrap(pCreateInfo->descriptorSetLayout); |
| } |
| if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) { |
| local_pCreateInfo->pipelineLayout = layer_data->Unwrap(pCreateInfo->pipelineLayout); |
| } |
| } |
| VkResult result = layer_data->device_dispatch_table.CreateDescriptorUpdateTemplate(device, local_pCreateInfo->ptr(), pAllocator, |
| pDescriptorUpdateTemplate); |
| if (VK_SUCCESS == result) { |
| *pDescriptorUpdateTemplate = layer_data->WrapNew(*pDescriptorUpdateTemplate); |
| |
| // Shadow template createInfo for later updates |
| if (local_pCreateInfo) { |
| WriteLockGuard lock(dispatch_lock); |
| std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_pCreateInfo)); |
| layer_data->desc_template_createinfo_map[(uint64_t)*pDescriptorUpdateTemplate] = std::move(template_state); |
| } |
| } |
| return result; |
| } |
| |
| // This is the extension version of this routine. The core version is above. |
| VkResult DispatchCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo, |
| const VkAllocationCallbacks *pAllocator, |
| VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) |
| return layer_data->device_dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, |
| pDescriptorUpdateTemplate); |
| safe_VkDescriptorUpdateTemplateCreateInfo var_local_pCreateInfo; |
| safe_VkDescriptorUpdateTemplateCreateInfo *local_pCreateInfo = nullptr; |
| if (pCreateInfo) { |
| local_pCreateInfo = &var_local_pCreateInfo; |
| local_pCreateInfo->initialize(pCreateInfo); |
| if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) { |
| local_pCreateInfo->descriptorSetLayout = layer_data->Unwrap(pCreateInfo->descriptorSetLayout); |
| } |
| if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) { |
| local_pCreateInfo->pipelineLayout = layer_data->Unwrap(pCreateInfo->pipelineLayout); |
| } |
| } |
| VkResult result = layer_data->device_dispatch_table.CreateDescriptorUpdateTemplateKHR(device, local_pCreateInfo->ptr(), |
| pAllocator, pDescriptorUpdateTemplate); |
| |
| if (VK_SUCCESS == result) { |
| *pDescriptorUpdateTemplate = layer_data->WrapNew(*pDescriptorUpdateTemplate); |
| |
| // Shadow template createInfo for later updates |
| if (local_pCreateInfo) { |
| WriteLockGuard lock(dispatch_lock); |
| std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_pCreateInfo)); |
| layer_data->desc_template_createinfo_map[(uint64_t)*pDescriptorUpdateTemplate] = std::move(template_state); |
| } |
| } |
| return result; |
| } |
| |
| // This is the core version of this routine. The extension version is below. |
| void DispatchDestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, |
| const VkAllocationCallbacks *pAllocator) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) |
| return layer_data->device_dispatch_table.DestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator); |
| WriteLockGuard lock(dispatch_lock); |
| uint64_t descriptor_update_template_id = CastToUint64(descriptorUpdateTemplate); |
| layer_data->desc_template_createinfo_map.erase(descriptor_update_template_id); |
| lock.unlock(); |
| |
| auto iter = unique_id_mapping.pop(descriptor_update_template_id); |
| if (iter != unique_id_mapping.end()) { |
| descriptorUpdateTemplate = (VkDescriptorUpdateTemplate)iter->second; |
| } else { |
| descriptorUpdateTemplate = (VkDescriptorUpdateTemplate)0; |
| } |
| |
| layer_data->device_dispatch_table.DestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator); |
| } |
| |
| // This is the extension version of this routine. The core version is above. |
| void DispatchDestroyDescriptorUpdateTemplateKHR(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, |
| const VkAllocationCallbacks *pAllocator) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) |
| return layer_data->device_dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator); |
| WriteLockGuard lock(dispatch_lock); |
| uint64_t descriptor_update_template_id = CastToUint64(descriptorUpdateTemplate); |
| layer_data->desc_template_createinfo_map.erase(descriptor_update_template_id); |
| lock.unlock(); |
| |
| auto iter = unique_id_mapping.pop(descriptor_update_template_id); |
| if (iter != unique_id_mapping.end()) { |
| descriptorUpdateTemplate = (VkDescriptorUpdateTemplate)iter->second; |
| } else { |
| descriptorUpdateTemplate = (VkDescriptorUpdateTemplate)0; |
| } |
| |
| layer_data->device_dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator); |
| } |
| |
| void *BuildUnwrappedUpdateTemplateBuffer(ValidationObject *layer_data, uint64_t descriptorUpdateTemplate, const void *pData) { |
| auto const template_map_entry = layer_data->desc_template_createinfo_map.find(descriptorUpdateTemplate); |
| auto const &create_info = template_map_entry->second->create_info; |
| size_t allocation_size = 0; |
| std::vector<std::tuple<size_t, VulkanObjectType, uint64_t, size_t>> template_entries; |
| |
| for (uint32_t i = 0; i < create_info.descriptorUpdateEntryCount; i++) { |
| for (uint32_t j = 0; j < create_info.pDescriptorUpdateEntries[i].descriptorCount; j++) { |
| size_t offset = create_info.pDescriptorUpdateEntries[i].offset + j * create_info.pDescriptorUpdateEntries[i].stride; |
| char *update_entry = (char *)(pData) + offset; |
| |
| switch (create_info.pDescriptorUpdateEntries[i].descriptorType) { |
| case VK_DESCRIPTOR_TYPE_SAMPLER: |
| case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: |
| case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: |
| case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: |
| case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: { |
| auto image_entry = reinterpret_cast<VkDescriptorImageInfo *>(update_entry); |
| allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorImageInfo)); |
| |
| VkDescriptorImageInfo *wrapped_entry = new VkDescriptorImageInfo(*image_entry); |
| wrapped_entry->sampler = layer_data->Unwrap(image_entry->sampler); |
| wrapped_entry->imageView = layer_data->Unwrap(image_entry->imageView); |
| template_entries.emplace_back(offset, kVulkanObjectTypeImage, CastToUint64(wrapped_entry), 0); |
| } break; |
| |
| case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: |
| case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: |
| case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: |
| case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: { |
| auto buffer_entry = reinterpret_cast<VkDescriptorBufferInfo *>(update_entry); |
| allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorBufferInfo)); |
| |
| VkDescriptorBufferInfo *wrapped_entry = new VkDescriptorBufferInfo(*buffer_entry); |
| wrapped_entry->buffer = layer_data->Unwrap(buffer_entry->buffer); |
| template_entries.emplace_back(offset, kVulkanObjectTypeBuffer, CastToUint64(wrapped_entry), 0); |
| } break; |
| |
| case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: |
| case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: { |
| auto buffer_view_handle = reinterpret_cast<VkBufferView *>(update_entry); |
| allocation_size = std::max(allocation_size, offset + sizeof(VkBufferView)); |
| |
| VkBufferView wrapped_entry = layer_data->Unwrap(*buffer_view_handle); |
| template_entries.emplace_back(offset, kVulkanObjectTypeBufferView, CastToUint64(wrapped_entry), 0); |
| } break; |
| case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: { |
| size_t numBytes = create_info.pDescriptorUpdateEntries[i].descriptorCount; |
| allocation_size = std::max(allocation_size, offset + numBytes); |
| // nothing to unwrap, just plain data |
| template_entries.emplace_back(offset, kVulkanObjectTypeUnknown, CastToUint64(update_entry), |
| numBytes); |
| // to break out of the loop |
| j = create_info.pDescriptorUpdateEntries[i].descriptorCount; |
| } break; |
| case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV:{ |
| auto accstruct_nv_handle = reinterpret_cast<VkAccelerationStructureNV *>(update_entry); |
| allocation_size = std::max(allocation_size, offset + sizeof(VkAccelerationStructureNV )); |
| |
| VkAccelerationStructureNV wrapped_entry = layer_data->Unwrap(*accstruct_nv_handle); |
| template_entries.emplace_back(offset, kVulkanObjectTypeAccelerationStructureNV, CastToUint64(wrapped_entry), 0); |
| } break; |
| case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: { |
| auto accstruct_khr_handle = reinterpret_cast<VkAccelerationStructureKHR *>(update_entry); |
| allocation_size = std::max(allocation_size, offset + sizeof(VkAccelerationStructureKHR )); |
| |
| VkAccelerationStructureKHR wrapped_entry = layer_data->Unwrap(*accstruct_khr_handle); |
| template_entries.emplace_back(offset, kVulkanObjectTypeAccelerationStructureKHR, CastToUint64(wrapped_entry), 0); |
| } break; |
| default: |
| assert(0); |
| break; |
| } |
| } |
| } |
| // Allocate required buffer size and populate with source/unwrapped data |
| void *unwrapped_data = malloc(allocation_size); |
| for (auto &this_entry : template_entries) { |
| VulkanObjectType type = std::get<1>(this_entry); |
| void *destination = (char *)unwrapped_data + std::get<0>(this_entry); |
| uint64_t source = std::get<2>(this_entry); |
| size_t size = std::get<3>(this_entry); |
| |
| if (size != 0) { |
| assert(type == kVulkanObjectTypeUnknown); |
| memcpy(destination, CastFromUint64<void *>(source), size); |
| } else { |
| switch (type) { |
| case kVulkanObjectTypeImage: |
| *(reinterpret_cast<VkDescriptorImageInfo *>(destination)) = |
| *(reinterpret_cast<VkDescriptorImageInfo *>(source)); |
| delete CastFromUint64<VkDescriptorImageInfo *>(source); |
| break; |
| case kVulkanObjectTypeBuffer: |
| *(reinterpret_cast<VkDescriptorBufferInfo *>(destination)) = |
| *(CastFromUint64<VkDescriptorBufferInfo *>(source)); |
| delete CastFromUint64<VkDescriptorBufferInfo *>(source); |
| break; |
| case kVulkanObjectTypeBufferView: |
| *(reinterpret_cast<VkBufferView *>(destination)) = CastFromUint64<VkBufferView>(source); |
| break; |
| case kVulkanObjectTypeAccelerationStructureKHR: |
| *(reinterpret_cast<VkAccelerationStructureKHR *>(destination)) = CastFromUint64<VkAccelerationStructureKHR>(source); |
| break; |
| case kVulkanObjectTypeAccelerationStructureNV: |
| *(reinterpret_cast<VkAccelerationStructureNV *>(destination)) = CastFromUint64<VkAccelerationStructureNV>(source); |
| break; |
| default: |
| assert(0); |
| break; |
| } |
| } |
| } |
| return (void *)unwrapped_data; |
| } |
| |
| void DispatchUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet, |
| VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) |
| return layer_data->device_dispatch_table.UpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, |
| pData); |
| uint64_t template_handle = CastToUint64(descriptorUpdateTemplate); |
| void *unwrapped_buffer = nullptr; |
| { |
| ReadLockGuard lock(dispatch_lock); |
| descriptorSet = layer_data->Unwrap(descriptorSet); |
| descriptorUpdateTemplate = (VkDescriptorUpdateTemplate)layer_data->Unwrap(descriptorUpdateTemplate); |
| unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(layer_data, template_handle, pData); |
| } |
| layer_data->device_dispatch_table.UpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, unwrapped_buffer); |
| free(unwrapped_buffer); |
| } |
| |
| void DispatchUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet, |
| VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) |
| return layer_data->device_dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, |
| pData); |
| uint64_t template_handle = CastToUint64(descriptorUpdateTemplate); |
| void *unwrapped_buffer = nullptr; |
| { |
| ReadLockGuard lock(dispatch_lock); |
| descriptorSet = layer_data->Unwrap(descriptorSet); |
| descriptorUpdateTemplate = layer_data->Unwrap(descriptorUpdateTemplate); |
| unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(layer_data, template_handle, pData); |
| } |
| layer_data->device_dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, unwrapped_buffer); |
| free(unwrapped_buffer); |
| } |
| |
| void DispatchCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer, |
| VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, |
| uint32_t set, const void *pData) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map); |
| if (!wrap_handles) |
| return layer_data->device_dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, |
| layout, set, pData); |
| uint64_t template_handle = CastToUint64(descriptorUpdateTemplate); |
| void *unwrapped_buffer = nullptr; |
| { |
| ReadLockGuard lock(dispatch_lock); |
| descriptorUpdateTemplate = layer_data->Unwrap(descriptorUpdateTemplate); |
| layout = layer_data->Unwrap(layout); |
| unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(layer_data, template_handle, pData); |
| } |
| layer_data->device_dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, |
| unwrapped_buffer); |
| free(unwrapped_buffer); |
| } |
| |
| VkResult DispatchGetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, |
| VkDisplayPropertiesKHR *pProperties) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map); |
| VkResult result = |
| layer_data->instance_dispatch_table.GetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties); |
| if (!wrap_handles) return result; |
| if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { |
| for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { |
| pProperties[idx0].display = layer_data->MaybeWrapDisplay(pProperties[idx0].display, layer_data); |
| } |
| } |
| return result; |
| } |
| |
| VkResult DispatchGetPhysicalDeviceDisplayProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, |
| VkDisplayProperties2KHR *pProperties) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map); |
| VkResult result = |
| layer_data->instance_dispatch_table.GetPhysicalDeviceDisplayProperties2KHR(physicalDevice, pPropertyCount, pProperties); |
| if (!wrap_handles) return result; |
| if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { |
| for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { |
| pProperties[idx0].displayProperties.display = |
| layer_data->MaybeWrapDisplay(pProperties[idx0].displayProperties.display, layer_data); |
| } |
| } |
| return result; |
| } |
| |
| VkResult DispatchGetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, |
| VkDisplayPlanePropertiesKHR *pProperties) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map); |
| VkResult result = |
| layer_data->instance_dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties); |
| if (!wrap_handles) return result; |
| if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { |
| for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { |
| VkDisplayKHR &opt_display = pProperties[idx0].currentDisplay; |
| if (opt_display) opt_display = layer_data->MaybeWrapDisplay(opt_display, layer_data); |
| } |
| } |
| return result; |
| } |
| |
| VkResult DispatchGetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, |
| VkDisplayPlaneProperties2KHR *pProperties) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map); |
| VkResult result = layer_data->instance_dispatch_table.GetPhysicalDeviceDisplayPlaneProperties2KHR(physicalDevice, |
| pPropertyCount, pProperties); |
| if (!wrap_handles) return result; |
| if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { |
| for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { |
| VkDisplayKHR &opt_display = pProperties[idx0].displayPlaneProperties.currentDisplay; |
| if (opt_display) opt_display = layer_data->MaybeWrapDisplay(opt_display, layer_data); |
| } |
| } |
| return result; |
| } |
| |
| VkResult DispatchGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t *pDisplayCount, |
| VkDisplayKHR *pDisplays) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map); |
| VkResult result = layer_data->instance_dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, |
| pDisplayCount, pDisplays); |
| if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pDisplays) { |
| if (!wrap_handles) return result; |
| for (uint32_t i = 0; i < *pDisplayCount; ++i) { |
| if (pDisplays[i]) pDisplays[i] = layer_data->MaybeWrapDisplay(pDisplays[i], layer_data); |
| } |
| } |
| return result; |
| } |
| |
| VkResult DispatchGetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t *pPropertyCount, |
| VkDisplayModePropertiesKHR *pProperties) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map); |
| if (!wrap_handles) |
| return layer_data->instance_dispatch_table.GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, |
| pProperties); |
| { |
| display = layer_data->Unwrap(display); |
| } |
| |
| VkResult result = layer_data->instance_dispatch_table.GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties); |
| if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { |
| for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { |
| pProperties[idx0].displayMode = layer_data->WrapNew(pProperties[idx0].displayMode); |
| } |
| } |
| return result; |
| } |
| |
| VkResult DispatchGetDisplayModeProperties2KHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t *pPropertyCount, |
| VkDisplayModeProperties2KHR *pProperties) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map); |
| if (!wrap_handles) |
| return layer_data->instance_dispatch_table.GetDisplayModeProperties2KHR(physicalDevice, display, pPropertyCount, |
| pProperties); |
| { |
| display = layer_data->Unwrap(display); |
| } |
| |
| VkResult result = |
| layer_data->instance_dispatch_table.GetDisplayModeProperties2KHR(physicalDevice, display, pPropertyCount, pProperties); |
| if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { |
| for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { |
| pProperties[idx0].displayModeProperties.displayMode = layer_data->WrapNew(pProperties[idx0].displayModeProperties.displayMode); |
| } |
| } |
| return result; |
| } |
| |
| VkResult DispatchEnumerateDeviceExtensionProperties( |
| VkPhysicalDevice physicalDevice, |
| const char* pLayerName, |
| uint32_t* pPropertyCount, |
| VkExtensionProperties* pProperties) |
| { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map); |
| VkResult result = layer_data->instance_dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, pLayerName, pPropertyCount, pProperties); |
| |
| return result; |
| } |
| |
| VkResult DispatchDebugMarkerSetObjectTagEXT(VkDevice device, const VkDebugMarkerObjectTagInfoEXT *pTagInfo) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.DebugMarkerSetObjectTagEXT(device, pTagInfo); |
| safe_VkDebugMarkerObjectTagInfoEXT local_tag_info(pTagInfo); |
| { |
| auto it = unique_id_mapping.find(CastToUint64(local_tag_info.object)); |
| if (it != unique_id_mapping.end()) { |
| local_tag_info.object = it->second; |
| } |
| } |
| VkResult result = layer_data->device_dispatch_table.DebugMarkerSetObjectTagEXT(device, |
| reinterpret_cast<VkDebugMarkerObjectTagInfoEXT *>(&local_tag_info)); |
| return result; |
| } |
| |
| VkResult DispatchDebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo); |
| safe_VkDebugMarkerObjectNameInfoEXT local_name_info(pNameInfo); |
| { |
| auto it = unique_id_mapping.find(CastToUint64(local_name_info.object)); |
| if (it != unique_id_mapping.end()) { |
| local_name_info.object = it->second; |
| } |
| } |
| VkResult result = layer_data->device_dispatch_table.DebugMarkerSetObjectNameEXT( |
| device, reinterpret_cast<VkDebugMarkerObjectNameInfoEXT *>(&local_name_info)); |
| return result; |
| } |
| |
| // VK_EXT_debug_utils |
| VkResult DispatchSetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.SetDebugUtilsObjectTagEXT(device, pTagInfo); |
| safe_VkDebugUtilsObjectTagInfoEXT local_tag_info(pTagInfo); |
| { |
| auto it = unique_id_mapping.find(CastToUint64(local_tag_info.objectHandle)); |
| if (it != unique_id_mapping.end()) { |
| local_tag_info.objectHandle = it->second; |
| } |
| } |
| VkResult result = layer_data->device_dispatch_table.SetDebugUtilsObjectTagEXT( |
| device, reinterpret_cast<const VkDebugUtilsObjectTagInfoEXT *>(&local_tag_info)); |
| return result; |
| } |
| |
| VkResult DispatchSetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.SetDebugUtilsObjectNameEXT(device, pNameInfo); |
| safe_VkDebugUtilsObjectNameInfoEXT local_name_info(pNameInfo); |
| { |
| auto it = unique_id_mapping.find(CastToUint64(local_name_info.objectHandle)); |
| if (it != unique_id_mapping.end()) { |
| local_name_info.objectHandle = it->second; |
| } |
| } |
| VkResult result = layer_data->device_dispatch_table.SetDebugUtilsObjectNameEXT( |
| device, reinterpret_cast<const VkDebugUtilsObjectNameInfoEXT *>(&local_name_info)); |
| return result; |
| } |
| |
| VkResult DispatchGetPhysicalDeviceToolPropertiesEXT( |
| VkPhysicalDevice physicalDevice, |
| uint32_t* pToolCount, |
| VkPhysicalDeviceToolPropertiesEXT* pToolProperties) |
| { |
| VkResult result = VK_SUCCESS; |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map); |
| if (layer_data->instance_dispatch_table.GetPhysicalDeviceToolPropertiesEXT == nullptr) { |
| // This layer is the terminator. Set pToolCount to zero. |
| *pToolCount = 0; |
| } else { |
| result = layer_data->instance_dispatch_table.GetPhysicalDeviceToolPropertiesEXT(physicalDevice, pToolCount, pToolProperties); |
| } |
| |
| return result; |
| } |
| |
| bool NotDispatchableHandle(VkObjectType object_type) { |
| bool not_dispatchable = true; |
| if ((object_type == VK_OBJECT_TYPE_INSTANCE) || |
| (object_type == VK_OBJECT_TYPE_PHYSICAL_DEVICE) || |
| (object_type == VK_OBJECT_TYPE_DEVICE) || |
| (object_type == VK_OBJECT_TYPE_QUEUE) || |
| (object_type == VK_OBJECT_TYPE_COMMAND_BUFFER)) { |
| not_dispatchable = false; |
| } |
| return not_dispatchable; |
| } |
| |
| VkResult DispatchSetPrivateDataEXT( |
| VkDevice device, |
| VkObjectType objectType, |
| uint64_t objectHandle, |
| VkPrivateDataSlotEXT privateDataSlot, |
| uint64_t data) |
| { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.SetPrivateDataEXT(device, objectType, objectHandle, privateDataSlot, data); |
| privateDataSlot = layer_data->Unwrap(privateDataSlot); |
| if (NotDispatchableHandle(objectType)) { |
| objectHandle = layer_data->Unwrap(objectHandle); |
| } |
| VkResult result = layer_data->device_dispatch_table.SetPrivateDataEXT(device, objectType, objectHandle, privateDataSlot, data); |
| return result; |
| } |
| |
| VkResult DispatchSetPrivateData( |
| VkDevice device, |
| VkObjectType objectType, |
| uint64_t objectHandle, |
| VkPrivateDataSlot privateDataSlot, |
| uint64_t data) |
| { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.SetPrivateData(device, objectType, objectHandle, privateDataSlot, data); |
| privateDataSlot = layer_data->Unwrap(privateDataSlot); |
| if (NotDispatchableHandle(objectType)) { |
| objectHandle = layer_data->Unwrap(objectHandle); |
| } |
| VkResult result = layer_data->device_dispatch_table.SetPrivateData(device, objectType, objectHandle, privateDataSlot, data); |
| return result; |
| } |
| |
| void DispatchGetPrivateDataEXT( |
| VkDevice device, |
| VkObjectType objectType, |
| uint64_t objectHandle, |
| VkPrivateDataSlotEXT privateDataSlot, |
| uint64_t* pData) |
| { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.GetPrivateDataEXT(device, objectType, objectHandle, privateDataSlot, pData); |
| privateDataSlot = layer_data->Unwrap(privateDataSlot); |
| if (NotDispatchableHandle(objectType)) { |
| objectHandle = layer_data->Unwrap(objectHandle); |
| } |
| layer_data->device_dispatch_table.GetPrivateDataEXT(device, objectType, objectHandle, privateDataSlot, pData); |
| } |
| |
| void DispatchGetPrivateData( |
| VkDevice device, |
| VkObjectType objectType, |
| uint64_t objectHandle, |
| VkPrivateDataSlot privateDataSlot, |
| uint64_t* pData) |
| { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.GetPrivateData(device, objectType, objectHandle, privateDataSlot, pData); |
| privateDataSlot = layer_data->Unwrap(privateDataSlot); |
| if (NotDispatchableHandle(objectType)) { |
| objectHandle = layer_data->Unwrap(objectHandle); |
| } |
| layer_data->device_dispatch_table.GetPrivateData(device, objectType, objectHandle, privateDataSlot, pData); |
| } |
| |
// Maps each allocated secondary command buffer to the (wrapped) command pool
// it was allocated from; consulted by DispatchBeginCommandBuffer to decide
// whether inheritance-info handles need unwrapping.
vvl::unordered_map<VkCommandBuffer, VkCommandPool> secondary_cb_map{};

// Guards all access to secondary_cb_map.
std::shared_mutex dispatch_secondary_cb_map_mutex;
| |
| ReadLockGuard dispatch_cb_read_lock() { |
| return ReadLockGuard(dispatch_secondary_cb_map_mutex); |
| } |
| |
| WriteLockGuard dispatch_cb_write_lock() { |
| return WriteLockGuard(dispatch_secondary_cb_map_mutex); |
| } |
| |
| VkResult DispatchAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers); |
| safe_VkCommandBufferAllocateInfo var_local_pAllocateInfo; |
| safe_VkCommandBufferAllocateInfo *local_pAllocateInfo = nullptr; |
| if (pAllocateInfo) { |
| local_pAllocateInfo = &var_local_pAllocateInfo; |
| local_pAllocateInfo->initialize(pAllocateInfo); |
| if (pAllocateInfo->commandPool) { |
| local_pAllocateInfo->commandPool = layer_data->Unwrap(pAllocateInfo->commandPool); |
| } |
| } |
| VkResult result = layer_data->device_dispatch_table.AllocateCommandBuffers(device, (const VkCommandBufferAllocateInfo*)local_pAllocateInfo, pCommandBuffers); |
| if ((result == VK_SUCCESS) && pAllocateInfo && (pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)) { |
| auto lock = dispatch_cb_write_lock(); |
| for (uint32_t cb_index = 0; cb_index < pAllocateInfo->commandBufferCount; cb_index++) { |
| secondary_cb_map.emplace(pCommandBuffers[cb_index], pAllocateInfo->commandPool); |
| } |
| } |
| return result; |
| } |
| |
| void DispatchFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers); |
| commandPool = layer_data->Unwrap(commandPool); |
| layer_data->device_dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers); |
| auto lock = dispatch_cb_write_lock(); |
| for (uint32_t cb_index = 0; cb_index < commandBufferCount; cb_index++) { |
| secondary_cb_map.erase(pCommandBuffers[cb_index]); |
| } |
| } |
| |
| void DispatchDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator) { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.DestroyCommandPool(device, commandPool, pAllocator); |
| uint64_t commandPool_id = CastToUint64(commandPool); |
| auto iter = unique_id_mapping.pop(commandPool_id); |
| if (iter != unique_id_mapping.end()) { |
| commandPool = (VkCommandPool)iter->second; |
| } else { |
| commandPool = (VkCommandPool)0; |
| } |
| layer_data->device_dispatch_table.DestroyCommandPool(device, commandPool, pAllocator); |
| auto lock = dispatch_cb_write_lock(); |
| for (auto item = secondary_cb_map.begin(); item != secondary_cb_map.end();) { |
| if (item->second == commandPool) { |
| item = secondary_cb_map.erase(item); |
| } else { |
| ++item; |
| } |
| } |
| } |
| |
| VkResult DispatchBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo) { |
| bool cb_is_primary; |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map); |
| { |
| auto lock = dispatch_cb_read_lock(); |
| cb_is_primary = (secondary_cb_map.find(commandBuffer) == secondary_cb_map.end()); |
| } |
| if (!wrap_handles || cb_is_primary) return layer_data->device_dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo); |
| safe_VkCommandBufferBeginInfo var_local_pBeginInfo; |
| safe_VkCommandBufferBeginInfo *local_pBeginInfo = nullptr; |
| if (pBeginInfo) { |
| local_pBeginInfo = &var_local_pBeginInfo; |
| local_pBeginInfo->initialize(pBeginInfo); |
| if (local_pBeginInfo->pInheritanceInfo) { |
| if (pBeginInfo->pInheritanceInfo->renderPass) { |
| local_pBeginInfo->pInheritanceInfo->renderPass = layer_data->Unwrap(pBeginInfo->pInheritanceInfo->renderPass); |
| } |
| if (pBeginInfo->pInheritanceInfo->framebuffer) { |
| local_pBeginInfo->pInheritanceInfo->framebuffer = layer_data->Unwrap(pBeginInfo->pInheritanceInfo->framebuffer); |
| } |
| } |
| } |
| VkResult result = layer_data->device_dispatch_table.BeginCommandBuffer(commandBuffer, (const VkCommandBufferBeginInfo*)local_pBeginInfo); |
| return result; |
| } |
| |
// Creates ray tracing pipelines, handling both the immediate and the deferred
// (VkDeferredOperationKHR) completion paths.  All handles referenced by the
// create infos are unwrapped into a deep copy before the down-chain call;
// created pipeline handles are wrapped either immediately or -- for deferred
// operations -- from a cleanup callback that runs when the operation completes
// (see DispatchDeferredOperationJoinKHR).
VkResult DispatchCreateRayTracingPipelinesKHR(
    VkDevice device,
    VkDeferredOperationKHR deferredOperation,
    VkPipelineCache pipelineCache,
    uint32_t createInfoCount,
    const VkRayTracingPipelineCreateInfoKHR* pCreateInfos,
    const VkAllocationCallbacks* pAllocator,
    VkPipeline* pPipelines)
{
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // When not wrapping, alias the caller's array as-is; when wrapping it is
    // replaced below with a deep copy whose handles are unwrapped.
    safe_VkRayTracingPipelineCreateInfoKHR *local_pCreateInfos = (safe_VkRayTracingPipelineCreateInfoKHR *)(pCreateInfos);
    if (wrap_handles) {
        deferredOperation = layer_data->Unwrap(deferredOperation);
        pipelineCache = layer_data->Unwrap(pipelineCache);
        if (pCreateInfos) {
            local_pCreateInfos = new safe_VkRayTracingPipelineCreateInfoKHR[createInfoCount];
            for (uint32_t index0 = 0; index0 < createInfoCount; ++index0) {
                local_pCreateInfos[index0].initialize(&pCreateInfos[index0]);
                // Unwrap the shader module referenced by each stage.
                if (local_pCreateInfos[index0].pStages) {
                    for (uint32_t index1 = 0; index1 < local_pCreateInfos[index0].stageCount; ++index1) {
                        if (pCreateInfos[index0].pStages[index1].module) {
                            local_pCreateInfos[index0].pStages[index1].module = layer_data->Unwrap(pCreateInfos[index0].pStages[index1].module);
                        }
                    }
                }
                // Unwrap pipeline-library handles (in place, within the deep copy).
                if (local_pCreateInfos[index0].pLibraryInfo) {
                    if (local_pCreateInfos[index0].pLibraryInfo->pLibraries) {
                        for (uint32_t index2 = 0; index2 < local_pCreateInfos[index0].pLibraryInfo->libraryCount; ++index2) {
                            local_pCreateInfos[index0].pLibraryInfo->pLibraries[index2] = layer_data->Unwrap(local_pCreateInfos[index0].pLibraryInfo->pLibraries[index2]);
                        }
                    }
                }
                if (pCreateInfos[index0].layout) {
                    local_pCreateInfos[index0].layout = layer_data->Unwrap(pCreateInfos[index0].layout);
                }
                if (pCreateInfos[index0].basePipelineHandle) {
                    local_pCreateInfos[index0].basePipelineHandle = layer_data->Unwrap(pCreateInfos[index0].basePipelineHandle);
                }
            }
        }
    }
    VkResult result = layer_data->device_dispatch_table.CreateRayTracingPipelinesKHR(device, deferredOperation, pipelineCache, createInfoCount, (const VkRayTracingPipelineCreateInfoKHR*)local_pCreateInfos, pAllocator, pPipelines);
    if (wrap_handles) {
        // Copy pipeline-creation-feedback written into our copied pNext chain
        // back to the caller's chain.
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            if (pCreateInfos[i].pNext != VK_NULL_HANDLE) {
                CopyCreatePipelineFeedbackData(local_pCreateInfos[i].pNext, pCreateInfos[i].pNext);
            }
        }
    }

    // Fix check for deferred ray tracing pipeline creation
    // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/5817
    const bool is_operation_deferred = (deferredOperation != VK_NULL_HANDLE) && (result == VK_OPERATION_DEFERRED_KHR);
    if (is_operation_deferred) {
        // Deferred path: cleanup and handle-wrapping must wait until the
        // operation completes.  Append to any callbacks already registered.
        std::vector<std::function<void()>> post_completion_fns;
        auto completion_find = layer_data->deferred_operation_post_completion.pop(deferredOperation);
        if(completion_find->first) {
            post_completion_fns = std::move(completion_find->second);
        }
        if (wrap_handles) {
            auto cleanup_fn = [local_pCreateInfos, deferredOperation, pPipelines, createInfoCount, layer_data](){
                if (local_pCreateInfos) {
                    delete[] local_pCreateInfos;
                }
                // Wrap the now-valid pipeline handles and record them for
                // DispatchGetDeferredOperationResultKHR.
                std::vector<VkPipeline> pipes_wrapped;
                for (uint32_t index0 = 0; index0 < createInfoCount; index0++) {
                    if (pPipelines[index0] != VK_NULL_HANDLE) {
                        pPipelines[index0] = layer_data->WrapNew(pPipelines[index0]);
                        pipes_wrapped.emplace_back(pPipelines[index0]);
                    }
                }
                layer_data->deferred_operation_pipelines.insert(deferredOperation, std::move(pipes_wrapped));
            };
            post_completion_fns.emplace_back(cleanup_fn);
        } else {
            auto cleanup_fn = [deferredOperation, pPipelines, createInfoCount, layer_data](){
                // No wrapping: just record the created pipelines.
                std::vector<VkPipeline> pipes;
                for (uint32_t index0 = 0; index0 < createInfoCount; index0++) {
                    if (pPipelines[index0] != VK_NULL_HANDLE) {
                        pipes.emplace_back(pPipelines[index0]);
                    }
                }
                layer_data->deferred_operation_pipelines.insert(deferredOperation, std::move(pipes));
            };
            post_completion_fns.emplace_back(cleanup_fn);
        }
        layer_data->deferred_operation_post_completion.insert(deferredOperation, std::move(post_completion_fns));
    } else if (wrap_handles){
        // Non-deferred path: free the deep copy and wrap handles immediately.
        if (local_pCreateInfos) {
            delete[] local_pCreateInfos;
        }
        for (uint32_t index0 = 0; index0 < createInfoCount; index0++) {
            if (pPipelines[index0] != VK_NULL_HANDLE) {
                pPipelines[index0] = layer_data->WrapNew(pPipelines[index0]);
            }
        }
    }

    return result;
}
| |
| VkResult DispatchDeferredOperationJoinKHR( |
| VkDevice device, |
| VkDeferredOperationKHR operation) |
| { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (wrap_handles) { |
| operation = layer_data->Unwrap(operation); |
| } |
| VkResult result = layer_data->device_dispatch_table.DeferredOperationJoinKHR(device, operation); |
| |
| // If this thread completed the operation, free any retained memory. |
| if (result == VK_SUCCESS) |
| { |
| auto iter = layer_data->deferred_operation_post_completion.pop(operation); |
| if (iter != layer_data->deferred_operation_post_completion.end()) |
| { |
| for(auto &cleanup_fn : iter->second) { |
| cleanup_fn(); |
| } |
| } |
| } |
| |
| return result; |
| } |
| |
| |
| VkResult DispatchGetDeferredOperationResultKHR( |
| VkDevice device, |
| VkDeferredOperationKHR operation) |
| { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (wrap_handles) { |
| operation = layer_data->Unwrap(operation); |
| } |
| VkResult result = layer_data->device_dispatch_table.GetDeferredOperationResultKHR(device, operation); |
| |
| // Add created pipelines if successful |
| if (result == VK_SUCCESS) |
| { |
| auto iter_fn = layer_data->deferred_operation_post_check.pop(operation); |
| auto iter_pipes = layer_data->deferred_operation_pipelines.pop(operation); |
| if (iter_fn->first && iter_pipes->first) |
| { |
| for(auto &cleanup_fn : iter_fn->second) |
| { |
| cleanup_fn(iter_pipes->second); |
| } |
| } |
| } |
| |
| return result; |
| } |
| |
// Host-side acceleration structure build: unwraps every handle reachable from
// pInfos before forwarding to the driver. The top-level build infos are deep
// copied (so the caller's structs stay untouched), but acceleration structure
// references inside instance data are rewritten IN PLACE through the caller's
// host pointers, since the driver reads that memory directly.
VkResult DispatchBuildAccelerationStructuresKHR(
    VkDevice device,
    VkDeferredOperationKHR deferredOperation,
    uint32_t infoCount,
    const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
    const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos)
{
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!wrap_handles) return layer_data->device_dispatch_table.BuildAccelerationStructuresKHR(device, deferredOperation, infoCount, pInfos, ppBuildRangeInfos);
    safe_VkAccelerationStructureBuildGeometryInfoKHR *local_pInfos = nullptr;
    {
        deferredOperation = layer_data->Unwrap(deferredOperation);
        if (pInfos) {
            // Deep copy so src/dst handles can be replaced without mutating the app's structs.
            local_pInfos = new safe_VkAccelerationStructureBuildGeometryInfoKHR[infoCount];
            for (uint32_t index0 = 0; index0 < infoCount; ++index0) {
                local_pInfos[index0].initialize(&pInfos[index0], true, ppBuildRangeInfos[index0]);
                if (pInfos[index0].srcAccelerationStructure) {
                    local_pInfos[index0].srcAccelerationStructure = layer_data->Unwrap(pInfos[index0].srcAccelerationStructure);
                }
                if (pInfos[index0].dstAccelerationStructure) {
                    local_pInfos[index0].dstAccelerationStructure = layer_data->Unwrap(pInfos[index0].dstAccelerationStructure);
                }
                // Instance geometries embed acceleration structure references in host
                // memory; those must be unwrapped too, whichever of pGeometries /
                // ppGeometries the app used.
                for (uint32_t geometry_index = 0; geometry_index < local_pInfos[index0].geometryCount; ++geometry_index) {
                    safe_VkAccelerationStructureGeometryKHR &geometry_info = local_pInfos[index0].pGeometries != nullptr ? local_pInfos[index0].pGeometries[geometry_index] : *(local_pInfos[index0].ppGeometries[geometry_index]);
                    if (geometry_info.geometryType == VK_GEOMETRY_TYPE_INSTANCES_KHR) {
                        if (geometry_info.geometry.instances.arrayOfPointers) {
                            // data.hostAddress is an array of pointers to instances (after primitiveOffset).
                            // NOTE: this writes unwrapped handles back into the CALLER's memory.
                            const uint8_t *byte_ptr = reinterpret_cast<const uint8_t*>(geometry_info.geometry.instances.data.hostAddress);
                            VkAccelerationStructureInstanceKHR **instances = (VkAccelerationStructureInstanceKHR **)(byte_ptr + ppBuildRangeInfos[index0][geometry_index].primitiveOffset);
                            for (uint32_t instance_index = 0; instance_index < ppBuildRangeInfos[index0][geometry_index].primitiveCount; ++instance_index) {
                                instances[instance_index]->accelerationStructureReference = layer_data->Unwrap(instances[instance_index]->accelerationStructureReference);
                            }
                        } else {
                            // data.hostAddress is a tightly packed array of instances (after primitiveOffset).
                            // NOTE: this also mutates the caller's memory in place.
                            const uint8_t *byte_ptr = reinterpret_cast<const uint8_t*>(geometry_info.geometry.instances.data.hostAddress);
                            VkAccelerationStructureInstanceKHR *instances = (VkAccelerationStructureInstanceKHR *)(byte_ptr + ppBuildRangeInfos[index0][geometry_index].primitiveOffset);
                            for (uint32_t instance_index = 0; instance_index < ppBuildRangeInfos[index0][geometry_index].primitiveCount; ++instance_index) {
                                instances[instance_index].accelerationStructureReference = layer_data->Unwrap(instances[instance_index].accelerationStructureReference);
                            }
                        }
                    }
                }
            }
        }
    }
    VkResult result = layer_data->device_dispatch_table.BuildAccelerationStructuresKHR(device, deferredOperation, infoCount, (const VkAccelerationStructureBuildGeometryInfoKHR*)local_pInfos, pInfos ? ppBuildRangeInfos : ppBuildRangeInfos);
    if (local_pInfos) {
        // Fix check for deferred ray tracing pipeline creation
        // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/5817
        // When the build is deferred, the driver keeps reading local_pInfos until the
        // operation completes, so defer the delete until DeferredOperationJoinKHR.
        const bool is_operation_deferred = (deferredOperation != VK_NULL_HANDLE) && (result == VK_OPERATION_DEFERRED_KHR);
        if (is_operation_deferred) {
            std::vector<std::function<void()>> cleanup{ [local_pInfos](){ delete[] local_pInfos; } };
            layer_data->deferred_operation_post_completion.insert(deferredOperation, cleanup);
        } else {
            delete[] local_pInfos;
        }
    }
    return result;
}
| |
| void DispatchGetAccelerationStructureBuildSizesKHR( |
| VkDevice device, |
| VkAccelerationStructureBuildTypeKHR buildType, |
| const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, |
| const uint32_t* pMaxPrimitiveCounts, |
| VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo) |
| { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.GetAccelerationStructureBuildSizesKHR(device, buildType, pBuildInfo, pMaxPrimitiveCounts, pSizeInfo); |
| safe_VkAccelerationStructureBuildGeometryInfoKHR var_local_pBuildInfo; |
| safe_VkAccelerationStructureBuildGeometryInfoKHR *local_pBuildInfo = nullptr; |
| { |
| if (pBuildInfo) { |
| local_pBuildInfo = &var_local_pBuildInfo; |
| local_pBuildInfo->initialize(pBuildInfo, false, nullptr); |
| if (pBuildInfo->srcAccelerationStructure) { |
| local_pBuildInfo->srcAccelerationStructure = layer_data->Unwrap(pBuildInfo->srcAccelerationStructure); |
| } |
| if (pBuildInfo->dstAccelerationStructure) { |
| local_pBuildInfo->dstAccelerationStructure = layer_data->Unwrap(pBuildInfo->dstAccelerationStructure); |
| } |
| for (uint32_t geometry_index = 0; geometry_index < local_pBuildInfo->geometryCount; ++geometry_index) { |
| safe_VkAccelerationStructureGeometryKHR &geometry_info = local_pBuildInfo->pGeometries != nullptr ? local_pBuildInfo->pGeometries[geometry_index] : *(local_pBuildInfo->ppGeometries[geometry_index]); |
| if (geometry_info.geometryType == VK_GEOMETRY_TYPE_TRIANGLES_KHR) { |
| WrapPnextChainHandles(layer_data, geometry_info.geometry.triangles.pNext); |
| } |
| } |
| } |
| } |
| layer_data->device_dispatch_table.GetAccelerationStructureBuildSizesKHR(device, buildType, (const VkAccelerationStructureBuildGeometryInfoKHR*)local_pBuildInfo, pMaxPrimitiveCounts, pSizeInfo); |
| |
| } |
| |
// Unwraps the sampler/image-view handles referenced by pDescriptorInfo->data
// before forwarding to the driver. The info struct is copied into a safe
// struct; handles are then rewritten through the copy's data pointers.
// NOTE(review): the casts below write through pointers held by the safe-struct
// copy -- this assumes safe_VkDescriptorGetInfoEXT deep-copies the pointed-to
// descriptor data (not just the union's pointer value); confirm against the
// safe-struct generator, otherwise these writes would mutate the caller's structs.
void DispatchGetDescriptorEXT(
    VkDevice device,
    const VkDescriptorGetInfoEXT* pDescriptorInfo,
    size_t dataSize,
    void* pDescriptor)
{
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!wrap_handles) return layer_data->device_dispatch_table.GetDescriptorEXT(device, pDescriptorInfo, dataSize, pDescriptor);
    safe_VkDescriptorGetInfoEXT var_local_pDescriptorInfo;
    safe_VkDescriptorGetInfoEXT *local_pDescriptorInfo = NULL;
    {
        if (pDescriptorInfo) {
            local_pDescriptorInfo = &var_local_pDescriptorInfo;
            local_pDescriptorInfo->initialize(pDescriptorInfo);

            // Which union member of `data` is valid depends on the descriptor type;
            // only the types that carry handles need unwrapping.
            switch (pDescriptorInfo->type)
            {
                case VK_DESCRIPTOR_TYPE_SAMPLER:
                {
                    // Cast away the pointee's const to store the unwrapped sampler.
                    if (local_pDescriptorInfo->data.pSampler)
                        *(VkSampler*)local_pDescriptorInfo->data.pSampler = layer_data->Unwrap(*pDescriptorInfo->data.pSampler);

                    break;
                }
                case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                {
                    // Both sampler and imageView may be present; each is optional
                    // (imageView may be VK_NULL_HANDLE for immutable-sampler use).
                    if (local_pDescriptorInfo->data.pCombinedImageSampler) {
                        if (pDescriptorInfo->data.pCombinedImageSampler->sampler) {
                            *(VkSampler*)&local_pDescriptorInfo->data.pCombinedImageSampler->sampler = layer_data->Unwrap(pDescriptorInfo->data.pCombinedImageSampler->sampler);
                        }
                        if (pDescriptorInfo->data.pCombinedImageSampler->imageView) {
                            *(VkImageView*)&local_pDescriptorInfo->data.pCombinedImageSampler->imageView = layer_data->Unwrap(pDescriptorInfo->data.pCombinedImageSampler->imageView);
                        }
                    }
                    break;
                }
                case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                {
                    if (local_pDescriptorInfo->data.pSampledImage) {
                        if (pDescriptorInfo->data.pSampledImage->sampler) {
                            *(VkSampler*)&local_pDescriptorInfo->data.pSampledImage->sampler = layer_data->Unwrap(pDescriptorInfo->data.pSampledImage->sampler);
                        }
                        if (pDescriptorInfo->data.pSampledImage->imageView) {
                            *(VkImageView*)&local_pDescriptorInfo->data.pSampledImage->imageView = layer_data->Unwrap(pDescriptorInfo->data.pSampledImage->imageView);
                        }
                    }
                    break;
                }
                case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                {
                    if (local_pDescriptorInfo->data.pStorageImage) {
                        if (pDescriptorInfo->data.pStorageImage->sampler) {
                            *(VkSampler*)&local_pDescriptorInfo->data.pStorageImage->sampler = layer_data->Unwrap(pDescriptorInfo->data.pStorageImage->sampler);
                        }
                        if (pDescriptorInfo->data.pStorageImage->imageView) {
                            *(VkImageView*)&local_pDescriptorInfo->data.pStorageImage->imageView = layer_data->Unwrap(pDescriptorInfo->data.pStorageImage->imageView);
                        }
                    }
                    break;
                }
                case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                {
                    if (local_pDescriptorInfo->data.pInputAttachmentImage) {
                        if (pDescriptorInfo->data.pInputAttachmentImage->sampler) {
                            *(VkSampler*)&local_pDescriptorInfo->data.pInputAttachmentImage->sampler = layer_data->Unwrap(pDescriptorInfo->data.pInputAttachmentImage->sampler);
                        }
                        if (pDescriptorInfo->data.pInputAttachmentImage->imageView) {
                            *(VkImageView*)&local_pDescriptorInfo->data.pInputAttachmentImage->imageView = layer_data->Unwrap(pDescriptorInfo->data.pInputAttachmentImage->imageView);
                        }
                    }
                    break;
                }
                // Buffer/texel-buffer types carry device addresses, not handles -- nothing to unwrap.
                default: break;
            }
        }
    }

    layer_data->device_dispatch_table.GetDescriptorEXT(device, (const VkDescriptorGetInfoEXT*)local_pDescriptorInfo, dataSize, pDescriptor);
}
| |
| VkResult DispatchCreateComputePipelines( |
| VkDevice device, |
| VkPipelineCache pipelineCache, |
| uint32_t createInfoCount, |
| const VkComputePipelineCreateInfo* pCreateInfos, |
| const VkAllocationCallbacks* pAllocator, |
| VkPipeline* pPipelines) |
| { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); |
| safe_VkComputePipelineCreateInfo *local_pCreateInfos = nullptr; |
| { |
| pipelineCache = layer_data->Unwrap(pipelineCache); |
| if (pCreateInfos) { |
| local_pCreateInfos = new safe_VkComputePipelineCreateInfo[createInfoCount]; |
| for (uint32_t index0 = 0; index0 < createInfoCount; ++index0) { |
| local_pCreateInfos[index0].initialize(&pCreateInfos[index0]); |
| WrapPnextChainHandles(layer_data, local_pCreateInfos[index0].pNext); |
| if (pCreateInfos[index0].stage.module) { |
| local_pCreateInfos[index0].stage.module = layer_data->Unwrap(pCreateInfos[index0].stage.module); |
| } |
| WrapPnextChainHandles(layer_data, local_pCreateInfos[index0].stage.pNext); |
| if (pCreateInfos[index0].layout) { |
| local_pCreateInfos[index0].layout = layer_data->Unwrap(pCreateInfos[index0].layout); |
| } |
| if (pCreateInfos[index0].basePipelineHandle) { |
| local_pCreateInfos[index0].basePipelineHandle = layer_data->Unwrap(pCreateInfos[index0].basePipelineHandle); |
| } |
| } |
| } |
| } |
| VkResult result = layer_data->device_dispatch_table.CreateComputePipelines(device, pipelineCache, createInfoCount, (const VkComputePipelineCreateInfo*)local_pCreateInfos, pAllocator, pPipelines); |
| for (uint32_t i = 0; i < createInfoCount; ++i) { |
| if (pCreateInfos[i].pNext != VK_NULL_HANDLE) { |
| CopyCreatePipelineFeedbackData(local_pCreateInfos[i].pNext, pCreateInfos[i].pNext); |
| } |
| } |
| |
| if (local_pCreateInfos) { |
| delete[] local_pCreateInfos; |
| } |
| { |
| for (uint32_t index0 = 0; index0 < createInfoCount; index0++) { |
| if (pPipelines[index0] != VK_NULL_HANDLE) { |
| pPipelines[index0] = layer_data->WrapNew(pPipelines[index0]); |
| } |
| } |
| } |
| return result; |
| } |
| |
| VkResult DispatchCreateRayTracingPipelinesNV( |
| VkDevice device, |
| VkPipelineCache pipelineCache, |
| uint32_t createInfoCount, |
| const VkRayTracingPipelineCreateInfoNV* pCreateInfos, |
| const VkAllocationCallbacks* pAllocator, |
| VkPipeline* pPipelines) |
| { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.CreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); |
| safe_VkRayTracingPipelineCreateInfoNV *local_pCreateInfos = nullptr; |
| { |
| pipelineCache = layer_data->Unwrap(pipelineCache); |
| if (pCreateInfos) { |
| local_pCreateInfos = new safe_VkRayTracingPipelineCreateInfoNV[createInfoCount]; |
| for (uint32_t index0 = 0; index0 < createInfoCount; ++index0) { |
| local_pCreateInfos[index0].initialize(&pCreateInfos[index0]); |
| if (local_pCreateInfos[index0].pStages) { |
| for (uint32_t index1 = 0; index1 < local_pCreateInfos[index0].stageCount; ++index1) { |
| if (pCreateInfos[index0].pStages[index1].module) { |
| local_pCreateInfos[index0].pStages[index1].module = layer_data->Unwrap(pCreateInfos[index0].pStages[index1].module); |
| } |
| } |
| } |
| if (pCreateInfos[index0].layout) { |
| local_pCreateInfos[index0].layout = layer_data->Unwrap(pCreateInfos[index0].layout); |
| } |
| if (pCreateInfos[index0].basePipelineHandle) { |
| local_pCreateInfos[index0].basePipelineHandle = layer_data->Unwrap(pCreateInfos[index0].basePipelineHandle); |
| } |
| } |
| } |
| } |
| VkResult result = layer_data->device_dispatch_table.CreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, (const VkRayTracingPipelineCreateInfoNV*)local_pCreateInfos, pAllocator, pPipelines); |
| for (uint32_t i = 0; i < createInfoCount; ++i) { |
| if (pCreateInfos[i].pNext != VK_NULL_HANDLE) { |
| CopyCreatePipelineFeedbackData(local_pCreateInfos[i].pNext, pCreateInfos[i].pNext); |
| } |
| } |
| |
| if (local_pCreateInfos) { |
| delete[] local_pCreateInfos; |
| } |
| { |
| for (uint32_t index0 = 0; index0 < createInfoCount; index0++) { |
| if (pPipelines[index0] != VK_NULL_HANDLE) { |
| pPipelines[index0] = layer_data->WrapNew(pPipelines[index0]); |
| } |
| } |
| } |
| return result; |
| } |
| |
| VkResult DispatchReleasePerformanceConfigurationINTEL( |
| VkDevice device, |
| VkPerformanceConfigurationINTEL configuration) |
| { |
| auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| if (!wrap_handles) return layer_data->device_dispatch_table.ReleasePerformanceConfigurationINTEL(device, configuration); |
| { |
| configuration = layer_data->Unwrap(configuration); |
| } |
| VkResult result = layer_data->device_dispatch_table.ReleasePerformanceConfigurationINTEL(device, configuration); |
| |
| return result; |
| } |