Merge "Merge branch 'remotes/upstream/vulkan-cts-1.0.2' into master"
diff --git a/external/vulkancts/modules/vulkan/api/vktApiBufferTests.cpp b/external/vulkancts/modules/vulkan/api/vktApiBufferTests.cpp
index 024e826..256ec1e 100644
--- a/external/vulkancts/modules/vulkan/api/vktApiBufferTests.cpp
+++ b/external/vulkancts/modules/vulkan/api/vktApiBufferTests.cpp
@@ -565,11 +565,20 @@
const deUint32 heapTypeIndex = static_cast<deUint32>(deCtz32(memReqs.memoryRequirements.memoryTypeBits));
const VkMemoryType memoryType = memoryProperties.memoryTypes[heapTypeIndex];
const VkMemoryHeap memoryHeap = memoryProperties.memoryHeaps[memoryType.heapIndex];
- const VkDeviceSize maxBufferSize = deAlign64(memoryHeap.size >> 1u, memReqs.memoryRequirements.alignment);
const deUint32 shrinkBits = 4u; // number of bits to shift when reducing the size with each iteration
+ // Buffer size - Choose half of the reported heap size for the maximum buffer size; we
+ // should attempt to test as large a portion as possible.
+ //
+ // However on a system where device memory is shared with the system, the maximum size
+ // should be tested against the platform memory limits as a significant portion of the heap
+ // may already be in use by the operating system and other running processes.
+ const VkDeviceSize maxBufferSize = getMaxBufferSize(memoryHeap.size,
+ memReqs.memoryRequirements.alignment,
+ getPlatformMemoryLimits(m_context));
+
Move<VkDeviceMemory> memory;
- size = std::min(size, maxBufferSize);
+ size = deAlign64(std::min(size, maxBufferSize >> 1), memReqs.memoryRequirements.alignment);
while (*memory == DE_NULL)
{
// Create the buffer
diff --git a/external/vulkancts/modules/vulkan/api/vktApiCopiesAndBlittingTests.cpp b/external/vulkancts/modules/vulkan/api/vktApiCopiesAndBlittingTests.cpp
index 86edde0..7022834 100644
--- a/external/vulkancts/modules/vulkan/api/vktApiCopiesAndBlittingTests.cpp
+++ b/external/vulkancts/modules/vulkan/api/vktApiCopiesAndBlittingTests.cpp
@@ -3185,7 +3185,7 @@
0u, // deUint32 baseMipLevel;
1u, // deUint32 mipLevels;
0u, // deUint32 baseArraySlice;
- getArraySize(m_params.dst.image) // deUint32 arraySize;
+ getArraySize(m_params.src.image) // deUint32 arraySize;
}
},
// destination image
diff --git a/external/vulkancts/modules/vulkan/api/vktApiDescriptorPoolTests.cpp b/external/vulkancts/modules/vulkan/api/vktApiDescriptorPoolTests.cpp
index 64d863f..5e5b596 100644
--- a/external/vulkancts/modules/vulkan/api/vktApiDescriptorPoolTests.cpp
+++ b/external/vulkancts/modules/vulkan/api/vktApiDescriptorPoolTests.cpp
@@ -212,7 +212,13 @@
DE_NULL, // const VkSampler* pImmutableSamplers;
};
- const vector<VkDescriptorSetLayoutBinding> descriptorSetLayoutBindings (params.bindingCount, descriptorSetLayoutBinding);
+ vector<VkDescriptorSetLayoutBinding> descriptorSetLayoutBindings (params.bindingCount, descriptorSetLayoutBinding);
+
+ for (deUint32 binding = 0; binding < deUint32(descriptorSetLayoutBindings.size()); ++binding)
+ {
+ descriptorSetLayoutBindings[binding].binding = binding;
+ }
+
const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutInfo =
{
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
diff --git a/external/vulkancts/modules/vulkan/api/vktApiExternalMemoryTests.cpp b/external/vulkancts/modules/vulkan/api/vktApiExternalMemoryTests.cpp
index 9508e7c..c0ccece 100644
--- a/external/vulkancts/modules/vulkan/api/vktApiExternalMemoryTests.cpp
+++ b/external/vulkancts/modules/vulkan/api/vktApiExternalMemoryTests.cpp
@@ -503,6 +503,71 @@
VK_CHECK(vkd.queueSubmit(queue, 1, &submit, (vk::VkFence)0u));
}
+void submitDummySignalAndGetSemaphoreNative ( const vk::DeviceInterface& vk,
+ vk::VkDevice device,
+ vk::VkQueue queue,
+ deUint32 queueFamilyIndex,
+ vk::VkSemaphore semaphore,
+ vk::VkExternalSemaphoreHandleTypeFlagBitsKHR externalType,
+ NativeHandle& nativeHandle)
+{
+ const vk::Unique<vk::VkCommandPool> cmdPool(createCommandPool(vk, device, vk::VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex, DE_NULL));
+ const vk::Unique<vk::VkCommandBuffer> cmdBuffer(allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+
+ const vk::VkEventCreateInfo eventCreateInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_EVENT_CREATE_INFO,
+ DE_NULL,
+ 0u
+ };
+
+ const vk::Unique<vk::VkEvent> event(createEvent(vk, device, &eventCreateInfo, DE_NULL));
+
+ const vk::VkCommandBufferBeginInfo cmdBufferBeginInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ DE_NULL,
+ vk::VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
+ DE_NULL,
+ };
+
+ VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
+ /*
+ The submitDummySignal function calls vkQueueSubmit with an empty VkSubmitInfo structure and a
+ VkSemaphore to be signalled when the work is finished. Because there is no work in the submission, vkQueueSubmit
+ may signal the semaphore immediately. When a semaphore's file descriptor is obtained using vkGetSemaphoreFdKHR, if the
+ handle type is VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR, vkGetSemaphoreFdKHR is allowed to return -1 if the
+ semaphore is already signalled, instead of a file descriptor. In order to make sure that a valid file descriptor is returned
+ we use vkCmdWaitEvents to make sure that vkQueueSubmit doesn't signal the semaphore.
+ */
+ vk.cmdWaitEvents(*cmdBuffer, 1, &event.get(), vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, DE_NULL, 0, DE_NULL, 0, DE_NULL);
+ vk.endCommandBuffer(*cmdBuffer);
+
+ const vk::VkSubmitInfo submit =
+ {
+ vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ DE_NULL,
+
+ 0u,
+ DE_NULL,
+ DE_NULL,
+
+ 1u,
+ &cmdBuffer.get(),
+
+ 1u,
+ &semaphore
+ };
+
+ VK_CHECK(vk.queueSubmit(queue, 1, &submit, (vk::VkFence)0u));
+
+ getSemaphoreNative(vk, device, semaphore, externalType, nativeHandle);
+
+ VK_CHECK(vk.setEvent(device, *event));
+
+ VK_CHECK(vk.queueWaitIdle(queue));
+}
+
void submitDummyWait (const vk::DeviceInterface& vkd,
vk::VkQueue queue,
vk::VkSemaphore semaphore)
@@ -550,6 +615,71 @@
VK_CHECK(vkd.queueSubmit(queue, 1, &submit, fence));
}
+void submitDummySignalAndGetFenceNative ( const vk::DeviceInterface& vk,
+ vk::VkDevice device,
+ vk::VkQueue queue,
+ deUint32 queueFamilyIndex,
+ vk::VkFence fence,
+ vk::VkExternalFenceHandleTypeFlagBitsKHR externalType,
+ NativeHandle& nativeHandle)
+{
+ const vk::Unique<vk::VkCommandPool> cmdPool(createCommandPool(vk, device, vk::VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex, DE_NULL));
+ const vk::Unique<vk::VkCommandBuffer> cmdBuffer(allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+
+ const vk::VkEventCreateInfo eventCreateInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_EVENT_CREATE_INFO,
+ DE_NULL,
+ 0u
+ };
+
+ const vk::Unique<vk::VkEvent> event(createEvent(vk, device, &eventCreateInfo, DE_NULL));
+
+ const vk::VkCommandBufferBeginInfo cmdBufferBeginInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ DE_NULL,
+ vk::VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
+ DE_NULL,
+ };
+
+ VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
+ /*
+ The submitDummySignal function calls vkQueueSubmit with an empty VkSubmitInfo structure and a
+ VkFence to be signalled when the work is finished. Because there is no work in the submission, vkQueueSubmit
+ could signal the fence immediately. When a fence's file descriptor is obtained using vkGetFenceFdKHR, if the
+ handle type is VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR, vkGetFenceFdKHR is allowed to return -1 instead of a
+ file descriptor, if the fence is already signalled. In order to make sure that a valid file descriptor is returned
+ we use vkCmdWaitEvents to make sure that vkQueueSubmit doesn't signal the fence.
+ */
+ vk.cmdWaitEvents(*cmdBuffer, 1, &event.get(), vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, DE_NULL, 0, DE_NULL, 0, DE_NULL);
+ vk.endCommandBuffer(*cmdBuffer);
+
+ const vk::VkSubmitInfo submit =
+ {
+ vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ DE_NULL,
+
+ 0u,
+ DE_NULL,
+ DE_NULL,
+
+ 1u,
+ &cmdBuffer.get(),
+
+ 0u,
+ DE_NULL
+ };
+
+ VK_CHECK(vk.queueSubmit(queue, 1, &submit, fence));
+
+ getFenceNative(vk, device, fence, externalType, nativeHandle);
+
+ VK_CHECK(vk.setEvent(device, *event));
+
+ VK_CHECK(vk.queueWaitIdle(queue));
+}
+
tcu::TestStatus testSemaphoreQueries (Context& context, vk::VkExternalSemaphoreHandleTypeFlagBitsKHR externalType)
{
const vk::PlatformInterface& vkp (context.getPlatformInterface());
@@ -685,12 +815,12 @@
const vk::DeviceDriver vkd (vki, *device);
const vk::VkQueue queue (getQueue(vkd, *device, queueFamilyIndex));
const vk::Unique<vk::VkSemaphore> semaphore (createExportableSemaphore(vkd, *device, config.externalType));
+ NativeHandle handleA;
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *semaphore);
-
- NativeHandle handleA;
- getSemaphoreNative(vkd, *device, *semaphore, config.externalType, handleA);
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphore, config.externalType, handleA);
+ else
+ getSemaphoreNative(vkd, *device, *semaphore, config.externalType, handleA);
{
NativeHandle handleB (handleA);
@@ -733,12 +863,12 @@
const vk::VkQueue queue (getQueue(vkd, *device, queueFamilyIndex));
const vk::Unique<vk::VkSemaphore> semaphoreA (createExportableSemaphore(vkd, *device, config.externalType));
+ NativeHandle handleA;
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *semaphoreA);
-
- NativeHandle handleA;
- getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handleA);
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handleA);
+ else
+ getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handleA);
NativeHandle handleB (handleA);
const vk::VkSemaphoreImportFlagsKHR flags = config.permanence == PERMANENCE_TEMPORARY ? vk::VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR : (vk::VkSemaphoreImportFlagBitsKHR)0u;
@@ -778,13 +908,10 @@
const vk::DeviceDriver vkd (vki, *device);
const vk::VkQueue queue (getQueue(vkd, *device, queueFamilyIndex));
const vk::Unique<vk::VkSemaphore> semaphoreA (createExportableSemaphore(vkd, *device, config.externalType));
-
- submitDummySignal(vkd, queue, *semaphoreA);
-
{
NativeHandle handle;
- getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handle);
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
{
const vk::VkSemaphoreImportFlagsKHR flags = config.permanence == PERMANENCE_TEMPORARY ? vk::VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR : (vk::VkSemaphoreImportFlagBitsKHR)0u;
@@ -857,7 +984,7 @@
const vk::Unique<vk::VkSemaphore> semaphoreA (createExportableSemaphore(vkd, *device, config.externalType));
NativeHandle handle;
- getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handle);
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
const vk::Unique<vk::VkSemaphore> semaphoreB (createAndImportSemaphore(vkd, *device, config.externalType, handle, flags));
@@ -896,9 +1023,9 @@
VK_CHECK(vkd.queueWaitIdle(queue));
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *semaphoreA);
-
- getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handle);
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
+ else
+ getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handle);
importSemaphore(vkd, *device, *semaphoreB, config.externalType, handle, flags);
@@ -941,9 +1068,9 @@
NativeHandle handle;
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *semaphoreA);
-
- getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handle);
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
+ else
+ getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handle);
submitDummySignal(vkd, queue, *semaphoreB);
submitDummyWait(vkd, queue, *semaphoreB);
@@ -992,9 +1119,9 @@
NativeHandle handle;
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *semaphore);
-
- getSemaphoreNative(vkd, *device, *semaphore, config.externalType, handle);
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphore, config.externalType, handle);
+ else
+ getSemaphoreNative(vkd, *device, *semaphore, config.externalType, handle);
}
submitDummySignal(vkd, queue, *semaphore);
@@ -1028,9 +1155,9 @@
NativeHandle handleA;
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *semaphoreA);
-
- getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handleA);
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handleA);
+ else
+ getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handleA);
for (size_t importNdx = 0; importNdx < importCount; importNdx++)
{
@@ -1078,9 +1205,7 @@
const vk::Unique<vk::VkSemaphore> semaphoreA (createExportableSemaphore(vkd, *device, config.externalType));
NativeHandle handle;
- submitDummySignal(vkd, queue, *semaphoreA);
-
- getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handle);
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
{
const vk::Unique<vk::VkSemaphore> semaphoreB (createAndImportSemaphore(vkd, *device, config.externalType, handle, flags));
@@ -1173,11 +1298,14 @@
TestLog& log = context.getTestContext().getLog();
const vk::Unique<vk::VkSemaphore> semaphoreA (createExportableSemaphore(vkd, *device, config.externalType));
- if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *semaphoreA);
-
{
- const NativeHandle fd (getSemaphoreFd(vkd, *device, *semaphoreA, config.externalType));
+ NativeHandle fd;
+
+ if (transference == TRANSFERENCE_COPY)
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, fd);
+ else
+ getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, fd);
+
NativeHandle newFd (dup(fd.getFd()));
if (newFd.getFd() < 0)
@@ -1234,15 +1362,20 @@
const vk::Unique<vk::VkSemaphore> semaphoreA (createExportableSemaphore(vkd, *device, config.externalType));
const vk::Unique<vk::VkSemaphore> semaphoreB (createExportableSemaphore(vkd, *device, config.externalType));
- if (transference == TRANSFERENCE_COPY)
{
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummySignal(vkd, queue, *semaphoreB);
- }
+ NativeHandle fd, secondFd;
- {
- const NativeHandle fd (getSemaphoreFd(vkd, *device, *semaphoreA, config.externalType));
- NativeHandle secondFd (getSemaphoreFd(vkd, *device, *semaphoreB, config.externalType));
+ if (transference == TRANSFERENCE_COPY)
+ {
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, fd);
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreB, config.externalType, secondFd);
+ }
+ else
+ {
+ getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, fd);
+ getSemaphoreNative(vkd, *device, *semaphoreB, config.externalType, secondFd);
+ }
+
int newFd (dup2(fd.getFd(), secondFd.getFd()));
if (newFd < 0)
@@ -1298,16 +1431,21 @@
const vk::Unique<vk::VkSemaphore> semaphoreA (createExportableSemaphore(vkd, *device, config.externalType));
const vk::Unique<vk::VkSemaphore> semaphoreB (createExportableSemaphore(vkd, *device, config.externalType));
- if (transference == TRANSFERENCE_COPY)
{
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummySignal(vkd, queue, *semaphoreB);
- }
+ NativeHandle fd, secondFd;
- {
+ if (transference == TRANSFERENCE_COPY)
+ {
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, fd);
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreB, config.externalType, secondFd);
+ }
+ else
+ {
+ getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, fd);
+ getSemaphoreNative(vkd, *device, *semaphoreB, config.externalType, secondFd);
+ }
+
const vk::VkSemaphoreImportFlagsKHR flags = config.permanence == PERMANENCE_TEMPORARY ? vk::VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR : (vk::VkSemaphoreImportFlagBitsKHR)0u;
- const NativeHandle fd (getSemaphoreFd(vkd, *device, *semaphoreA, config.externalType));
- NativeHandle secondFd (getSemaphoreFd(vkd, *device, *semaphoreB, config.externalType));
const int newFd (dup3(fd.getFd(), secondFd.getFd(), 0));
if (newFd < 0)
@@ -1361,11 +1499,12 @@
TestLog& log = context.getTestContext().getLog();
const vk::Unique<vk::VkSemaphore> semaphore (createExportableSemaphore(vkd, *device, config.externalType));
+ NativeHandle fd;
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *semaphore);
-
- const NativeHandle fd (getSemaphoreFd(vkd, *device, *semaphore, config.externalType));
+ submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphore, config.externalType, fd);
+ else
+ getSemaphoreNative(vkd, *device, *semaphore, config.externalType, fd);
{
int sv[2];
@@ -1616,12 +1755,12 @@
const vk::DeviceDriver vkd (vki, *device);
const vk::VkQueue queue (getQueue(vkd, *device, queueFamilyIndex));
const vk::Unique<vk::VkFence> fence (createExportableFence(vkd, *device, config.externalType));
+ NativeHandle handleA;
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *fence);
-
- NativeHandle handleA;
- getFenceNative(vkd, *device, *fence, config.externalType, handleA);
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fence, config.externalType, handleA);
+ else
+ getFenceNative(vkd, *device, *fence, config.externalType, handleA);
{
NativeHandle handleB (handleA);
@@ -1664,12 +1803,12 @@
const vk::VkQueue queue (getQueue(vkd, *device, queueFamilyIndex));
const vk::Unique<vk::VkFence> fenceA (createExportableFence(vkd, *device, config.externalType));
+ NativeHandle handleA;
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *fenceA);
-
- NativeHandle handleA;
- getFenceNative(vkd, *device, *fenceA, config.externalType, handleA);
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handleA);
+ else
+ getFenceNative(vkd, *device, *fenceA, config.externalType, handleA);
NativeHandle handleB (handleA);
const vk::VkFenceImportFlagsKHR flags = config.permanence == PERMANENCE_TEMPORARY ? vk::VK_FENCE_IMPORT_TEMPORARY_BIT_KHR : (vk::VkFenceImportFlagBitsKHR)0u;
@@ -1710,12 +1849,10 @@
const vk::VkQueue queue (getQueue(vkd, *device, queueFamilyIndex));
const vk::Unique<vk::VkFence> fenceA (createExportableFence(vkd, *device, config.externalType));
- submitDummySignal(vkd, queue, *fenceA);
-
{
NativeHandle handle;
- getFenceNative(vkd, *device, *fenceA, config.externalType, handle);
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
{
const vk::VkFenceImportFlagsKHR flags = config.permanence == PERMANENCE_TEMPORARY ? vk::VK_FENCE_IMPORT_TEMPORARY_BIT_KHR : (vk::VkFenceImportFlagBitsKHR)0u;
@@ -1827,9 +1964,9 @@
VK_CHECK(vkd.queueWaitIdle(queue));
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *fenceA);
-
- getFenceNative(vkd, *device, *fenceA, config.externalType, handle);
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
+ else
+ getFenceNative(vkd, *device, *fenceA, config.externalType, handle);
importFence(vkd, *device, *fenceB, config.externalType, handle, flags);
@@ -1875,9 +2012,7 @@
submitDummySignal(vkd, queue, *fenceB);
VK_CHECK(vkd.queueWaitIdle(queue));
- submitDummySignal(vkd, queue, *fenceA);
-
- getFenceNative(vkd, *device, *fenceA, config.externalType, handle);
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
{
NativeHandle handleB (handle);
importFence(vkd, *device, *fenceB, config.externalType, handleB, flags);
@@ -1941,9 +2076,9 @@
NativeHandle handle;
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *fenceA);
-
- getFenceNative(vkd, *device, *fenceA, config.externalType, handle);
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
+ else
+ getFenceNative(vkd, *device, *fenceA, config.externalType, handle);
submitDummySignal(vkd, queue, *fenceB);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
@@ -1992,9 +2127,9 @@
NativeHandle handle;
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *fence);
-
- getFenceNative(vkd, *device, *fence, config.externalType, handle);
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fence, config.externalType, handle);
+ else
+ getFenceNative(vkd, *device, *fence, config.externalType, handle);
}
submitDummySignal(vkd, queue, *fence);
@@ -2028,9 +2163,9 @@
NativeHandle handleA;
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *fenceA);
-
- getFenceNative(vkd, *device, *fenceA, config.externalType, handleA);
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handleA);
+ else
+ getFenceNative(vkd, *device, *fenceA, config.externalType, handleA);
for (size_t importNdx = 0; importNdx < importCount; importNdx++)
{
@@ -2078,9 +2213,7 @@
const vk::Unique<vk::VkFence> fenceA (createExportableFence(vkd, *device, config.externalType));
NativeHandle handle;
- submitDummySignal(vkd, queue, *fenceA);
-
- getFenceNative(vkd, *device, *fenceA, config.externalType, handle);
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
{
const vk::Unique<vk::VkFence> fenceB (createAndImportFence(vkd, *device, config.externalType, handle, flags));
@@ -2179,11 +2312,14 @@
TestLog& log = context.getTestContext().getLog();
const vk::Unique<vk::VkFence> fenceA (createExportableFence(vkd, *device, config.externalType));
- if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *fenceA);
-
{
- const NativeHandle fd (getFenceFd(vkd, *device, *fenceA, config.externalType));
+ NativeHandle fd;
+
+ if (transference == TRANSFERENCE_COPY)
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, fd);
+ else
+ getFenceNative(vkd, *device, *fenceA, config.externalType, fd);
+
NativeHandle newFd (dup(fd.getFd()));
if (newFd.getFd() < 0)
@@ -2240,15 +2376,20 @@
const vk::Unique<vk::VkFence> fenceA (createExportableFence(vkd, *device, config.externalType));
const vk::Unique<vk::VkFence> fenceB (createExportableFence(vkd, *device, config.externalType));
- if (transference == TRANSFERENCE_COPY)
{
- submitDummySignal(vkd, queue, *fenceA);
- submitDummySignal(vkd, queue, *fenceB);
- }
+ NativeHandle fd, secondFd;
- {
- const NativeHandle fd (getFenceFd(vkd, *device, *fenceA, config.externalType));
- NativeHandle secondFd (getFenceFd(vkd, *device, *fenceB, config.externalType));
+ if (transference == TRANSFERENCE_COPY)
+ {
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, fd);
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceB, config.externalType, secondFd);
+ }
+ else
+ {
+ getFenceNative(vkd, *device, *fenceA, config.externalType, fd);
+ getFenceNative(vkd, *device, *fenceB, config.externalType, secondFd);
+ }
+
int newFd (dup2(fd.getFd(), secondFd.getFd()));
if (newFd < 0)
@@ -2304,16 +2445,21 @@
const vk::Unique<vk::VkFence> fenceA (createExportableFence(vkd, *device, config.externalType));
const vk::Unique<vk::VkFence> fenceB (createExportableFence(vkd, *device, config.externalType));
- if (transference == TRANSFERENCE_COPY)
{
- submitDummySignal(vkd, queue, *fenceA);
- submitDummySignal(vkd, queue, *fenceB);
- }
+ NativeHandle fd, secondFd;
- {
+ if (transference == TRANSFERENCE_COPY)
+ {
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, fd);
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceB, config.externalType, secondFd);
+ }
+ else
+ {
+ getFenceNative(vkd, *device, *fenceA, config.externalType, fd);
+ getFenceNative(vkd, *device, *fenceB, config.externalType, secondFd);
+ }
+
const vk::VkFenceImportFlagsKHR flags = config.permanence == PERMANENCE_TEMPORARY ? vk::VK_FENCE_IMPORT_TEMPORARY_BIT_KHR : (vk::VkFenceImportFlagBitsKHR)0u;
- const NativeHandle fd (getFenceFd(vkd, *device, *fenceA, config.externalType));
- NativeHandle secondFd (getFenceFd(vkd, *device, *fenceB, config.externalType));
const int newFd (dup3(fd.getFd(), secondFd.getFd(), 0));
if (newFd < 0)
@@ -2367,11 +2513,12 @@
TestLog& log = context.getTestContext().getLog();
const vk::Unique<vk::VkFence> fence (createExportableFence(vkd, *device, config.externalType));
+ NativeHandle fd;
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *fence);
-
- const NativeHandle fd (getFenceFd(vkd, *device, *fence, config.externalType));
+ submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fence, config.externalType, fd);
+ else
+ getFenceNative(vkd, *device, *fence, config.externalType, fd);
{
int sv[2];
diff --git a/external/vulkancts/modules/vulkan/api/vktApiFeatureInfo.cpp b/external/vulkancts/modules/vulkan/api/vktApiFeatureInfo.cpp
index af8c011..f70b924 100644
--- a/external/vulkancts/modules/vulkan/api/vktApiFeatureInfo.cpp
+++ b/external/vulkancts/modules/vulkan/api/vktApiFeatureInfo.cpp
@@ -104,6 +104,18 @@
{
bool limitsOk = true;
VkPhysicalDeviceLimits* limits = &properties->limits;
+ deUint32 shaderStages = 3;
+
+ if (features->tessellationShader)
+ {
+ shaderStages += 2;
+ }
+
+ if (features->geometryShader)
+ {
+ shaderStages++;
+ }
+
struct FeatureLimitTable
{
deUint32 offset;
@@ -139,13 +151,13 @@
{ LIMIT(maxPerStageDescriptorStorageImages), 4, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN , -1 },
{ LIMIT(maxPerStageDescriptorInputAttachments), 4, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN , -1 },
{ LIMIT(maxPerStageResources), 0, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_NONE , -1 },
- { LIMIT(maxDescriptorSetSamplers), 96, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN, -1 },
- { LIMIT(maxDescriptorSetUniformBuffers), 72, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN , -1 },
+ { LIMIT(maxDescriptorSetSamplers), shaderStages * 16, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN, -1 },
+ { LIMIT(maxDescriptorSetUniformBuffers), shaderStages * 12, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN, -1 },
{ LIMIT(maxDescriptorSetUniformBuffersDynamic), 8, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN, -1 },
- { LIMIT(maxDescriptorSetStorageBuffers), 24, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN , -1 },
+ { LIMIT(maxDescriptorSetStorageBuffers), shaderStages * 4, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN, -1 },
{ LIMIT(maxDescriptorSetStorageBuffersDynamic), 4, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN , -1 },
- { LIMIT(maxDescriptorSetSampledImages), 96, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN , -1 },
- { LIMIT(maxDescriptorSetStorageImages), 24, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN , -1 },
+ { LIMIT(maxDescriptorSetSampledImages), shaderStages * 16, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN, -1 },
+ { LIMIT(maxDescriptorSetStorageImages), shaderStages * 4, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN, -1 },
{ LIMIT(maxDescriptorSetInputAttachments), 0, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_NONE , -1 },
{ LIMIT(maxVertexInputAttributes), 16, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN , -1 },
{ LIMIT(maxVertexInputBindings), 16, 0, 0, 0.0f, LIMIT_FORMAT_UNSIGNED_INT, LIMIT_TYPE_MIN , -1 },
@@ -660,7 +672,6 @@
"VK_KHR_external_memory_capabilities",
"VK_KHR_external_semaphore_capabilities",
"VK_KHR_external_fence_capabilities",
- "VK_KHR_sampler_ycbcr_conversion"
};
checkKhrExtensions(results, extensions, DE_LENGTH_OF_ARRAY(s_allowedInstanceKhrExtensions), s_allowedInstanceKhrExtensions);
diff --git a/external/vulkancts/modules/vulkan/api/vktApiSmokeTests.cpp b/external/vulkancts/modules/vulkan/api/vktApiSmokeTests.cpp
index 41bdd3e..233dcaf 100644
--- a/external/vulkancts/modules/vulkan/api/vktApiSmokeTests.cpp
+++ b/external/vulkancts/modules/vulkan/api/vktApiSmokeTests.cpp
@@ -595,7 +595,7 @@
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // sType
DE_NULL, // pNext
0u, // flags
- VK_TRUE, // depthClampEnable
+ VK_FALSE, // depthClampEnable
VK_FALSE, // rasterizerDiscardEnable
VK_POLYGON_MODE_FILL, // polygonMode
VK_CULL_MODE_NONE, // cullMode
diff --git a/external/vulkancts/modules/vulkan/draw/vktBasicDrawTests.cpp b/external/vulkancts/modules/vulkan/draw/vktBasicDrawTests.cpp
index 65f80a0..e59d45e 100644
--- a/external/vulkancts/modules/vulkan/draw/vktBasicDrawTests.cpp
+++ b/external/vulkancts/modules/vulkan/draw/vktBasicDrawTests.cpp
@@ -319,6 +319,18 @@
const vk::VkDevice device = m_context.getDevice();
const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
+ const vk::VkPhysicalDeviceFeatures features = m_context.getDeviceFeatures();
+
+ if (features.geometryShader == VK_FALSE &&
+ (m_data.topology == vk::VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY ||
+ m_data.topology == vk::VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY ||
+ m_data.topology == vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY ||
+ m_data.topology == vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY)
+ )
+ {
+ TCU_THROW(NotSupportedError, "Geometry Not Supported");
+ }
+
const PipelineLayoutCreateInfo pipelineLayoutCreateInfo;
m_pipelineLayout = vk::createPipelineLayout(m_vk, device, &pipelineLayoutCreateInfo);
diff --git a/external/vulkancts/modules/vulkan/draw/vktDrawInstancedTests.cpp b/external/vulkancts/modules/vulkan/draw/vktDrawInstancedTests.cpp
index 13b80e3..1e5d734 100644
--- a/external/vulkancts/modules/vulkan/draw/vktDrawInstancedTests.cpp
+++ b/external/vulkancts/modules/vulkan/draw/vktDrawInstancedTests.cpp
@@ -121,23 +121,23 @@
}
template<typename T>
-de::SharedPtr<Buffer> createAndUploadBuffer(const std::vector<T> data, const vk::DeviceInterface& vk, const Context& context)
+de::SharedPtr<Buffer> createAndUploadBuffer(const std::vector<T> data, const vk::DeviceInterface& vk, const Context& context, vk::VkBufferUsageFlags usage)
{
const vk::VkDeviceSize dataSize = data.size() * sizeof(T);
- de::SharedPtr<Buffer> vertexBuffer = Buffer::createAndAlloc(vk, context.getDevice(),
- BufferCreateInfo(dataSize, vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT),
- context.getDefaultAllocator(),
- vk::MemoryRequirement::HostVisible);
+ de::SharedPtr<Buffer> buffer = Buffer::createAndAlloc(vk, context.getDevice(),
+ BufferCreateInfo(dataSize, usage),
+ context.getDefaultAllocator(),
+ vk::MemoryRequirement::HostVisible);
- deUint8* ptr = reinterpret_cast<deUint8*>(vertexBuffer->getBoundMemory().getHostPtr());
+ deUint8* ptr = reinterpret_cast<deUint8*>(buffer->getBoundMemory().getHostPtr());
deMemcpy(ptr, &data[0], static_cast<size_t>(dataSize));
vk::flushMappedMemoryRange(vk, context.getDevice(),
- vertexBuffer->getBoundMemory().getMemory(),
- vertexBuffer->getBoundMemory().getOffset(),
+ buffer->getBoundMemory().getMemory(),
+ buffer->getBoundMemory().getOffset(),
VK_WHOLE_SIZE);
- return vertexBuffer;
+ return buffer;
}
class TestVertShader : public rr::VertexShader
@@ -455,8 +455,8 @@
const deUint32 firstInstance = firstInstanceIndices[firstInstanceIndexNdx];
prepareVertexData(instanceCount, firstInstance);
- const de::SharedPtr<Buffer> vertexBuffer = createAndUploadBuffer(m_data, m_vk, m_context);
- const de::SharedPtr<Buffer> instancedVertexBuffer = createAndUploadBuffer(m_instancedColor, m_vk, m_context);
+ const de::SharedPtr<Buffer> vertexBuffer = createAndUploadBuffer(m_data, m_vk, m_context, vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
+ const de::SharedPtr<Buffer> instancedVertexBuffer = createAndUploadBuffer(m_instancedColor, m_vk, m_context, vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
de::SharedPtr<Buffer> indexBuffer;
de::SharedPtr<Buffer> indirectBuffer;
m_vk.beginCommandBuffer(*m_cmdBuffer, &beginInfo);
@@ -486,7 +486,7 @@
if (m_params.function == TestParams::FUNCTION_DRAW_INDEXED || m_params.function == TestParams::FUNCTION_DRAW_INDEXED_INDIRECT)
{
- indexBuffer = createAndUploadBuffer(m_indexes, m_vk, m_context);
+ indexBuffer = createAndUploadBuffer(m_indexes, m_vk, m_context, vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT);
m_vk.cmdBindIndexBuffer(*m_cmdBuffer, indexBuffer->object(), 0, vk::VK_INDEX_TYPE_UINT32);
}
@@ -530,7 +530,7 @@
};
std::vector<vk::VkDrawIndirectCommand> drawCommands;
drawCommands.push_back(drawCommand);
- indirectBuffer = createAndUploadBuffer(drawCommands, m_vk, m_context);
+ indirectBuffer = createAndUploadBuffer(drawCommands, m_vk, m_context, vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT);
m_vk.cmdDrawIndirect(*m_cmdBuffer, indirectBuffer->object(), 0, 1u, 0u);
break;
@@ -547,7 +547,7 @@
};
std::vector<vk::VkDrawIndexedIndirectCommand> drawCommands;
drawCommands.push_back(drawCommand);
- indirectBuffer = createAndUploadBuffer(drawCommands, m_vk, m_context);
+ indirectBuffer = createAndUploadBuffer(drawCommands, m_vk, m_context, vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT);
m_vk.cmdDrawIndexedIndirect(*m_cmdBuffer, indirectBuffer->object(), 0, 1u, 0u);
break;
diff --git a/external/vulkancts/modules/vulkan/memory/vktMemoryPipelineBarrierTests.cpp b/external/vulkancts/modules/vulkan/memory/vktMemoryPipelineBarrierTests.cpp
index 7069ef4..29476ab 100644
--- a/external/vulkancts/modules/vulkan/memory/vktMemoryPipelineBarrierTests.cpp
+++ b/external/vulkancts/modules/vulkan/memory/vktMemoryPipelineBarrierTests.cpp
@@ -83,7 +83,8 @@
enum
{
MAX_UNIFORM_BUFFER_SIZE = 1024,
- MAX_STORAGE_BUFFER_SIZE = (1<<28)
+ MAX_STORAGE_BUFFER_SIZE = (1<<28),
+ MAX_SIZE = (128 * 1024)
};
// \todo [mika] Add to utilities
@@ -6129,7 +6130,7 @@
const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.frag"), 0));
vector<vk::VkDescriptorSetLayoutBinding> bindings;
- m_bufferSize = context.getBufferSize();
+ m_bufferSize = de::min(context.getBufferSize(), (vk::VkDeviceSize)MAX_SIZE);
m_targetWidth = context.getTargetWidth();
m_targetHeight = context.getTargetHeight();
@@ -8345,6 +8346,71 @@
DE_FATAL("Unknown stage");
}
+void removeIllegalAccessFlags (vk::VkAccessFlags& accessflags, vk::VkPipelineStageFlags stageflags)
+{
+ if (!(stageflags & vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT))
+ accessflags &= ~vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+
+ if (!(stageflags & vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT))
+ accessflags &= ~vk::VK_ACCESS_INDEX_READ_BIT;
+
+ if (!(stageflags & vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT))
+ accessflags &= ~vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+
+ if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
+ accessflags &= ~vk::VK_ACCESS_UNIFORM_READ_BIT;
+
+ if (!(stageflags & vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT))
+ accessflags &= ~vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
+
+ if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
+ accessflags &= ~vk::VK_ACCESS_SHADER_READ_BIT;
+
+ if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
+ accessflags &= ~vk::VK_ACCESS_SHADER_WRITE_BIT;
+
+ if (!(stageflags & vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT))
+ accessflags &= ~vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
+
+ if (!(stageflags & vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT))
+ accessflags &= ~vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+
+ if (!(stageflags & (vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
+ vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)))
+ accessflags &= ~vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
+
+ if (!(stageflags & (vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
+ vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)))
+ accessflags &= ~vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+
+ if (!(stageflags & vk::VK_PIPELINE_STAGE_TRANSFER_BIT))
+ accessflags &= ~vk::VK_ACCESS_TRANSFER_READ_BIT;
+
+ if (!(stageflags & vk::VK_PIPELINE_STAGE_TRANSFER_BIT))
+ accessflags &= ~vk::VK_ACCESS_TRANSFER_WRITE_BIT;
+
+ if (!(stageflags & vk::VK_PIPELINE_STAGE_HOST_BIT))
+ accessflags &= ~vk::VK_ACCESS_HOST_READ_BIT;
+
+ if (!(stageflags & vk::VK_PIPELINE_STAGE_HOST_BIT))
+ accessflags &= ~vk::VK_ACCESS_HOST_WRITE_BIT;
+}
+
void applyOp (State& state, const Memory& memory, Op op, Usage usage)
{
switch (op)
@@ -8486,6 +8552,9 @@
if (!srcStages)
srcStages = dstStages;
+ removeIllegalAccessFlags(dstAccesses, dstStages);
+ removeIllegalAccessFlags(srcAccesses, srcStages);
+
if (srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED)
state.imageDefined = false;
@@ -8615,6 +8684,9 @@
if (!srcStages)
srcStages = dstStages;
+ removeIllegalAccessFlags(dstAccesses, dstStages);
+ removeIllegalAccessFlags(srcAccesses, srcStages);
+
state.commandBufferIsEmpty = false;
state.cache.barrier(srcStages, srcAccesses, dstStages, dstAccesses);
break;
@@ -8804,6 +8876,9 @@
if (!srcStages)
srcStages = dstStages;
+ removeIllegalAccessFlags(dstAccesses, dstStages);
+ removeIllegalAccessFlags(srcAccesses, srcStages);
+
return de::MovePtr<CmdCommand>(new ImageTransition(srcStages, srcAccesses, dstStages, dstAccesses, srcLayout, dstLayout));
}
@@ -8853,6 +8928,9 @@
if (!srcStages)
srcStages = dstStages;
+ removeIllegalAccessFlags(dstAccesses, dstStages);
+ removeIllegalAccessFlags(srcAccesses, srcStages);
+
PipelineBarrier::Type type;
if (op == OP_PIPELINE_BARRIER_IMAGE)
diff --git a/external/vulkancts/modules/vulkan/pipeline/vktPipelineMultisampleInterpolationTests.cpp b/external/vulkancts/modules/vulkan/pipeline/vktPipelineMultisampleInterpolationTests.cpp
index 46f94b5..dc09eeb 100644
--- a/external/vulkancts/modules/vulkan/pipeline/vktPipelineMultisampleInterpolationTests.cpp
+++ b/external/vulkancts/modules/vulkan/pipeline/vktPipelineMultisampleInterpolationTests.cpp
@@ -899,6 +899,8 @@
TestInstance* MSCaseSampleQualifierDistinctValues::createInstance (Context& context) const
{
+ if (!context.getDeviceFeatures().sampleRateShading)
+ TCU_THROW(NotSupportedError, "sampleRateShading support required");
return new MSInstanceDistinctValues(context, m_imageMSParams);
}
diff --git a/external/vulkancts/modules/vulkan/renderpass/vktRenderPassTests.cpp b/external/vulkancts/modules/vulkan/renderpass/vktRenderPassTests.cpp
index 68faea6..d00cf5f 100644
--- a/external/vulkancts/modules/vulkan/renderpass/vktRenderPassTests.cpp
+++ b/external/vulkancts/modules/vulkan/renderpass/vktRenderPassTests.cpp
@@ -677,7 +677,7 @@
const vector<Attachment>& getAttachments (void) const { return m_attachments; }
const vector<Subpass>& getSubpasses (void) const { return m_subpasses; }
const vector<SubpassDependency>& getDependencies (void) const { return m_dependencies; }
- const vector<VkInputAttachmentAspectReferenceKHR> getInputAspects (void) const { return m_inputAspects; }
+ const vector<VkInputAttachmentAspectReferenceKHR>& getInputAspects (void) const { return m_inputAspects; }
private:
const vector<Attachment> m_attachments;
diff --git a/external/vulkancts/modules/vulkan/shaderexecutor/vktShaderBuiltinPrecisionTests.cpp b/external/vulkancts/modules/vulkan/shaderexecutor/vktShaderBuiltinPrecisionTests.cpp
index 4e406d8..a5c09c7 100644
--- a/external/vulkancts/modules/vulkan/shaderexecutor/vktShaderBuiltinPrecisionTests.cpp
+++ b/external/vulkancts/modules/vulkan/shaderexecutor/vktShaderBuiltinPrecisionTests.cpp
@@ -63,6 +63,15 @@
// set this to true to dump even passing results
#define GLS_LOG_ALL_RESULTS false
+enum
+{
+ // Computing reference intervals can take a non-trivial amount of time, especially on
+ // platforms where toggling floating-point rounding mode is slow (emulated arm on x86).
+	// As a workaround, the watchdog is kept happy by touching it periodically during reference
+ // interval computation.
+ TOUCH_WATCHDOG_VALUE_FREQUENCY = 4096
+};
+
namespace vkt
{
namespace shaderexecutor
@@ -4489,6 +4498,9 @@
typename Traits<Out0>::IVal reference0;
typename Traits<Out1>::IVal reference1;
+ if (valueNdx % (size_t)TOUCH_WATCHDOG_VALUE_FREQUENCY == 0)
+ m_context.getTestContext().touchWatchdog();
+
env.lookup(*m_variables.in0) = convert<In0>(fmt, round(fmt, inputs.in0[valueNdx]));
env.lookup(*m_variables.in1) = convert<In1>(fmt, round(fmt, inputs.in1[valueNdx]));
env.lookup(*m_variables.in2) = convert<In2>(fmt, round(fmt, inputs.in2[valueNdx]));
diff --git a/external/vulkancts/modules/vulkan/shaderrender/vktShaderRenderBuiltinVarTests.cpp b/external/vulkancts/modules/vulkan/shaderrender/vktShaderRenderBuiltinVarTests.cpp
index eaedb30..a3b977a 100644
--- a/external/vulkancts/modules/vulkan/shaderrender/vktShaderRenderBuiltinVarTests.cpp
+++ b/external/vulkancts/modules/vulkan/shaderrender/vktShaderRenderBuiltinVarTests.cpp
@@ -953,8 +953,8 @@
drawState.numSamples = m_samples;
drawState.sampleShadingEnable = true;
- vulkanProgram.descriptorSetLayout = descriptorSetLayout;
- vulkanProgram.descriptorSet = descriptorSet;
+ vulkanProgram.descriptorSetLayout = *descriptorSetLayout;
+ vulkanProgram.descriptorSet = *descriptorSet;
VulkanDrawContext vulkanDrawContext(m_context, drawState, drawCallData, vulkanProgram);
vulkanDrawContext.draw();
@@ -1502,8 +1502,8 @@
drawState.numSamples = m_sampleCount;
drawState.sampleShadingEnable = true;
- vulkanProgram.descriptorSetLayout = descriptorSetLayout;
- vulkanProgram.descriptorSet = descriptorSet;
+ vulkanProgram.descriptorSetLayout = *descriptorSetLayout;
+ vulkanProgram.descriptorSet = *descriptorSet;
VulkanDrawContext vulkanDrawContext(m_context, drawState, drawCallData, vulkanProgram);
vulkanDrawContext.draw();
diff --git a/external/vulkancts/modules/vulkan/shaderrender/vktShaderRenderMatrixTests.cpp b/external/vulkancts/modules/vulkan/shaderrender/vktShaderRenderMatrixTests.cpp
index 4d40ddf..e5511c9 100644
--- a/external/vulkancts/modules/vulkan/shaderrender/vktShaderRenderMatrixTests.cpp
+++ b/external/vulkancts/modules/vulkan/shaderrender/vktShaderRenderMatrixTests.cpp
@@ -1434,10 +1434,10 @@
for (int attribNdx = 0; attribNdx < 4; attribNdx++)
{
m_userAttribTransforms[attribNdx] = Mat4(0.0f);
- m_userAttribTransforms[attribNdx]( 0, 3) = 0.2f; // !< prevent matrix*vec from going into zero (assuming vec.w != 0)
- m_userAttribTransforms[attribNdx]( 1, 3) = 0.1f; // !<
- m_userAttribTransforms[attribNdx]( 2, 3) = 0.4f + 0.15f * float(attribNdx); // !<
- m_userAttribTransforms[attribNdx]( 3, 3) = 0.7f; // !<
+ m_userAttribTransforms[attribNdx]( 0, 3) = (op == OP_INVERSE ? -0.5f : 0.2f); // prevent matrix*vec from going into zero (assuming vec.w != 0).
+ m_userAttribTransforms[attribNdx]( 1, 3) = (op == OP_INVERSE ? -1.3f : 0.1f); // Modified input for OP_INVERSE case, as determinant of final input
+ m_userAttribTransforms[attribNdx]( 2, 3) = 0.4f + 0.15f * float(attribNdx); // matrix is spanning both sides of 0, so 0 (and division by 0) may happen on mediump.
+ m_userAttribTransforms[attribNdx]( 3, 3) = (op == OP_INVERSE ? -3.0f : 0.7f); // Modified OP_INVERSE final input matrix is same signed in whole input range.
m_userAttribTransforms[attribNdx]((0 + attribNdx) % 4, 0) = 1.0f;
m_userAttribTransforms[attribNdx]((1 + attribNdx) % 4, 1) = 1.0f;
m_userAttribTransforms[attribNdx]((2 + attribNdx) % 4, 2) = 1.0f;
diff --git a/external/vulkancts/modules/vulkan/spirv_assembly/vktSpvAsmInstructionTests.cpp b/external/vulkancts/modules/vulkan/spirv_assembly/vktSpvAsmInstructionTests.cpp
index 848f1ce..0b556ac 100644
--- a/external/vulkancts/modules/vulkan/spirv_assembly/vktSpvAsmInstructionTests.cpp
+++ b/external/vulkancts/modules/vulkan/spirv_assembly/vktSpvAsmInstructionTests.cpp
@@ -5851,16 +5851,21 @@
if (m_features == COMPUTE_TEST_USES_INT16)
{
- m_asmTypes["int_capabilities"] = "OpCapability Int16\n";
+ m_asmTypes["int_capabilities"] = "OpCapability Int16\n"
+ "OpCapability StorageUniformBufferBlock16\n";
+ m_asmTypes["int_extensions"] = "OpExtension \"SPV_KHR_16bit_storage\"\n";
}
else if (m_features == COMPUTE_TEST_USES_INT64)
{
m_asmTypes["int_capabilities"] = "OpCapability Int64\n";
+ m_asmTypes["int_extensions"] = "";
}
else if (m_features == COMPUTE_TEST_USES_INT16_INT64)
{
- m_asmTypes["int_capabilities"] = string("OpCapability Int16\n") +
- "OpCapability Int64\n";
+ m_asmTypes["int_capabilities"] = "OpCapability Int16\n"
+ "OpCapability StorageUniformBufferBlock16\n"
+ "OpCapability Int64\n";
+ m_asmTypes["int_extensions"] = "OpExtension \"SPV_KHR_16bit_storage\"\n";
}
else
{
@@ -5889,6 +5894,7 @@
const StringTemplate shader (
"OpCapability Shader\n"
"${int_capabilities}"
+ "${int_extensions}"
"OpMemoryModel Logical GLSL450\n"
"OpEntryPoint GLCompute %main \"main\" %id\n"
"OpExecutionMode %main LocalSize 1 1 1\n"
@@ -5981,6 +5987,11 @@
spec.outputs.push_back(test->m_outputBuffer);
spec.numWorkGroups = IVec3(1, 1, 1);
+ if (test->m_features == COMPUTE_TEST_USES_INT16 || test->m_features == COMPUTE_TEST_USES_INT16_INT64)
+ {
+ spec.extensions.push_back("VK_KHR_16bit_storage");
+ }
+
group->addChild(new SpvAsmComputeShaderCase(testCtx, test->m_name.c_str(), "Convert integers with OpSConvert.", spec, test->m_features));
}
@@ -6019,6 +6030,11 @@
spec.outputs.push_back(test->m_outputBuffer);
spec.numWorkGroups = IVec3(1, 1, 1);
+ if (test->m_features == COMPUTE_TEST_USES_INT16 || test->m_features == COMPUTE_TEST_USES_INT16_INT64)
+ {
+ spec.extensions.push_back("VK_KHR_16bit_storage");
+ }
+
group->addChild(new SpvAsmComputeShaderCase(testCtx, test->m_name.c_str(), "Convert integers with OpUConvert.", spec, test->m_features));
}
return group.release();
diff --git a/external/vulkancts/modules/vulkan/spirv_assembly/vktSpvAsmVariablePointersTests.cpp b/external/vulkancts/modules/vulkan/spirv_assembly/vktSpvAsmVariablePointersTests.cpp
index 758e7e5..9224c9f 100644
--- a/external/vulkancts/modules/vulkan/spirv_assembly/vktSpvAsmVariablePointersTests.cpp
+++ b/external/vulkancts/modules/vulkan/spirv_assembly/vktSpvAsmVariablePointersTests.cpp
@@ -1665,6 +1665,7 @@
"OpDecorate %arr2_v4float ArrayStride 16 \n"
"OpDecorate %arr2_inner_struct ArrayStride 64 \n"
"OpDecorate %mat2x2_inner_struct ArrayStride 128 \n"
+ "OpDecorate %mat2x2_ptr ArrayStride 128 \n"
"OpDecorate %sb_buf ArrayStride 256 \n"
"OpDecorate %v4f32_ptr ArrayStride 16 \n"
@@ -1743,6 +1744,10 @@
"%param = OpFunctionParameter %v4f32\n"
"%entry = OpLabel\n"
+ // Define base pointers for OpPtrAccessChain
+ "%in_a_matptr = OpAccessChain %mat2x2_ptr %in_a %c_i32_0\n"
+ "%in_b_matptr = OpAccessChain %mat2x2_ptr %in_b %c_i32_0\n"
+
// Define the 2 pointers from which we're going to choose one.
"${a_loc} \n"
"${b_loc} \n"
@@ -1799,34 +1804,34 @@
"%b_loc = OpAccessChain %sb_f32ptr %in_b %c_i32_0 %c_i32_1 %c_i32_1 %c_i32_1 %c_i32_1 %c_i32_3"};
const string inputAPtrAccessChain[] = { "",
- "%a_loc = OpPtrAccessChain %mat2x2_ptr %in_a %c_i32_0 %c_i32_0",
- "%a_loc = OpPtrAccessChain %arr2_ptr %in_a %c_i32_0 %c_i32_0 %c_i32_0",
- "%a_loc = OpPtrAccessChain %inner_struct_ptr %in_a %c_i32_0 %c_i32_0 %c_i32_1 %c_i32_1",
- "%a_loc = OpPtrAccessChain %arr_v4f32_ptr %in_a %c_i32_0 %c_i32_0 %c_i32_0 %c_i32_0 %c_i32_1",
- "%a_loc = OpPtrAccessChain %v4f32_ptr %in_a %c_i32_0 %c_i32_0 %c_i32_1 %c_i32_0 %c_i32_0 %c_i32_0",
+ "%a_loc = OpPtrAccessChain %mat2x2_ptr %in_a_matptr %c_i32_0",
+ "%a_loc = OpPtrAccessChain %arr2_ptr %in_a_matptr %c_i32_0 %c_i32_0",
+ "%a_loc = OpPtrAccessChain %inner_struct_ptr %in_a_matptr %c_i32_0 %c_i32_1 %c_i32_1",
+ "%a_loc = OpPtrAccessChain %arr_v4f32_ptr %in_a_matptr %c_i32_0 %c_i32_0 %c_i32_0 %c_i32_1",
+ "%a_loc = OpPtrAccessChain %v4f32_ptr %in_a_matptr %c_i32_0 %c_i32_1 %c_i32_0 %c_i32_0 %c_i32_0",
// Next case emulates:
- // %a_loc = OpPtrAccessChain %sb_f32ptr %in_a %c_i32_0 %c_i32_0 %c_i32_1 %c_i32_1 %c_i32_1 %c_i32_1 %c_i32_3
+ // %a_loc = OpPtrAccessChain %sb_f32ptr %in_a_matptr %c_i32_0 %c_i32_1 %c_i32_1 %c_i32_1 %c_i32_1 %c_i32_3
// But rewrite it to exercise OpPtrAccessChain with a non-zero first index:
// %a_loc_arr is a pointer to an array that we want to index with 1.
// But instead of just using OpAccessChain with first index 1, use OpAccessChain with index 0 to
// get a pointer to the first element, then send that into OpPtrAccessChain with index 1.
- "%a_loc_arr = OpPtrAccessChain %arr_v4f32_ptr %in_a %c_i32_0 %c_i32_0 %c_i32_1 %c_i32_1 %c_i32_1 "
+ "%a_loc_arr = OpPtrAccessChain %arr_v4f32_ptr %in_a_matptr %c_i32_0 %c_i32_1 %c_i32_1 %c_i32_1 "
"%a_loc_first_elem = OpAccessChain %v4f32_ptr %a_loc_arr %c_i32_0 "
"%a_loc = OpPtrAccessChain %sb_f32ptr %a_loc_first_elem %c_i32_1 %c_i32_3"};
const string inputBPtrAccessChain[] = { "",
- "%b_loc = OpPtrAccessChain %mat2x2_ptr %in_b %c_i32_0 %c_i32_0",
- "%b_loc = OpPtrAccessChain %arr2_ptr %in_b %c_i32_0 %c_i32_0 %c_i32_0",
- "%b_loc = OpPtrAccessChain %inner_struct_ptr %in_b %c_i32_0 %c_i32_0 %c_i32_1 %c_i32_1",
- "%b_loc = OpPtrAccessChain %arr_v4f32_ptr %in_b %c_i32_0 %c_i32_0 %c_i32_0 %c_i32_0 %c_i32_1",
- "%b_loc = OpPtrAccessChain %v4f32_ptr %in_b %c_i32_0 %c_i32_0 %c_i32_1 %c_i32_0 %c_i32_0 %c_i32_0",
+ "%b_loc = OpPtrAccessChain %mat2x2_ptr %in_b_matptr %c_i32_0",
+ "%b_loc = OpPtrAccessChain %arr2_ptr %in_b_matptr %c_i32_0 %c_i32_0",
+ "%b_loc = OpPtrAccessChain %inner_struct_ptr %in_b_matptr %c_i32_0 %c_i32_1 %c_i32_1",
+ "%b_loc = OpPtrAccessChain %arr_v4f32_ptr %in_b_matptr %c_i32_0 %c_i32_0 %c_i32_0 %c_i32_1",
+ "%b_loc = OpPtrAccessChain %v4f32_ptr %in_b_matptr %c_i32_0 %c_i32_1 %c_i32_0 %c_i32_0 %c_i32_0",
// Next case emulates:
- // %b_loc = OpPtrAccessChain %sb_f32ptr %in_b %c_i32_0 %c_i32_0 %c_i32_1 %c_i32_1 %c_i32_1 %c_i32_1 %c_i32_3
+ // %b_loc = OpPtrAccessChain %sb_f32ptr %in_b_matptr %c_i32_0 %c_i32_1 %c_i32_1 %c_i32_1 %c_i32_1 %c_i32_3
// But rewrite it to exercise OpPtrAccessChain with a non-zero first index:
// %b_loc_arr is a pointer to an array that we want to index with 1.
// But instead of just using OpAccessChain with first index 1, use OpAccessChain with index 0 to
// get a pointer to the first element, then send that into OpPtrAccessChain with index 1.
- "%b_loc_arr = OpPtrAccessChain %arr_v4f32_ptr %in_b %c_i32_0 %c_i32_0 %c_i32_1 %c_i32_1 %c_i32_1 "
+ "%b_loc_arr = OpPtrAccessChain %arr_v4f32_ptr %in_b_matptr %c_i32_0 %c_i32_1 %c_i32_1 %c_i32_1 "
"%b_loc_first_elem = OpAccessChain %v4f32_ptr %b_loc_arr %c_i32_0 "
"%b_loc = OpPtrAccessChain %sb_f32ptr %b_loc_first_elem %c_i32_1 %c_i32_3"};
@@ -2466,7 +2471,6 @@
// VARIABLES
"%input = OpVariable %float_struct_ptr StorageBuffer \n"
- "%float_var = OpVariable %sb_f32ptr StorageBuffer %c_null_ptr \n"
);
const StringTemplate testFunction (
@@ -2488,7 +2492,7 @@
"OpReturnValue %output_color\n"
"OpFunctionEnd\n");
- // float_var has been inintialized to NULL.
+	// f32_ptr_var has been initialized to NULL.
// Now set it to the input variable and return it as output
{
GraphicsResources resources;
diff --git a/external/vulkancts/modules/vulkan/synchronization/vktSynchronizationCrossInstanceSharingTests.cpp b/external/vulkancts/modules/vulkan/synchronization/vktSynchronizationCrossInstanceSharingTests.cpp
index 1b8d48e..fbe2749 100644
--- a/external/vulkancts/modules/vulkan/synchronization/vktSynchronizationCrossInstanceSharingTests.cpp
+++ b/external/vulkancts/modules/vulkan/synchronization/vktSynchronizationCrossInstanceSharingTests.cpp
@@ -72,6 +72,178 @@
const bool dedicated;
};
+// A helper class that checks for required extensions upfront and throws NotSupported early, speeding up test runtimes compared
+// to failing only after creating unnecessary VkInstances. A common example is win32 platforms taking a long time to run _fd tests.
+class NotSupportedChecker
+{
+public:
+ NotSupportedChecker (const Context& context,
+ TestConfig config,
+ const OperationSupport& writeOp,
+ const OperationSupport& readOp)
+ : m_context (context)
+ {
+ // Check instance support
+ requireInstanceExtension("VK_KHR_get_physical_device_properties2");
+
+ requireInstanceExtension("VK_KHR_external_semaphore_capabilities");
+ requireInstanceExtension("VK_KHR_external_memory_capabilities");
+
+ // Check device support
+ if (config.dedicated)
+ requireDeviceExtension("VK_KHR_dedicated_allocation");
+
+ requireDeviceExtension("VK_KHR_external_semaphore");
+ requireDeviceExtension("VK_KHR_external_memory");
+
+ if (config.memoryHandleType == vk::VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR
+ || config.semaphoreHandleType == vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR
+ || config.semaphoreHandleType == vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR)
+ {
+ requireDeviceExtension("VK_KHR_external_semaphore_fd");
+ requireDeviceExtension("VK_KHR_external_memory_fd");
+ }
+
+ if (config.memoryHandleType == vk::VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR
+ || config.memoryHandleType == vk::VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR
+ || config.semaphoreHandleType == vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR
+ || config.semaphoreHandleType == vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR)
+ {
+ requireDeviceExtension("VK_KHR_external_semaphore_win32");
+ requireDeviceExtension("VK_KHR_external_memory_win32");
+ }
+
+ TestLog& log = context.getTestContext().getLog();
+ const vk::InstanceInterface& vki = context.getInstanceInterface();
+ const vk::VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
+
+ // Check resource support
+ if (config.resource.type == RESOURCE_TYPE_IMAGE)
+ {
+ const vk::VkPhysicalDeviceExternalImageFormatInfoKHR externalInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR,
+ DE_NULL,
+ config.memoryHandleType
+ };
+ const vk::VkPhysicalDeviceImageFormatInfo2KHR imageFormatInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR,
+ &externalInfo,
+ config.resource.imageFormat,
+ config.resource.imageType,
+ vk::VK_IMAGE_TILING_OPTIMAL,
+ readOp.getResourceUsageFlags() | writeOp.getResourceUsageFlags(),
+ 0u
+ };
+ vk::VkExternalImageFormatPropertiesKHR externalProperties =
+ {
+ vk::VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR,
+ DE_NULL,
+ { 0u, 0u, 0u }
+ };
+ vk::VkImageFormatProperties2KHR formatProperties =
+ {
+ vk::VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR,
+ &externalProperties,
+ {
+ { 0u, 0u, 0u },
+ 0u,
+ 0u,
+ 0u,
+ 0u,
+ }
+ };
+
+ {
+ const vk::VkResult res = vki.getPhysicalDeviceImageFormatProperties2KHR(physicalDevice, &imageFormatInfo, &formatProperties);
+
+ if (res == vk::VK_ERROR_FORMAT_NOT_SUPPORTED)
+ TCU_THROW(NotSupportedError, "Image format not supported");
+
+ VK_CHECK(res); // Check other errors
+ }
+
+ log << TestLog::Message << "External image format properties: " << imageFormatInfo << "\n"<< externalProperties << TestLog::EndMessage;
+
+ if ((externalProperties.externalMemoryProperties.externalMemoryFeatures & vk::VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) == 0)
+ TCU_THROW(NotSupportedError, "Exporting image resource not supported");
+
+ if ((externalProperties.externalMemoryProperties.externalMemoryFeatures & vk::VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) == 0)
+ TCU_THROW(NotSupportedError, "Importing image resource not supported");
+
+ if (!config.dedicated && (externalProperties.externalMemoryProperties.externalMemoryFeatures & vk::VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR) != 0)
+ {
+ TCU_THROW(NotSupportedError, "Handle requires dedicated allocation, but test uses suballocated memory");
+ }
+ }
+ else
+ {
+ const vk::VkPhysicalDeviceExternalBufferInfoKHR info =
+ {
+ vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR,
+ DE_NULL,
+
+ 0u,
+ readOp.getResourceUsageFlags() | writeOp.getResourceUsageFlags(),
+ config.memoryHandleType
+ };
+ vk::VkExternalBufferPropertiesKHR properties =
+ {
+ vk::VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR,
+ DE_NULL,
+ { 0u, 0u, 0u}
+ };
+ vki.getPhysicalDeviceExternalBufferPropertiesKHR(physicalDevice, &info, &properties);
+
+ log << TestLog::Message << "External buffer properties: " << info << "\n" << properties << TestLog::EndMessage;
+
+ if ((properties.externalMemoryProperties.externalMemoryFeatures & vk::VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) == 0
+ || (properties.externalMemoryProperties.externalMemoryFeatures & vk::VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) == 0)
+ TCU_THROW(NotSupportedError, "Exporting and importing memory type not supported");
+
+ if (!config.dedicated && (properties.externalMemoryProperties.externalMemoryFeatures & vk::VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR) != 0)
+ {
+ TCU_THROW(NotSupportedError, "Handle requires dedicated allocation, but test uses suballocated memory");
+ }
+ }
+
+ // Check semaphore support
+ {
+ const vk::VkPhysicalDeviceExternalSemaphoreInfoKHR info =
+ {
+ vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR,
+ DE_NULL,
+ config.semaphoreHandleType
+ };
+ vk::VkExternalSemaphorePropertiesKHR properties;
+
+ vki.getPhysicalDeviceExternalSemaphorePropertiesKHR(physicalDevice, &info, &properties);
+
+ log << TestLog::Message << info << "\n" << properties << TestLog::EndMessage;
+
+ if ((properties.externalSemaphoreFeatures & vk::VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) == 0
+ || (properties.externalSemaphoreFeatures & vk::VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR) == 0)
+ TCU_THROW(NotSupportedError, "Exporting and importing semaphore type not supported");
+ }
+ }
+
+private:
+ void requireDeviceExtension(const char* name) const
+ {
+ if (!de::contains(m_context.getDeviceExtensions().begin(), m_context.getDeviceExtensions().end(), name))
+ TCU_THROW(NotSupportedError, (std::string(name) + " is not supported").c_str());
+ }
+
+ void requireInstanceExtension(const char* name) const
+ {
+ if (!de::contains(m_context.getInstanceExtensions().begin(), m_context.getInstanceExtensions().end(), name))
+ TCU_THROW(NotSupportedError, (std::string(name) + " is not supported").c_str());
+ }
+
+ const Context& m_context;
+};
+
bool checkQueueFlags (vk::VkQueueFlags availableFlags, const vk::VkQueueFlags neededFlags)
{
if ((availableFlags & (vk::VK_QUEUE_GRAPHICS_BIT | vk::VK_QUEUE_COMPUTE_BIT)) != 0)
@@ -820,6 +992,7 @@
const TestConfig m_config;
const de::UniquePtr<OperationSupport> m_supportWriteOp;
const de::UniquePtr<OperationSupport> m_supportReadOp;
+ const NotSupportedChecker m_notSupportedChecker; // Must declare before VkInstance to effectively reduce runtimes!
const vk::Unique<vk::VkInstance> m_instanceA;
@@ -857,6 +1030,7 @@
, m_config (config)
, m_supportWriteOp (makeOperationSupport(config.writeOp, config.resource))
, m_supportReadOp (makeOperationSupport(config.readOp, config.resource))
+ , m_notSupportedChecker (context, m_config, *m_supportWriteOp, *m_supportReadOp)
, m_instanceA (createInstance(context.getPlatformInterface()))
@@ -884,117 +1058,6 @@
, m_queueANdx (0)
, m_queueBNdx (0)
{
- TestLog& log = m_context.getTestContext().getLog();
-
- // Check resource support
- if (m_config.resource.type == RESOURCE_TYPE_IMAGE)
- {
- const vk::VkPhysicalDeviceExternalImageFormatInfoKHR externalInfo =
- {
- vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR,
- DE_NULL,
- m_memoryHandleType
- };
- const vk::VkPhysicalDeviceImageFormatInfo2KHR imageFormatInfo =
- {
- vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR,
- &externalInfo,
- m_config.resource.imageFormat,
- m_config.resource.imageType,
- vk::VK_IMAGE_TILING_OPTIMAL,
- m_supportReadOp->getResourceUsageFlags() | m_supportWriteOp->getResourceUsageFlags(),
- 0u
- };
- vk::VkExternalImageFormatPropertiesKHR externalProperties =
- {
- vk::VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR,
- DE_NULL,
- { 0u, 0u, 0u }
- };
- vk::VkImageFormatProperties2KHR formatProperties =
- {
- vk::VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR,
- &externalProperties,
- {
- { 0u, 0u, 0u },
- 0u,
- 0u,
- 0u,
- 0u,
- }
- };
-
- {
- const vk::VkResult res = m_vkiA.getPhysicalDeviceImageFormatProperties2KHR(m_physicalDeviceA, &imageFormatInfo, &formatProperties);
-
- if (res == vk::VK_ERROR_FORMAT_NOT_SUPPORTED)
- TCU_THROW(NotSupportedError, "Image format not supported");
-
- VK_CHECK(res); // Check other errors
- }
-
- log << TestLog::Message << "External image format properties: " << imageFormatInfo << "\n"<< externalProperties << TestLog::EndMessage;
-
- if ((externalProperties.externalMemoryProperties.externalMemoryFeatures & vk::VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) == 0)
- TCU_THROW(NotSupportedError, "Exporting image resource not supported");
-
- if ((externalProperties.externalMemoryProperties.externalMemoryFeatures & vk::VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) == 0)
- TCU_THROW(NotSupportedError, "Importing image resource not supported");
-
- if (!m_config.dedicated && (externalProperties.externalMemoryProperties.externalMemoryFeatures & vk::VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR) != 0)
- {
- TCU_THROW(NotSupportedError, "Handle requires dedicated allocation, but test uses suballocated memory");
- }
- }
- else
- {
- const vk::VkPhysicalDeviceExternalBufferInfoKHR info =
- {
- vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR,
- DE_NULL,
-
- 0u,
- m_supportReadOp->getResourceUsageFlags() | m_supportWriteOp->getResourceUsageFlags(),
- m_memoryHandleType
- };
- vk::VkExternalBufferPropertiesKHR properties =
- {
- vk::VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR,
- DE_NULL,
- { 0u, 0u, 0u}
- };
- m_vkiA.getPhysicalDeviceExternalBufferPropertiesKHR(m_physicalDeviceA, &info, &properties);
-
- log << TestLog::Message << "External buffer properties: " << info << "\n" << properties << TestLog::EndMessage;
-
- if ((properties.externalMemoryProperties.externalMemoryFeatures & vk::VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) == 0
- || (properties.externalMemoryProperties.externalMemoryFeatures & vk::VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) == 0)
- TCU_THROW(NotSupportedError, "Exporting and importing memory type not supported");
-
- if (!m_config.dedicated && (properties.externalMemoryProperties.externalMemoryFeatures & vk::VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR) != 0)
- {
- TCU_THROW(NotSupportedError, "Handle requires dedicated allocation, but test uses suballocated memory");
- }
- }
-
- // Check semaphore support
- {
- const vk::VkPhysicalDeviceExternalSemaphoreInfoKHR info =
- {
- vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR,
- DE_NULL,
- m_semaphoreHandleType
- };
- vk::VkExternalSemaphorePropertiesKHR properties;
-
- m_vkiA.getPhysicalDeviceExternalSemaphorePropertiesKHR(m_physicalDeviceA, &info, &properties);
-
- log << TestLog::Message << info << "\n" << properties << TestLog::EndMessage;
-
- if ((properties.externalSemaphoreFeatures & vk::VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) == 0
- || (properties.externalSemaphoreFeatures & vk::VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR) == 0)
- TCU_THROW(NotSupportedError, "Exporting and importing semaphore type not supported");
- }
}
tcu::TestStatus SharingTestInstance::iterate (void)
diff --git a/external/vulkancts/modules/vulkan/texture/vktTextureTestUtil.cpp b/external/vulkancts/modules/vulkan/texture/vktTextureTestUtil.cpp
index 2548e97..5911f82 100644
--- a/external/vulkancts/modules/vulkan/texture/vktTextureTestUtil.cpp
+++ b/external/vulkancts/modules/vulkan/texture/vktTextureTestUtil.cpp
@@ -1094,8 +1094,8 @@
else
DE_ASSERT(DE_FALSE);
- Unique<VkShaderModule> vertexShaderModule (createShaderModule(vkd, vkDevice, m_context.getBinaryCollection().get("vertext_" + std::string(getProgramName(progSpec))), 0));
- Unique<VkShaderModule> fragmentShaderModule (createShaderModule(vkd, vkDevice, m_context.getBinaryCollection().get("fragment_" + std::string(getProgramName(progSpec))), 0));
+ Unique<VkShaderModule> vertexShaderModule (createShaderModule(vkd, vkDevice, m_context.getBinaryCollection().get("vertext_" + std::string(getProgramName(progSpec))), 0));
+ Unique<VkShaderModule> fragmentShaderModule (createShaderModule(vkd, vkDevice, m_context.getBinaryCollection().get("fragment_" + std::string(getProgramName(progSpec))), 0));
Move<VkSampler> sampler;
Move<VkDescriptorSet> descriptorSet[2];
@@ -1106,10 +1106,14 @@
Move<VkPipeline> graphicsPipeline;
Move<VkBuffer> vertexBuffer;
de::MovePtr<Allocation> vertexBufferMemory;
- const deUint32 positionDataSize = deUint32(sizeof(float) * 4 * 4);
- const deUint32 textureCoordDataSize = deUint32(sizeof(float) * numComps * 4);
- const VkPhysicalDeviceProperties properties = m_context.getDeviceProperties();
+ const VkDeviceSize vertexBufferOffset = 0;
+ const deUint32 vertexPositionStrideSize = deUint32(sizeof(tcu::Vec4));
+ const deUint32 vertexTextureStrideSize = deUint32(numComps * sizeof(float));
+ const deUint32 positionDataSize = vertexPositionStrideSize * 4u;
+ const deUint32 textureCoordDataSize = vertexTextureStrideSize * 4u;
+
+ const VkPhysicalDeviceProperties properties = m_context.getDeviceProperties();
if (positionDataSize > properties.limits.maxVertexInputAttributeOffset)
{
@@ -1142,9 +1146,6 @@
}
};
- const deUint32 vertexPositionStrideSize = deUint32(sizeof(tcu::Vec4));
- const deUint32 vertexTextureStrideSize = deUint32(numComps * sizeof(float));
-
const VkVertexInputBindingDescription vertexInputBindingDescription[2] =
{
{
@@ -1395,12 +1396,17 @@
// Create Vertex Buffer
{
+ VkDeviceSize bufferSize = positionDataSize + textureCoordDataSize;
+
+ // Pad the buffer size to a stride multiple for the last element so that it isn't out of bounds
+ bufferSize += vertexTextureStrideSize - ((bufferSize - vertexBufferOffset) % vertexTextureStrideSize);
+
const VkBufferCreateInfo vertexBufferParams =
{
VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
DE_NULL, // const void* pNext;
0u, // VkBufferCreateFlags flags;
- positionDataSize + textureCoordDataSize, // VkDeviceSize size;
+ bufferSize, // VkDeviceSize size;
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, // VkBufferUsageFlags usage;
VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1u, // deUint32 queueFamilyCount;
@@ -1453,8 +1459,6 @@
vkd.cmdBeginRenderPass(*commandBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
}
- const VkDeviceSize vertexBufferOffset = 0;
-
vkd.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
vkd.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1, &descriptorSet[0].get(), 0u, DE_NULL);
vkd.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 1u, 1, &descriptorSet[1].get(), 0u, DE_NULL);
diff --git a/external/vulkancts/modules/vulkan/vktDrawUtil.cpp b/external/vulkancts/modules/vulkan/vktDrawUtil.cpp
index 2325f54..17f5714 100644
--- a/external/vulkancts/modules/vulkan/vktDrawUtil.cpp
+++ b/external/vulkancts/modules/vulkan/vktDrawUtil.cpp
@@ -654,7 +654,7 @@
0u, // write mask
0u); // reference
- if (m_drawState.depthBoundsTestEnable && context.getDeviceFeatures().depthBounds)
+ if (m_drawState.depthBoundsTestEnable && !context.getDeviceFeatures().depthBounds)
TCU_THROW(NotSupportedError, "depthBounds not supported");
const VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo =
diff --git a/external/vulkancts/modules/vulkan/wsi/vktWsiDisplayTimingTests.cpp b/external/vulkancts/modules/vulkan/wsi/vktWsiDisplayTimingTests.cpp
index 540e511..53a0450 100644
--- a/external/vulkancts/modules/vulkan/wsi/vktWsiDisplayTimingTests.cpp
+++ b/external/vulkancts/modules/vulkan/wsi/vktWsiDisplayTimingTests.cpp
@@ -1095,7 +1095,7 @@
deUint32 imageIndex;
// Acquire next image
- VK_CHECK(m_vkd.acquireNextImageKHR(*m_device, *m_swapchain, foreverNs, currentAcquireSemaphore, fence, &imageIndex));
+ VK_CHECK(m_vkd.acquireNextImageKHR(*m_device, *m_swapchain, foreverNs, currentAcquireSemaphore, (vk::VkFence)0, &imageIndex));
// Create command buffer
m_commandBuffers[m_frameNdx % m_commandBuffers.size()] = createCommandBuffer(m_vkd, *m_device, *m_commandPool, *m_pipelineLayout, *m_renderPass, m_framebuffers[imageIndex], *m_pipeline, m_frameNdx, m_quadCount, width, height).disown();
@@ -1208,7 +1208,7 @@
&currentRenderSemaphore
};
- VK_CHECK(m_vkd.queueSubmit(m_queue, 1u, &submitInfo, (vk::VkFence)0));
+ VK_CHECK(m_vkd.queueSubmit(m_queue, 1u, &submitInfo, fence));
}
// Present frame
diff --git a/external/vulkancts/modules/vulkan/wsi/vktWsiIncrementalPresentTests.cpp b/external/vulkancts/modules/vulkan/wsi/vktWsiIncrementalPresentTests.cpp
index b72a910..1457146 100644
--- a/external/vulkancts/modules/vulkan/wsi/vktWsiIncrementalPresentTests.cpp
+++ b/external/vulkancts/modules/vulkan/wsi/vktWsiIncrementalPresentTests.cpp
@@ -1190,7 +1190,7 @@
deUint32 imageIndex;
// Acquire next image
- VK_CHECK(m_vkd.acquireNextImageKHR(*m_device, *m_swapchain, foreverNs, currentAcquireSemaphore, fence, &imageIndex));
+ VK_CHECK(m_vkd.acquireNextImageKHR(*m_device, *m_swapchain, foreverNs, currentAcquireSemaphore, (vk::VkFence)0, &imageIndex));
// Create command buffer
{
@@ -1216,7 +1216,7 @@
&currentRenderSemaphore
};
- VK_CHECK(m_vkd.queueSubmit(m_queue, 1u, &submitInfo, (vk::VkFence)0));
+ VK_CHECK(m_vkd.queueSubmit(m_queue, 1u, &submitInfo, fence));
}
// Present frame
diff --git a/external/vulkancts/modules/vulkan/wsi/vktWsiSharedPresentableImageTests.cpp b/external/vulkancts/modules/vulkan/wsi/vktWsiSharedPresentableImageTests.cpp
index 97ee3a8..8ed1064 100644
--- a/external/vulkancts/modules/vulkan/wsi/vktWsiSharedPresentableImageTests.cpp
+++ b/external/vulkancts/modules/vulkan/wsi/vktWsiSharedPresentableImageTests.cpp
@@ -1055,11 +1055,12 @@
VK_CHECK(m_vkd.endCommandBuffer(*commandBuffer));
+ const vk::VkPipelineStageFlags waitDstStages[] = { vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT };
const vk::VkSubmitInfo submitInfo =
{
vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
DE_NULL,
- 1, &*semaphore, DE_NULL,
+ 1, &*semaphore, waitDstStages,
1, &*commandBuffer,
0, DE_NULL,
};
diff --git a/external/vulkancts/modules/vulkan/wsi/vktWsiSwapchainTests.cpp b/external/vulkancts/modules/vulkan/wsi/vktWsiSwapchainTests.cpp
index d01c7a8..176d7cd 100644
--- a/external/vulkancts/modules/vulkan/wsi/vktWsiSwapchainTests.cpp
+++ b/external/vulkancts/modules/vulkan/wsi/vktWsiSwapchainTests.cpp
@@ -1398,7 +1398,7 @@
*swapchain,
std::numeric_limits<deUint64>::max(),
imageReadySemaphore,
- imageReadyFence,
+ (VkFence)0,
&imageNdx);
if (acquireResult == VK_SUBOPTIMAL_KHR)
@@ -1438,7 +1438,7 @@
};
renderer.recordFrame(commandBuffer, imageNdx, frameNdx);
- VK_CHECK(vkd.queueSubmit(devHelper.queue, 1u, &submitInfo, (VkFence)0));
+ VK_CHECK(vkd.queueSubmit(devHelper.queue, 1u, &submitInfo, imageReadyFence));
VK_CHECK(vkd.queuePresentKHR(devHelper.queue, &presentInfo));
}
}
diff --git a/external/vulkancts/modules/vulkan/ycbcr/vktYCbCrConversionTests.cpp b/external/vulkancts/modules/vulkan/ycbcr/vktYCbCrConversionTests.cpp
index d5bee20..442320a 100644
--- a/external/vulkancts/modules/vulkan/ycbcr/vktYCbCrConversionTests.cpp
+++ b/external/vulkancts/modules/vulkan/ycbcr/vktYCbCrConversionTests.cpp
@@ -1207,7 +1207,7 @@
const IVec2 chromaJRange (subsampledY ? calculateNearestIJRange(coordFormat, chromaV) : IVec2(j, j));
for (int chromaJ = chromaJRange.x(); chromaJ <= chromaJRange.y(); chromaJ++)
- for (int chromaI = chromaIRange.x(); chromaI <= chromaIRange.x(); chromaI++)
+ for (int chromaI = chromaIRange.x(); chromaI <= chromaIRange.y(); chromaI++)
{
const Interval srcColor[] =
{
@@ -1231,7 +1231,7 @@
const IVec2 chromaJRange (subsampledY ? calculateLinearIJRange(coordFormat, chromaV) : IVec2(j, j));
for (int chromaJ = chromaJRange.x(); chromaJ <= chromaJRange.y(); chromaJ++)
- for (int chromaI = chromaIRange.x(); chromaI <= chromaIRange.x(); chromaI++)
+ for (int chromaI = chromaIRange.x(); chromaI <= chromaIRange.y(); chromaI++)
{
const Interval chromaA (calculateAB(subTexelPrecisionBits, chromaU, chromaI));
const Interval chromaB (calculateAB(subTexelPrecisionBits, chromaV, chromaJ));
@@ -1396,7 +1396,7 @@
const IVec2 chromaJRange (calculateNearestIJRange(coordFormat, chromaV));
for (int chromaJ = chromaJRange.x(); chromaJ <= chromaJRange.y(); chromaJ++)
- for (int chromaI = chromaIRange.x(); chromaI <= chromaIRange.x(); chromaI++)
+ for (int chromaI = chromaIRange.x(); chromaI <= chromaIRange.y(); chromaI++)
{
const Interval srcColor[] =
{
@@ -1419,7 +1419,7 @@
const IVec2 chromaJRange (calculateNearestIJRange(coordFormat, chromaV));
for (int chromaJ = chromaJRange.x(); chromaJ <= chromaJRange.y(); chromaJ++)
- for (int chromaI = chromaIRange.x(); chromaI <= chromaIRange.x(); chromaI++)
+ for (int chromaI = chromaIRange.x(); chromaI <= chromaIRange.y(); chromaI++)
{
const Interval chromaA (calculateAB(subTexelPrecisionBits, chromaU, chromaI));
const Interval chromaB (calculateAB(subTexelPrecisionBits, chromaV, chromaJ));
@@ -1915,7 +1915,7 @@
log << TestLog::Message << "XChromaOffset: " << config.xChromaOffset << TestLog::EndMessage;
log << TestLog::Message << "YChromaOffset: " << config.yChromaOffset << TestLog::EndMessage;
log << TestLog::Message << "ExplicitReconstruction: " << (config.explicitReconstruction ? "true" : "false") << TestLog::EndMessage;
- log << TestLog::Message << "Disjoint: " << (config.explicitReconstruction ? "true" : "false") << TestLog::EndMessage;
+ log << TestLog::Message << "Disjoint: " << (config.disjoint ? "true" : "false") << TestLog::EndMessage;
log << TestLog::Message << "ColorRange: " << config.colorRange << TestLog::EndMessage;
log << TestLog::Message << "ColorModel: " << config.colorModel << TestLog::EndMessage;
log << TestLog::Message << "ComponentMapping: " << config.componentMapping << TestLog::EndMessage;
@@ -1995,22 +1995,26 @@
#endif
{
- const vk::PlanarFormatDescription planeInfo (vk::getPlanarFormatDescription(config.format));
- MultiPlaneImageData src (config.format, size);
+ const vk::PlanarFormatDescription planeInfo (vk::getPlanarFormatDescription(config.format));
+ MultiPlaneImageData src (config.format, size);
- deUint32 nullAccessData (0u);
- ChannelAccess nullAccess (tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT, 1u, IVec3(size.x(), size.y(), 1), IVec3(0, 0, 0), &nullAccessData, 0u);
- deUint32 nullAccessAlphaData (~0u);
- ChannelAccess nullAccessAlpha (tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT, 1u, IVec3(size.x(), size.y(), 1), IVec3(0, 0, 0), &nullAccessAlphaData, 0u);
- ChannelAccess rChannelAccess (planeInfo.hasChannelNdx(0) ? getChannelAccess(src, planeInfo, size, 0) : nullAccess);
- ChannelAccess gChannelAccess (planeInfo.hasChannelNdx(1) ? getChannelAccess(src, planeInfo, size, 1) : nullAccess);
- ChannelAccess bChannelAccess (planeInfo.hasChannelNdx(2) ? getChannelAccess(src, planeInfo, size, 2) : nullAccess);
- ChannelAccess aChannelAccess (planeInfo.hasChannelNdx(3) ? getChannelAccess(src, planeInfo, size, 3) : nullAccessAlpha);
+ deUint32 nullAccessData (0u);
+ ChannelAccess nullAccess (tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT, 1u, IVec3(size.x(), size.y(), 1), IVec3(0, 0, 0), &nullAccessData, 0u);
+ deUint32 nullAccessAlphaData (~0u);
+ ChannelAccess nullAccessAlpha (tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT, 1u, IVec3(size.x(), size.y(), 1), IVec3(0, 0, 0), &nullAccessAlphaData, 0u);
+ ChannelAccess rChannelAccess (planeInfo.hasChannelNdx(0) ? getChannelAccess(src, planeInfo, size, 0) : nullAccess);
+ ChannelAccess gChannelAccess (planeInfo.hasChannelNdx(1) ? getChannelAccess(src, planeInfo, size, 1) : nullAccess);
+ ChannelAccess bChannelAccess (planeInfo.hasChannelNdx(2) ? getChannelAccess(src, planeInfo, size, 2) : nullAccess);
+ ChannelAccess aChannelAccess (planeInfo.hasChannelNdx(3) ? getChannelAccess(src, planeInfo, size, 3) : nullAccessAlpha);
+ const bool implicitNearestCosited ((config.chromaFilter == vk::VK_FILTER_NEAREST && !config.explicitReconstruction) &&
+ (config.xChromaOffset == vk::VK_CHROMA_LOCATION_COSITED_EVEN_KHR || config.yChromaOffset == vk::VK_CHROMA_LOCATION_COSITED_EVEN_KHR));
vector<Vec2> sts;
vector<Vec4> results;
vector<Vec4> minBounds;
+ vector<Vec4> minMidpointBounds;
vector<Vec4> maxBounds;
+ vector<Vec4> maxMidpointBounds;
vector<Vec4> uvBounds;
vector<IVec4> ijBounds;
@@ -2050,6 +2054,12 @@
calculateBounds(rChannelAccess, gChannelAccess, bChannelAccess, aChannelAccess, bitDepth, sts, filteringPrecision, conversionPrecision, subTexelPrecisionBits, config.textureFilter, config.colorModel, config.colorRange, config.chromaFilter, config.xChromaOffset, config.yChromaOffset, config.componentMapping, explicitReconstruction, config.addressModeU, config.addressModeV, minBounds, maxBounds, uvBounds, ijBounds);
+ // Handle case: If implicit reconstruction and chromaFilter == NEAREST, an implementation may behave as if both chroma offsets are MIDPOINT.
+ if (implicitNearestCosited)
+ {
+ calculateBounds(rChannelAccess, gChannelAccess, bChannelAccess, aChannelAccess, bitDepth, sts, filteringPrecision, conversionPrecision, subTexelPrecisionBits, config.textureFilter, config.colorModel, config.colorRange, config.chromaFilter, vk::VK_CHROMA_LOCATION_MIDPOINT_KHR, vk::VK_CHROMA_LOCATION_MIDPOINT_KHR, config.componentMapping, explicitReconstruction, config.addressModeU, config.addressModeV, minMidpointBounds, maxMidpointBounds, uvBounds, ijBounds);
+ }
+
if (vk::isYCbCrFormat(config.format))
{
tcu::TextureLevel rImage (tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::FLOAT), rChannelAccess.getSize().x(), rChannelAccess.getSize().y());
@@ -2100,9 +2110,11 @@
evalShader(context, config.shaderType, src, size, config.format, config.imageTiling, config.disjoint, config.textureFilter, config.addressModeU, config.addressModeV, config.colorModel, config.colorRange, config.xChromaOffset, config.yChromaOffset, config.chromaFilter, config.componentMapping, config.explicitReconstruction, sts, results);
{
- tcu::TextureLevel minImage (tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::FLOAT), size.x() + (size.x() / 2), size.y() + (size.y() / 2));
- tcu::TextureLevel maxImage (tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::FLOAT), size.x() + (size.x() / 2), size.y() + (size.y() / 2));
- tcu::TextureLevel resImage (tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::FLOAT), size.x() + (size.x() / 2), size.y() + (size.y() / 2));
+ tcu::TextureLevel minImage (tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::FLOAT), size.x() + (size.x() / 2), size.y() + (size.y() / 2));
+ tcu::TextureLevel maxImage (tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::FLOAT), size.x() + (size.x() / 2), size.y() + (size.y() / 2));
+ tcu::TextureLevel minMidpointImage (tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::FLOAT), size.x() + (size.x() / 2), size.y() + (size.y() / 2));
+ tcu::TextureLevel maxMidpointImage (tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::FLOAT), size.x() + (size.x() / 2), size.y() + (size.y() / 2));
+ tcu::TextureLevel resImage (tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::FLOAT), size.x() + (size.x() / 2), size.y() + (size.y() / 2));
for (int y = 0; y < (int)(size.y() + (size.y() / 2)); y++)
for (int x = 0; x < (int)(size.x() + (size.x() / 2)); x++)
@@ -2119,12 +2131,30 @@
resImage.getAccess().setPixel(results[ndx], x, y);
}
+ if (implicitNearestCosited)
+ {
+ for (int y = 0; y < (int)(size.y() + (size.y() / 2)); y++)
+ for (int x = 0; x < (int)(size.x() + (size.x() / 2)); x++)
+ {
+ const int ndx = x + y * (int)(size.x() + (size.x() / 2));
+ minMidpointImage.getAccess().setPixel(minMidpointBounds[ndx], x, y);
+ maxMidpointImage.getAccess().setPixel(maxMidpointBounds[ndx], x, y);
+ }
+ }
+
{
const Vec4 scale (1.0f);
const Vec4 bias (0.0f);
log << TestLog::Image("MinBoundImage", "MinBoundImage", minImage.getAccess(), scale, bias);
log << TestLog::Image("MaxBoundImage", "MaxBoundImage", maxImage.getAccess(), scale, bias);
+
+ if (implicitNearestCosited)
+ {
+ log << TestLog::Image("MinMidpointBoundImage", "MinMidpointBoundImage", minMidpointImage.getAccess(), scale, bias);
+ log << TestLog::Image("MaxMidpointBoundImage", "MaxMidpointBoundImage", maxMidpointImage.getAccess(), scale, bias);
+ }
+
log << TestLog::Image("ResultImage", "ResultImage", resImage.getAccess(), scale, bias);
}
}
@@ -2133,7 +2163,18 @@
for (size_t ndx = 0; ndx < sts.size(); ndx++)
{
- if (tcu::boolAny(tcu::lessThan(results[ndx], minBounds[ndx])) || tcu::boolAny(tcu::greaterThan(results[ndx], maxBounds[ndx])))
+ bool fail;
+ if (implicitNearestCosited)
+ {
+ fail = (tcu::boolAny(tcu::lessThan(results[ndx], minMidpointBounds[ndx])) || tcu::boolAny(tcu::greaterThan(results[ndx], maxMidpointBounds[ndx]))) &&
+ (tcu::boolAny(tcu::lessThan(results[ndx], minBounds[ndx])) || tcu::boolAny(tcu::greaterThan(results[ndx], maxBounds[ndx])));
+ }
+ else
+ {
+ fail = tcu::boolAny(tcu::lessThan(results[ndx], minBounds[ndx])) || tcu::boolAny(tcu::greaterThan(results[ndx], maxBounds[ndx]));
+ }
+
+ if (fail)
{
log << TestLog::Message << "Fail: " << sts[ndx] << " " << results[ndx] << TestLog::EndMessage;
log << TestLog::Message << " Min : " << minBounds[ndx] << TestLog::EndMessage;
diff --git a/external/vulkancts/modules/vulkan/ycbcr/vktYCbCrCopyTests.cpp b/external/vulkancts/modules/vulkan/ycbcr/vktYCbCrCopyTests.cpp
index 8fabaef..77adfde 100644
--- a/external/vulkancts/modules/vulkan/ycbcr/vktYCbCrCopyTests.cpp
+++ b/external/vulkancts/modules/vulkan/ycbcr/vktYCbCrCopyTests.cpp
@@ -166,7 +166,7 @@
1u,
vk::VK_SAMPLE_COUNT_1_BIT,
tiling,
- vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT | vk::VK_IMAGE_USAGE_SAMPLED_BIT,
+ vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT | vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT,
vk::VK_SHARING_MODE_EXCLUSIVE,
0u,
(const deUint32*)DE_NULL,
@@ -809,7 +809,35 @@
VK_CHECK(vkd.beginCommandBuffer(*cmdBuffer, &beginInfo));
}
- vkd.cmdCopyImage(*cmdBuffer, *srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (deUint32)copies.size(), &copies[0]);
+ for (size_t i = 0; i < copies.size(); i++)
+ {
+ vkd.cmdCopyImage(*cmdBuffer, *srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copies[i]);
+
+ const vk::VkImageMemoryBarrier preCopyBarrier =
+ {
+ vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ DE_NULL,
+ vk::VK_ACCESS_TRANSFER_WRITE_BIT,
+ vk::VK_ACCESS_TRANSFER_WRITE_BIT,
+ vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_QUEUE_FAMILY_IGNORED,
+ VK_QUEUE_FAMILY_IGNORED,
+ *dstImage,
+ { vk::VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u }
+ };
+
+ vkd.cmdPipelineBarrier(*cmdBuffer,
+ (vk::VkPipelineStageFlags)vk::VK_PIPELINE_STAGE_TRANSFER_BIT,
+ (vk::VkPipelineStageFlags)vk::VK_PIPELINE_STAGE_TRANSFER_BIT,
+ (vk::VkDependencyFlags)0u,
+ 0u,
+ (const vk::VkMemoryBarrier*)DE_NULL,
+ 0u,
+ (const vk::VkBufferMemoryBarrier*)DE_NULL,
+ 1u,
+ &preCopyBarrier);
+ }
VK_CHECK(vkd.endCommandBuffer(*cmdBuffer));
diff --git a/external/vulkancts/scripts/src/vulkan.h.in b/external/vulkancts/scripts/src/vulkan.h.in
index cd9843b..45418ba 100644
--- a/external/vulkancts/scripts/src/vulkan.h.in
+++ b/external/vulkancts/scripts/src/vulkan.h.in
@@ -4298,7 +4298,6 @@
VkSurfaceFormat2KHR* pSurfaceFormats);
#endif
-<<<<<<< HEAD
#define VK_KHR_external_fence_capabilities 1
#define VK_LUID_SIZE_KHR 8
#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION 1
@@ -4444,8 +4443,6 @@
int* pFd);
#endif
-=======
->>>>>>> 6c472cc5... Test VK_KHR_dedicated_allocation mem requirement queries
#define VK_KHR_dedicated_allocation 1
#define VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION 1
#define VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_KHR_dedicated_allocation"
@@ -4464,10 +4461,6 @@
VkBuffer buffer;
} VkMemoryDedicatedAllocateInfoKHR;
-<<<<<<< HEAD
-
-=======
->>>>>>> 6c472cc5... Test VK_KHR_dedicated_allocation mem requirement queries
#define VK_KHR_get_memory_requirements2 1
#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION 1
#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME "VK_KHR_get_memory_requirements2"