/*-------------------------------------------------------------------------
* OpenGL Conformance Test Suite
* -----------------------------
*
* Copyright (c) 2015-2016 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/ /*!
* \file gl4cSparseBufferTests.cpp
* \brief Conformance tests for the GL_ARB_sparse_buffer functionality.
*/ /*-------------------------------------------------------------------*/
#include "gl4cSparseBufferTests.hpp"
#include "gluContextInfo.hpp"
#include "gluDefs.hpp"
#include "glwEnums.hpp"
#include "glwFunctions.hpp"
#include "tcuTestLog.hpp"
#include <sstream>
#include <string.h>
#include <vector>
#ifndef GL_SPARSE_BUFFER_PAGE_SIZE_ARB
#define GL_SPARSE_BUFFER_PAGE_SIZE_ARB 0x82F8
#endif
#ifndef GL_SPARSE_STORAGE_BIT_ARB
#define GL_SPARSE_STORAGE_BIT_ARB 0x0400
#endif
namespace gl4cts
{
/** Rounds up the provided offset so that it is aligned to the specified value (e.g. page size).
* In other words, the result meets the following requirements:
*
* 1) result % value = 0
* 2) result >= offset
* 3) (result - offset) < value
*
* @param offset Offset to be used for the rounding operation.
* @param value Value to align the offset to.
*
* @return Result value.
**/
unsigned int SparseBufferTestUtilities::alignOffset(const unsigned int& offset, const unsigned int& value)
{
return offset + (value - offset % value) % value;
}
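/* Illustrative example (not part of the test; numbers are hypothetical): with a page
* size of 65536, alignOffset(1, 65536) returns 65536, alignOffset(65536, 65536) returns
* 65536, and alignOffset(0, 65536) returns 0. Offsets already on a page boundary are
* returned unchanged; all other offsets are rounded up to the next boundary. */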
/** Builds a compute program object, using the user-specified CS code snippets.
*
* @param gl DEQP CTS GL functions container.
* @param cs_body_parts Code snippets to use for the compute shader. Must hold exactly
* @param n_cs_body_parts null-terminated text strings.
* @param n_cs_body_parts Number of code snippets accessible via @param cs_body_parts.
*
* @return Result PO id if program has been linked successfully, 0 otherwise.
**/
glw::GLuint SparseBufferTestUtilities::createComputeProgram(const glw::Functions& gl, const char** cs_body_parts,
unsigned int n_cs_body_parts)
{
glw::GLint compile_status = GL_FALSE;
glw::GLuint cs_id = 0;
glw::GLint link_status = GL_FALSE;
glw::GLuint po_id = 0;
bool result = true;
if (n_cs_body_parts > 0)
{
cs_id = gl.createShader(GL_COMPUTE_SHADER);
}
po_id = gl.createProgram();
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateProgram() / glCreateShader() call(s) failed.");
if (n_cs_body_parts > 0)
{
gl.attachShader(po_id, cs_id);
}
GLU_EXPECT_NO_ERROR(gl.getError(), "glAttachShader() call(s) failed.");
if (n_cs_body_parts > 0)
{
gl.shaderSource(cs_id, n_cs_body_parts, cs_body_parts, NULL); /* length */
}
GLU_EXPECT_NO_ERROR(gl.getError(), "glShaderSource() call(s) failed.");
gl.compileShader(cs_id);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCompileShader() call failed.");
gl.getShaderiv(cs_id, GL_COMPILE_STATUS, &compile_status);
GLU_EXPECT_NO_ERROR(gl.getError(), "glGetShaderiv() call failed");
char temp[1024];
gl.getShaderInfoLog(cs_id, 1024, NULL, temp);
if (GL_TRUE != compile_status)
{
result = false;
goto end;
}
gl.linkProgram(po_id);
GLU_EXPECT_NO_ERROR(gl.getError(), "glLinkProgram() call failed.");
gl.getProgramiv(po_id, GL_LINK_STATUS, &link_status);
GLU_EXPECT_NO_ERROR(gl.getError(), "glGetProgramiv() call failed.");
if (GL_TRUE != link_status)
{
result = false;
goto end;
}
end:
if (cs_id != 0)
{
gl.deleteShader(cs_id);
cs_id = 0;
}
if (!result)
{
if (po_id != 0)
{
gl.deleteProgram(po_id);
po_id = 0;
}
} /* if (!result) */
return po_id;
}
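/* Illustrative usage (hypothetical snippet, not part of the test):
*
* const char* cs_parts[] = { "#version 430 core\n",
* "layout(local_size_x = 1) in;\n"
* "void main() {}\n" };
* glw::GLuint po_id = SparseBufferTestUtilities::createComputeProgram(gl, cs_parts, 2);
*
* A returned id of 0 indicates a compilation or link failure. */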
/** Builds a program object, using the user-specified code snippets. Can optionally configure
* the PO to use pre-defined attribute locations & transform feed-back varyings.
*
* @param gl DEQP CTS GL functions container.
* @param fs_body_parts Code snippets to use for the fragment shader. Must hold exactly
* @param n_fs_body_parts null-terminated text strings. May only
* be NULL if @param n_fs_body_parts is 0.
* @param n_fs_body_parts See @param fs_body_parts definitions.
* @param vs_body_parts Code snippets to use for the vertex shader. Must hold exactly
* @param n_vs_body_parts null-terminated text strings. May only
* be NULL if @param n_vs_body_parts is 0.
* @param n_vs_body_parts See @param vs_body_parts definitions.
* @param attribute_names Null-terminated attribute names to pass to the
* glBindAttribLocation() call.
* May only be NULL if @param n_attribute_properties is 0.
* @param attribute_locations Attribute locations to pass to the glBindAttribLocation() call.
* May only be NULL if @param n_attribute_properties is 0.
* @param n_attribute_properties See @param attribute_names and @param attribute_locations definitions.
* @param tf_varyings Transform-feedback varying names to use for the
* glTransformFeedbackVaryings() call. May only be NULL if
* @param n_tf_varyings is 0.
* @param n_tf_varyings See @param tf_varyings definition.
* @param tf_varying_mode Transform feedback mode to use for the
* glTransformFeedbackVaryings() call. Only used if @param n_tf_varyings
* is not 0.
*
* @return Result PO id if program has been linked successfully, 0 otherwise.
**/
glw::GLuint SparseBufferTestUtilities::createProgram(const glw::Functions& gl, const char** fs_body_parts,
unsigned int n_fs_body_parts, const char** vs_body_parts,
unsigned int n_vs_body_parts, const char** attribute_names,
const unsigned int* attribute_locations,
unsigned int n_attribute_properties,
const glw::GLchar* const* tf_varyings, unsigned int n_tf_varyings,
glw::GLenum tf_varying_mode)
{
glw::GLint compile_status = GL_FALSE;
glw::GLuint fs_id = 0;
glw::GLint link_status = GL_FALSE;
glw::GLuint po_id = 0;
bool result = true;
glw::GLuint vs_id = 0;
if (n_fs_body_parts > 0)
{
fs_id = gl.createShader(GL_FRAGMENT_SHADER);
}
po_id = gl.createProgram();
if (n_vs_body_parts > 0)
{
vs_id = gl.createShader(GL_VERTEX_SHADER);
}
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateProgram() / glCreateShader() call(s) failed.");
if (n_fs_body_parts > 0)
{
gl.attachShader(po_id, fs_id);
}
if (n_vs_body_parts > 0)
{
gl.attachShader(po_id, vs_id);
}
GLU_EXPECT_NO_ERROR(gl.getError(), "glAttachShader() call(s) failed.");
if (n_fs_body_parts > 0)
{
gl.shaderSource(fs_id, n_fs_body_parts, fs_body_parts, NULL); /* length */
}
if (n_vs_body_parts > 0)
{
gl.shaderSource(vs_id, n_vs_body_parts, vs_body_parts, NULL); /* length */
}
GLU_EXPECT_NO_ERROR(gl.getError(), "glShaderSource() call(s) failed.");
const glw::GLuint so_ids[] = { fs_id, vs_id };
const unsigned int n_so_ids = sizeof(so_ids) / sizeof(so_ids[0]);
for (unsigned int n_so_id = 0; n_so_id < n_so_ids; ++n_so_id)
{
if (so_ids[n_so_id] != 0)
{
gl.compileShader(so_ids[n_so_id]);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCompileShader() call failed.");
gl.getShaderiv(so_ids[n_so_id], GL_COMPILE_STATUS, &compile_status);
GLU_EXPECT_NO_ERROR(gl.getError(), "glGetShaderiv() call failed");
char temp[1024];
gl.getShaderInfoLog(so_ids[n_so_id], 1024, NULL, temp);
if (GL_TRUE != compile_status)
{
result = false;
goto end;
}
} /* if (so_ids[n_so_id] != 0) */
} /* for (all shader object IDs) */
for (unsigned int n_attribute = 0; n_attribute < n_attribute_properties; ++n_attribute)
{
gl.bindAttribLocation(po_id, attribute_locations[n_attribute], attribute_names[n_attribute]);
GLU_EXPECT_NO_ERROR(gl.getError(), "glBindAttribLocation() call failed.");
} /* for (all attributes to configure) */
if (n_tf_varyings != 0)
{
gl.transformFeedbackVaryings(po_id, n_tf_varyings, tf_varyings, tf_varying_mode);
GLU_EXPECT_NO_ERROR(gl.getError(), "glTransformFeedbackVaryings() call failed.");
} /* if (n_tf_varyings != 0) */
gl.linkProgram(po_id);
GLU_EXPECT_NO_ERROR(gl.getError(), "glLinkProgram() call failed.");
gl.getProgramiv(po_id, GL_LINK_STATUS, &link_status);
GLU_EXPECT_NO_ERROR(gl.getError(), "glGetProgramiv() call failed.");
if (GL_TRUE != link_status)
{
result = false;
goto end;
}
end:
if (fs_id != 0)
{
gl.deleteShader(fs_id);
fs_id = 0;
}
if (vs_id != 0)
{
gl.deleteShader(vs_id);
vs_id = 0;
}
if (!result)
{
if (po_id != 0)
{
gl.deleteProgram(po_id);
po_id = 0;
}
} /* if (!result) */
return po_id;
}
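/* Illustrative usage (hypothetical snippet, not part of the test): a program built from
* a single vertex shader body (vs_body), with one attribute bound to location 0 and one
* transform feedback varying. The identifiers vs_body, "in_position" and "out_value" are
* placeholders:
*
* const char* vs_parts[] = { vs_body };
* const char* attribute_names[] = { "in_position" };
* const unsigned int attribute_locations[] = { 0 };
* const glw::GLchar* const tf_varyings[] = { "out_value" };
*
* glw::GLuint po_id = SparseBufferTestUtilities::createProgram(gl, DE_NULL, 0,
* vs_parts, 1,
* attribute_names, attribute_locations, 1,
* tf_varyings, 1, GL_INTERLEAVED_ATTRIBS);
*/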
/** Returns a string with a textual representation of the @param flags bitfield,
* which holds bits applicable to the <flags> argument of glBufferStorage()
* calls.
*
* @param flags Flags bitfield, as accepted by the <flags> argument of the
* glBufferStorage() entry-point.
*
* @return Described string.
**/
std::string SparseBufferTestUtilities::getSparseBOFlagsString(glw::GLenum flags)
{
unsigned int n_flags_added = 0;
std::stringstream result_sstream;
if ((flags & GL_CLIENT_STORAGE_BIT) != 0)
{
result_sstream << "GL_CLIENT_STORAGE_BIT";
++n_flags_added;
}
if ((flags & GL_DYNAMIC_STORAGE_BIT) != 0)
{
result_sstream << ((n_flags_added) ? " | " : "") << "GL_DYNAMIC_STORAGE_BIT";
++n_flags_added;
}
if ((flags & GL_MAP_COHERENT_BIT) != 0)
{
result_sstream << ((n_flags_added) ? " | " : "") << "GL_MAP_COHERENT_BIT";
++n_flags_added;
}
if ((flags & GL_MAP_PERSISTENT_BIT) != 0)
{
result_sstream << ((n_flags_added) ? " | " : "") << "GL_MAP_PERSISTENT_BIT";
++n_flags_added;
}
if ((flags & GL_SPARSE_STORAGE_BIT_ARB) != 0)
{
result_sstream << ((n_flags_added) ? " | " : "") << "GL_SPARSE_STORAGE_BIT";
++n_flags_added;
}
return result_sstream.str();
}
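/* Example (illustrative only): getSparseBOFlagsString(GL_SPARSE_STORAGE_BIT_ARB | GL_MAP_COHERENT_BIT)
* returns "GL_MAP_COHERENT_BIT | GL_SPARSE_STORAGE_BIT", following the fixed bit ordering used above. */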
/** Constructor.
*
* @param context Rendering context
*/
NegativeTests::NegativeTests(deqp::Context& context)
: TestCase(context, "NegativeTests", "Implements all negative tests described in CTS_ARB_sparse_buffer")
, m_helper_bo_id(0)
, m_immutable_bo_id(0)
, m_immutable_bo_size(1024768)
, m_sparse_bo_id(0)
{
/* Left blank intentionally */
}
/** Deinitializes GL objects created during test execution. */
void NegativeTests::deinit()
{
const glw::Functions& gl = m_context.getRenderContext().getFunctions();
if (m_helper_bo_id != 0)
{
gl.deleteBuffers(1, &m_helper_bo_id);
m_helper_bo_id = 0;
}
if (m_immutable_bo_id != 0)
{
gl.deleteBuffers(1, &m_immutable_bo_id);
m_immutable_bo_id = 0;
}
if (m_sparse_bo_id != 0)
{
gl.deleteBuffers(1, &m_sparse_bo_id);
m_sparse_bo_id = 0;
}
}
/** Stub init method */
void NegativeTests::init()
{
/* Nothing to do here */
}
/** Executes test iteration.
*
* @return Returns STOP when test has finished executing, CONTINUE if more iterations are needed.
*/
tcu::TestNode::IterateResult NegativeTests::iterate()
{
glw::GLvoid* data_ptr = DE_NULL;
const glw::Functions& gl = m_context.getRenderContext().getFunctions();
glw::GLint page_size = 0;
bool result = true;
/* Only execute if the implementation supports the GL_ARB_sparse_buffer extension */
if (!m_context.getContextInfo().isExtensionSupported("GL_ARB_sparse_buffer"))
{
throw tcu::NotSupportedError("GL_ARB_sparse_buffer is not supported");
}
/* Set up */
gl.getIntegerv(GL_SPARSE_BUFFER_PAGE_SIZE_ARB, &page_size);
GLU_EXPECT_NO_ERROR(gl.getError(), "glGetIntegerv() call failed.");
gl.genBuffers(1, &m_helper_bo_id);
gl.genBuffers(1, &m_immutable_bo_id);
gl.genBuffers(1, &m_sparse_bo_id);
GLU_EXPECT_NO_ERROR(gl.getError(), "glGenBuffers() call(s) failed.");
gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bo_id);
gl.bindBuffer(GL_COPY_READ_BUFFER, m_immutable_bo_id);
gl.bindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_helper_bo_id);
GLU_EXPECT_NO_ERROR(gl.getError(), "glBindBuffer() call failed.");
gl.bufferStorage(GL_ARRAY_BUFFER, page_size * 3, /* size as per test spec */
DE_NULL, /* data */
GL_SPARSE_STORAGE_BIT_ARB);
gl.bufferStorage(GL_COPY_READ_BUFFER, m_immutable_bo_size, /* size */
DE_NULL, /* data */
0);
GLU_EXPECT_NO_ERROR(gl.getError(), "glBufferStorage() call(s) failed.");
/* * Verify glBufferPageCommitmentARB() returns GL_INVALID_ENUM if <target> is
* set to GL_INTERLEAVED_ATTRIBS. */
glw::GLint error_code = GL_NO_ERROR;
gl.bufferPageCommitmentARB(GL_INTERLEAVED_ATTRIBS, 0, /* offset */
page_size, GL_TRUE); /* commit */
error_code = gl.getError();
if (error_code != GL_INVALID_ENUM)
{
m_testCtx.getLog() << tcu::TestLog::Message
<< "Invalid <target> value passed to a glBufferPageCommitmentARB() call"
" did not generate a GL_INVALID_ENUM error."
<< tcu::TestLog::EndMessage;
result = false;
}
/* * Verify glBufferStorage() generates a GL_INVALID_VALUE error if <flags> is
* set to (GL_SPARSE_STORAGE_BIT_ARB | GL_MAP_READ_BIT) or
* (GL_SPARSE_STORAGE_BIT_ARB | GL_MAP_WRITE_BIT). */
gl.bufferStorage(GL_ELEMENT_ARRAY_BUFFER, page_size * 3, /* size */
DE_NULL, /* data */
GL_SPARSE_STORAGE_BIT_ARB | GL_MAP_READ_BIT);
error_code = gl.getError();
if (error_code != GL_INVALID_VALUE)
{
m_testCtx.getLog() << tcu::TestLog::Message
<< "Invalid <flags> value set to GL_SPARSE_STORAGE_BIT_ARB | GL_MAP_READ_BIT "
"did not generate a GL_INVALID_VALUE error."
<< tcu::TestLog::EndMessage;
result = false;
}
gl.bufferStorage(GL_ELEMENT_ARRAY_BUFFER, page_size * 3, /* size */
DE_NULL, /* data */
GL_SPARSE_STORAGE_BIT_ARB | GL_MAP_WRITE_BIT);
error_code = gl.getError();
if (error_code != GL_INVALID_VALUE)
{
m_testCtx.getLog() << tcu::TestLog::Message
<< "Invalid <flags> value set to GL_SPARSE_STORAGE_BIT_ARB | GL_MAP_WRITE_BIT "
"did not generate a GL_INVALID_VALUE error."
<< tcu::TestLog::EndMessage;
result = false;
}
/* * Verify glBufferPageCommitmentARB() generates a GL_INVALID_OPERATION error if
* it is called for an immutable BO, which has not been initialized with the
* GL_SPARSE_STORAGE_BIT_ARB flag. */
gl.bufferPageCommitmentARB(GL_COPY_READ_BUFFER, 0, /* offset */
page_size, GL_TRUE); /* commit */
error_code = gl.getError();
if (error_code != GL_INVALID_OPERATION)
{
m_testCtx.getLog() << tcu::TestLog::Message << "Invalid error code generated by glBufferPageCommitmentARB() "
" issued against an immutable, non-sparse buffer object."
<< tcu::TestLog::EndMessage;
result = false;
}
/* * Verify glBufferPageCommitmentARB() issues a GL_INVALID_VALUE error if <offset>
* is set to (0.5 * GL_SPARSE_BUFFER_PAGE_SIZE_ARB). Skip if the constant's value
* is equal to 1. */
if (page_size != 1)
{
gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, page_size / 2, /* offset */
page_size, GL_TRUE); /* commit */
error_code = gl.getError();
if (error_code != GL_INVALID_VALUE)
{
m_testCtx.getLog() << tcu::TestLog::Message
<< "Invalid error code generated by glBufferPageCommitmentARB() "
"whose <offset> value was set to (page size / 2)."
<< tcu::TestLog::EndMessage;
result = false;
}
} /* if (page_size != 1) */
/* * Verify glBufferPageCommitmentARB() emits a GL_INVALID_VALUE error if <size>
* is set to (0.5 * GL_SPARSE_BUFFER_PAGE_SIZE_ARB). Skip if the constant's value
* is equal to 1. */
if (page_size != 1)
{
gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, /* offset */
page_size / 2, GL_TRUE); /* commit */
error_code = gl.getError();
if (error_code != GL_INVALID_VALUE)
{
m_testCtx.getLog() << tcu::TestLog::Message
<< "Invalid error code generated by glBufferPageCommitmentARB() "
"whose <size> value was set to (page size / 2)."
<< tcu::TestLog::EndMessage;
result = false;
}
} /* if (page_size != 1) */
/* * Verify glBufferPageCommitmentARB() returns GL_INVALID_VALUE if <offset> is
* set to -1, but all other arguments are valid. */
gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, -1, /* offset */
page_size, GL_TRUE); /* commit */
error_code = gl.getError();
if (error_code != GL_INVALID_VALUE)
{
m_testCtx.getLog() << tcu::TestLog::Message << "Invalid error code generated by glBufferPageCommitmentARB() "
"whose <offset> argument was set to -1."
<< tcu::TestLog::EndMessage;
result = false;
}
/* * Verify glBufferPageCommitmentARB() returns GL_INVALID_VALUE if <size> is
* set to -1, but all other arguments are valid. */
gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, /* offset */
-1, /* size */
GL_TRUE); /* commit */
error_code = gl.getError();
if (error_code != GL_INVALID_VALUE)
{
m_testCtx.getLog() << tcu::TestLog::Message << "Invalid error code generated by glBufferPageCommitmentARB() "
"whose <size> argument was set to -1."
<< tcu::TestLog::EndMessage;
result = false;
}
/* * Verify glBufferPageCommitmentARB() returns GL_INVALID_VALUE if BO's size is
* GL_SPARSE_BUFFER_PAGE_SIZE_ARB * 3, but the <offset> is set to 0 and <size>
* argument used for the call is set to GL_SPARSE_BUFFER_PAGE_SIZE_ARB * 4. */
gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, /* offset */
page_size * 4, /* size */
GL_TRUE);
error_code = gl.getError();
if (error_code != GL_INVALID_VALUE)
{
m_testCtx.getLog() << tcu::TestLog::Message
<< "Invalid error code generated by glBufferPageCommitmentARB() "
"whose <offset> was set to 0 and <size> was set to (page size * 4), "
"when the buffer storage size had been configured to be (page size * 3)."
<< tcu::TestLog::EndMessage;
result = false;
}
/* * Verify glBufferPageCommitmentARB() returns GL_INVALID_VALUE if BO's size is
* GL_SPARSE_BUFFER_PAGE_SIZE_ARB * 3, but the <offset> is set to
* GL_SPARSE_BUFFER_PAGE_SIZE_ARB * 1 and <size> argument used for the call
* is set to GL_SPARSE_BUFFER_PAGE_SIZE_ARB * 3. */
gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, page_size * 1, /* offset */
page_size * 3, /* size */
GL_TRUE);
error_code = gl.getError();
if (error_code != GL_INVALID_VALUE)
{
m_testCtx.getLog() << tcu::TestLog::Message
<< "Invalid error code generated by glBufferPageCommitmentARB() "
"whose <offset> was set to (page size) and <size> was set to (page size * 3), "
"when the buffer storage size had been configured to be (page size * 3)."
<< tcu::TestLog::EndMessage;
result = false;
}
/* * Verify that calling glMapBuffer() or glMapBufferRange() against a sparse
* buffer generates a GL_INVALID_OPERATION error. */
data_ptr = gl.mapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY);
if (data_ptr != DE_NULL)
{
m_testCtx.getLog() << tcu::TestLog::Message
<< "Non-NULL pointer returned by an invalid glMapBuffer() call, issued "
"against a sparse buffer object"
<< tcu::TestLog::EndMessage;
result = false;
}
error_code = gl.getError();
if (error_code != GL_INVALID_OPERATION)
{
m_testCtx.getLog() << tcu::TestLog::Message
<< "Invalid error code generated by glMapBuffer() call, issued against "
"a sparse buffer object"
<< tcu::TestLog::EndMessage;
result = false;
}
data_ptr = gl.mapBufferRange(GL_ARRAY_BUFFER, 0, /* offset */
page_size, /* length */
GL_MAP_READ_BIT);
if (data_ptr != DE_NULL)
{
m_testCtx.getLog() << tcu::TestLog::Message
<< "Non-NULL pointer returned by an invalid glMapBufferRange() call, issued "
"against a sparse buffer object"
<< tcu::TestLog::EndMessage;
result = false;
}
error_code = gl.getError();
if (error_code != GL_INVALID_OPERATION)
{
m_testCtx.getLog() << tcu::TestLog::Message
<< "Invalid error code generated by glMapBufferRange() call, issued against "
"a sparse buffer object"
<< tcu::TestLog::EndMessage;
result = false;
}
m_testCtx.setTestResult(result ? QP_TEST_RESULT_PASS : QP_TEST_RESULT_FAIL, result ? "Pass" : "Fail");
return STOP;
}
/** Constructor.
*
* @param context Rendering context
*/
PageSizeGetterTest::PageSizeGetterTest(deqp::Context& context)
: TestCase(context, "PageSizeGetterTest",
"Verifies GL_SPARSE_BUFFER_PAGE_SIZE_ARB pname is recognized by the getter functions")
{
/* Left blank intentionally */
}
/** Stub deinit method. */
void PageSizeGetterTest::deinit()
{
/* Nothing to be done here */
}
/** Stub init method */
void PageSizeGetterTest::init()
{
/* Nothing to do here */
}
/** Executes test iteration.
*
* @return Returns STOP when test has finished executing, CONTINUE if more iterations are needed.
*/
tcu::TestNode::IterateResult PageSizeGetterTest::iterate()
{
const glw::Functions& gl = m_context.getRenderContext().getFunctions();
glw::GLboolean page_size_bool = false;
glw::GLdouble page_size_double = 0.0;
glw::GLfloat page_size_float = 0.0f;
glw::GLint page_size_int = 0;
glw::GLint64 page_size_int64 = 0;
bool result = true;
/* Only execute if the implementation supports the GL_ARB_sparse_buffer extension */
if (!m_context.getContextInfo().isExtensionSupported("GL_ARB_sparse_buffer"))
{
throw tcu::NotSupportedError("GL_ARB_sparse_buffer is not supported");
}
/* glGetIntegerv() */
gl.getIntegerv(GL_SPARSE_BUFFER_PAGE_SIZE_ARB, &page_size_int);
GLU_EXPECT_NO_ERROR(gl.getError(), "glGetIntegerv() call failed");
if (page_size_int < 1 || page_size_int > 65536)
{
m_testCtx.getLog() << tcu::TestLog::Message << "Page size reported by the implementation (" << page_size_int
<< ")"
" by glGetIntegerv() is out of the allowed range."
<< tcu::TestLog::EndMessage;
result = false;
}
/* glGetBooleanv() */
gl.getBooleanv(GL_SPARSE_BUFFER_PAGE_SIZE_ARB, &page_size_bool);
GLU_EXPECT_NO_ERROR(gl.getError(), "glGetBooleanv() call failed");
if (!page_size_bool)
{
m_testCtx.getLog() << tcu::TestLog::Message << "Invalid page size reported by glGetBooleanv()"
<< tcu::TestLog::EndMessage;
result = false;
}
/* glGetDoublev() */
gl.getDoublev(GL_SPARSE_BUFFER_PAGE_SIZE_ARB, &page_size_double);
GLU_EXPECT_NO_ERROR(gl.getError(), "glGetDoublev() call failed");
if (de::abs(page_size_double - page_size_int) > 1e-5)
{
m_testCtx.getLog() << tcu::TestLog::Message << "Invalid page size reported by glGetDoublev()"
" (reported value: "
<< page_size_double << ", expected value: " << page_size_int << ")"
<< tcu::TestLog::EndMessage;
result = false;
}
/* glGetFloatv() */
gl.getFloatv(GL_SPARSE_BUFFER_PAGE_SIZE_ARB, &page_size_float);
GLU_EXPECT_NO_ERROR(gl.getError(), "glGetFloatv() call failed");
if (de::abs(page_size_float - static_cast<float>(page_size_int)) > 1e-5f)
{
m_testCtx.getLog() << tcu::TestLog::Message << "Invalid page size reported by glGetFloatv()"
" (reported value: "
<< page_size_float << ", expected value: " << page_size_int << ")"
<< tcu::TestLog::EndMessage;
result = false;
}
/* glGetInteger64v() */
gl.getInteger64v(GL_SPARSE_BUFFER_PAGE_SIZE_ARB, &page_size_int64);
GLU_EXPECT_NO_ERROR(gl.getError(), "glGetFloatv() call failed");
if (page_size_int64 != page_size_int)
{
m_testCtx.getLog() << tcu::TestLog::Message << "Invalid page size reported by glGetInteger64v()"
" (reported value: "
<< page_size_int64 << ", expected value: " << page_size_int << ")"
<< tcu::TestLog::EndMessage;
result = false;
}
m_testCtx.setTestResult(result ? QP_TEST_RESULT_PASS : QP_TEST_RESULT_FAIL, result ? "Pass" : "Fail");
return STOP;
}
/** Constructor.
*
* @param gl GL entry-points container
* @param testContext CTS test context
* @param page_size Page size, as reported by implementation for the GL_SPARSE_BUFFER_PAGE_SIZE_ARB query.
* @param all_pages_committed true to run the test with all data memory pages committed,
* false to leave some of them without an actual memory backing.
*/
AtomicCounterBufferStorageTestCase::AtomicCounterBufferStorageTestCase(const glw::Functions& gl,
tcu::TestContext& testContext,
glw::GLint page_size, bool all_pages_committed)
: m_all_pages_committed(all_pages_committed)
, m_gl(gl)
, m_gl_atomic_counter_uniform_array_stride(0)
, m_gl_max_vertex_atomic_counters_value(0)
, m_helper_bo(0)
, m_helper_bo_size(0)
, m_helper_bo_size_rounded(0)
, m_n_draw_calls(3) /* as per test spec */
, m_page_size(page_size)
, m_po(0)
, m_sparse_bo(0)
, m_sparse_bo_data_size(0)
, m_sparse_bo_data_size_rounded(0)
, m_sparse_bo_data_start_offset(0)
, m_sparse_bo_data_start_offset_rounded(0)
, m_testCtx(testContext)
, m_vao(0)
{
/* Left blank intentionally */
}
/** Releases all GL objects used across all test case iterations.
*
* Called once during BufferStorage test run-time.
*/
void AtomicCounterBufferStorageTestCase::deinitTestCaseGlobal()
{
if (m_helper_bo != 0)
{
m_gl.deleteBuffers(1, &m_helper_bo);
m_helper_bo = 0;
}
if (m_po != 0)
{
m_gl.deleteProgram(m_po);
m_po = 0;
}
if (m_vao != 0)
{
m_gl.deleteVertexArrays(1, &m_vao);
m_vao = 0;
}
}
/** Releases temporary GL objects, created specifically for one test case iteration. */
void AtomicCounterBufferStorageTestCase::deinitTestCaseIteration()
{
if (m_sparse_bo != 0)
{
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, /* offset */
m_helper_bo_size_rounded, GL_FALSE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
m_sparse_bo = 0;
}
}
/** Executes a single test iteration. The BufferStorage test will call this method
* repeatedly during its lifetime, testing various valid flag combinations applied
* to the tested sparse buffer object at glBufferStorage() call time.
*
* @param sparse_bo_storage_flags <flags> argument, used by the test in the glBufferStorage()
* call to set up the sparse buffer's storage.
*
* @return true if the test case executed correctly, false otherwise.
*/
bool AtomicCounterBufferStorageTestCase::execute(glw::GLuint sparse_bo_storage_flags)
{
(void)sparse_bo_storage_flags;
static const unsigned char data_zero = 0;
bool result = true;
/* Only execute if GL_MAX_VERTEX_ATOMIC_COUNTERS is > 0 */
if (m_gl_max_vertex_atomic_counters_value == 0)
{
m_testCtx.getLog() << tcu::TestLog::Message << "G_MAX_VERTEX_ATOMIC_COUNTERS is 0. Skipping the test."
<< tcu::TestLog::EndMessage;
goto end;
}
/* Bind the test program object */
m_gl.useProgram(m_po);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glUseProgram() call failed.");
m_gl.bindBuffer(GL_ATOMIC_COUNTER_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
/* Try using both ranged and non-ranged AC bindings.
*
* NOTE: It only makes sense to perform glBindBufferBase() test if all AC pages are
* committed
*/
for (unsigned int n_binding_type = (m_all_pages_committed) ? 0 : 1;
n_binding_type < 2; /* glBindBufferBase(), glBindBufferRange() */
++n_binding_type)
{
bool result_local = true;
if (n_binding_type == 0)
{
m_gl.bindBufferBase(GL_ATOMIC_COUNTER_BUFFER, 0, /* index */
m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBufferBase() call failed.");
}
else
{
m_gl.bindBufferRange(GL_ATOMIC_COUNTER_BUFFER, 0, /* index */
m_sparse_bo, m_sparse_bo_data_start_offset, m_helper_bo_size);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBufferRange() call failed.");
}
/* Zero out the sparse buffer's contents */
m_gl.clearBufferData(GL_ATOMIC_COUNTER_BUFFER, GL_R8, GL_RED, GL_UNSIGNED_BYTE, &data_zero);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glClearBufferData() call failed.");
/* Run the test */
m_gl.drawArraysInstanced(GL_POINTS, 0, /* first */
m_gl_max_vertex_atomic_counters_value, /* count */
m_n_draw_calls);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glDrawArraysInstanced() call failed");
/* Retrieve the atomic counter values */
const glw::GLuint* ac_data = NULL;
const unsigned int n_expected_written_values =
(m_all_pages_committed) ? m_gl_max_vertex_atomic_counters_value : m_gl_max_vertex_atomic_counters_value / 2;
m_gl.bindBuffer(GL_COPY_READ_BUFFER, m_sparse_bo);
m_gl.bindBuffer(GL_COPY_WRITE_BUFFER, m_helper_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffeR() call failed");
m_gl.copyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER,
(n_binding_type == 0) ? 0 : m_sparse_bo_data_start_offset, 0, /* writeOffset */
m_sparse_bo_data_size);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glCopyBufferSubData() call failed.");
ac_data = (const glw::GLuint*)m_gl.mapBufferRange(GL_COPY_WRITE_BUFFER, 0, /* offset */
m_sparse_bo_data_size, GL_MAP_READ_BIT);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glMapBufferRange() call failed.");
for (unsigned int n_counter = 0; n_counter < n_expected_written_values && result_local; ++n_counter)
{
const unsigned int expected_value = m_n_draw_calls;
const unsigned int retrieved_value =
*((unsigned int*)((unsigned char*)ac_data + m_gl_atomic_counter_uniform_array_stride * n_counter));
if (expected_value != retrieved_value)
{
m_testCtx.getLog() << tcu::TestLog::Message << "Invalid atomic counter value "
"["
<< retrieved_value << "]"
" instead of the expected value "
"["
<< expected_value << "]"
" at index "
<< n_counter << " when using "
<< ((n_binding_type == 0) ? "glBindBufferBase()" : "glBindBufferRange()")
<< " for AC binding configuration" << tcu::TestLog::EndMessage;
result_local = false;
} /* if (expected_value != retrieved_value) */
} /* for (all expected atomic counter values) */
m_gl.unmapBuffer(GL_COPY_WRITE_BUFFER);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glUnmapBuffer() call failed.");
result &= result_local;
} /* for (both binding types) */
end:
return result;
}
/** Initializes GL objects used across all test case iterations.
*
* Called once during BufferStorage test run-time.
*/
bool AtomicCounterBufferStorageTestCase::initTestCaseGlobal()
{
const glw::GLuint ac_uniform_index = 0; /* only one uniform is defined in the VS below */
std::stringstream n_counters_sstream;
std::string n_counters_string;
bool result = true;
static const char* vs_body_preamble = "#version 430 core\n"
"\n";
static const char* vs_body_core = "layout(binding = 0) uniform atomic_uint counters[N_COUNTERS];\n"
"\n"
"void main()\n"
"{\n"
" for (uint n = 0; n < N_COUNTERS; ++n)\n"
" {\n"
" if (n == gl_VertexID)\n"
" {\n"
" atomicCounterIncrement(counters[n]);\n"
" }\n"
" }\n"
"\n"
" gl_Position = vec4(0.0, 0.0, 0.0, 1.0);\n"
"}\n";
const char* vs_body_parts[] = { vs_body_preamble, DE_NULL, /* will be set to n_counters_string.c_str() */
vs_body_core };
const unsigned int n_vs_body_parts = sizeof(vs_body_parts) / sizeof(vs_body_parts[0]);
/* Retrieve GL_MAX_VERTEX_ATOMIC_COUNTERS value. The test will only be executed if it's >= 1 */
m_gl.getIntegerv(GL_MAX_VERTEX_ATOMIC_COUNTERS, &m_gl_max_vertex_atomic_counters_value);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glGetIntegerv() call failed.");
if (m_gl_max_vertex_atomic_counters_value == 0)
{
goto end;
}
/* Form the N_COUNTERS declaration string */
n_counters_sstream << "#define N_COUNTERS " << m_gl_max_vertex_atomic_counters_value << "\n";
n_counters_string = n_counters_sstream.str();
vs_body_parts[1] = n_counters_string.c_str();
/* Set up the program object */
DE_ASSERT(m_po == 0);
m_po =
SparseBufferTestUtilities::createProgram(m_gl, DE_NULL, /* fs_body_parts */
0, /* n_fs_body_parts */
vs_body_parts, n_vs_body_parts, DE_NULL, /* attribute_names */
DE_NULL, /* attribute_locations */
0); /* n_attribute_properties */
if (m_po == 0)
{
result = false;
goto end;
}
/* Helper BO will be used to hold the atomic counter buffer data.
* Determine how much space will be needed.
*
* The minimum required maximum for GL_MAX_VERTEX_ATOMIC_COUNTERS is 0. We have
* already bailed out above if that is the value reported, as executing the test
* in such an environment would be pointless.
*/
m_gl.getActiveUniformsiv(m_po, 1, /* uniformCount */
&ac_uniform_index, GL_UNIFORM_ARRAY_STRIDE, &m_gl_atomic_counter_uniform_array_stride);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glGetActiveUniformsiv() call failed.");
DE_ASSERT(m_gl_atomic_counter_uniform_array_stride >= (int)sizeof(unsigned int));
m_helper_bo_size = m_gl_atomic_counter_uniform_array_stride * m_gl_max_vertex_atomic_counters_value;
m_helper_bo_size_rounded = SparseBufferTestUtilities::alignOffset(m_helper_bo_size, m_page_size);
/* Set up the helper BO */
DE_ASSERT(m_helper_bo == 0);
m_gl.genBuffers(1, &m_helper_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glGenBuffers() call failed.");
m_gl.bindBuffer(GL_COPY_READ_BUFFER, m_helper_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferStorage(GL_COPY_READ_BUFFER, m_helper_bo_size_rounded, DE_NULL, GL_MAP_READ_BIT); /* flags */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferStorage() call failed.");
/* Set up the vertex array object */
DE_ASSERT(m_vao == 0);
m_gl.genVertexArrays(1, &m_vao);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glGenVertexArrays() call failed.");
m_gl.bindVertexArray(m_vao);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindVertexArray() call failed.");
end:
return result;
}
/** Initializes GL objects which are needed for a single test case iteration.
*
* deinitTestCaseIteration() will be called after the test case is executed in ::execute()
* to release these objects.
**/
bool AtomicCounterBufferStorageTestCase::initTestCaseIteration(glw::GLuint sparse_bo)
{
bool result = true;
/* Cache the BO id, if not cached already */
DE_ASSERT(m_sparse_bo == 0 || m_sparse_bo == sparse_bo);
m_sparse_bo = sparse_bo;
/* Set up the sparse buffer. */
int sparse_bo_data_size = 0;
DE_ASSERT(m_helper_bo_size_rounded != 0);
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
if (m_all_pages_committed)
{
/* Commit all required pages */
sparse_bo_data_size = m_helper_bo_size_rounded;
}
else
{
/* Only commit the first half of the required pages */
DE_ASSERT((m_helper_bo_size_rounded % m_page_size) == 0);
sparse_bo_data_size = (m_helper_bo_size_rounded / m_page_size) * m_page_size / 2;
}
/* NOTE: We need to ensure that the memory region assigned to the atomic counter buffer spans
* at least through two separate pages.
*
* Since we align up, we need to move one page backward and then apply the alignment function
* to determine the start page index.
*/
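/* Worked example (hypothetical numbers): with m_page_size = 65536 and
* m_helper_bo_size_rounded = 65536, sparse_bo_data_start_offset = 65536 - 32768 = 32768;
* moving one page back yields -32768, which is clamped to 0, so the commitment starts at
* page 0. With all pages committed, m_sparse_bo_data_size_rounded becomes
* alignOffset(32768 + 65536, 65536) = 131072, i.e. pages 0 and 1 are committed and the
* atomic counter buffer region [32768, 98304) spans both of them. */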
const int sparse_bo_data_start_offset = m_page_size - m_helper_bo_size_rounded / 2;
int sparse_bo_data_start_offset_minus_page = sparse_bo_data_start_offset - m_page_size;
if (sparse_bo_data_start_offset_minus_page < 0)
{
sparse_bo_data_start_offset_minus_page = 0;
}
m_sparse_bo_data_start_offset = sparse_bo_data_start_offset;
m_sparse_bo_data_start_offset_rounded =
SparseBufferTestUtilities::alignOffset(sparse_bo_data_start_offset_minus_page, m_page_size);
m_sparse_bo_data_size = sparse_bo_data_size;
m_sparse_bo_data_size_rounded =
SparseBufferTestUtilities::alignOffset(m_sparse_bo_data_start_offset + sparse_bo_data_size, m_page_size);
DE_ASSERT((m_sparse_bo_data_size_rounded % m_page_size) == 0);
DE_ASSERT((m_sparse_bo_data_start_offset_rounded % m_page_size) == 0);
m_gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, m_sparse_bo_data_start_offset_rounded, m_sparse_bo_data_size_rounded,
GL_TRUE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
return result;
}
/** Constructor.
*
* @param gl GL entry-points container
* @param context CTS rendering context
* @param testContext CTS test context
* @param page_size Page size, as reported by implementation for the GL_SPARSE_BUFFER_PAGE_SIZE_ARB query.
*/
BufferTextureStorageTestCase::BufferTextureStorageTestCase(const glw::Functions& gl, deqp::Context& context,
tcu::TestContext& testContext, glw::GLint page_size)
: m_gl(gl)
, m_helper_bo(0)
, m_helper_bo_data(DE_NULL)
, m_helper_bo_data_size(0)
, m_is_texture_buffer_range_supported(false)
, m_page_size(page_size)
, m_po(0)
, m_po_local_wg_size(1024)
, m_sparse_bo(0)
, m_sparse_bo_size(0)
, m_sparse_bo_size_rounded(0)
, m_ssbo(0)
, m_ssbo_zero_data(DE_NULL)
, m_ssbo_zero_data_size(0)
, m_testCtx(testContext)
, m_to(0)
, m_to_width(65536) /* min max for GL_MAX_TEXTURE_BUFFER_SIZE_ARB */
{
const glu::ContextInfo& context_info = context.getContextInfo();
glu::RenderContext& render_context = context.getRenderContext();
if (glu::contextSupports(render_context.getType(), glu::ApiType::core(4, 3)) ||
context_info.isExtensionSupported("GL_ARB_texture_buffer_range"))
{
m_is_texture_buffer_range_supported = true;
}
}
/** Releases all GL objects used across all test case iterations.
*
* Called once during BufferStorage test run-time.
*/
void BufferTextureStorageTestCase::deinitTestCaseGlobal()
{
if (m_helper_bo != 0)
{
m_gl.deleteBuffers(1, &m_helper_bo);
m_helper_bo = 0;
}
if (m_helper_bo_data != DE_NULL)
{
delete[] m_helper_bo_data;
m_helper_bo_data = DE_NULL;
}
if (m_po != 0)
{
m_gl.deleteProgram(m_po);
m_po = 0;
}
if (m_ssbo != 0)
{
m_gl.deleteBuffers(1, &m_ssbo);
m_ssbo = 0;
}
if (m_ssbo_zero_data != DE_NULL)
{
delete[] m_ssbo_zero_data;
m_ssbo_zero_data = DE_NULL;
}
if (m_to != 0)
{
m_gl.deleteTextures(1, &m_to);
m_to = 0;
}
}
/** Releases temporary GL objects, created specifically for one test case iteration. */
void BufferTextureStorageTestCase::deinitTestCaseIteration()
{
if (m_sparse_bo != 0)
{
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_FALSE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
m_sparse_bo = 0;
}
}
/** Executes a single test iteration. The BufferStorage test will call this method
* repeatedly during its lifetime, testing various valid flag combinations applied
* to the tested sparse buffer object at glBufferStorage() call time.
*
* @param sparse_bo_storage_flags <flags> argument, used by the test in the glBufferStorage()
* call to set up the sparse buffer's storage.
*
* @return true if the test case executed correctly, false otherwise.
*/
bool BufferTextureStorageTestCase::execute(glw::GLuint sparse_bo_storage_flags)
{
(void)sparse_bo_storage_flags;
bool result = true;
/* Bind the program object */
m_gl.useProgram(m_po);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glUseProgram() call failed.");
m_gl.bindBuffer(GL_SHADER_STORAGE_BUFFER, m_ssbo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, /* index */
m_ssbo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBufferBase() call failed.");
/* Set up bindings for the copy ops */
m_gl.bindBuffer(GL_COPY_READ_BUFFER, m_helper_bo);
m_gl.bindBuffer(GL_COPY_WRITE_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call(s) failed.");
/* Run the test in two iterations:
*
* a) All required pages are committed.
* b) Only half of the pages are committed. */
for (unsigned int n_iteration = 0; n_iteration < 2; ++n_iteration)
{
/* Test glTexBuffer() and glTexBufferRange() separately. */
for (int n_entry_point = 0; n_entry_point < (m_is_texture_buffer_range_supported ? 2 : 1); ++n_entry_point)
{
bool result_local = true;
/* Set up the sparse buffer's memory backing. */
const unsigned int tbo_commit_start_offset = (n_iteration == 0) ? 0 : m_sparse_bo_size_rounded / 2;
const unsigned int tbo_commit_size =
(n_iteration == 0) ? m_sparse_bo_size_rounded : m_sparse_bo_size_rounded / 2;
m_gl.bindBuffer(GL_TEXTURE_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferPageCommitmentARB(GL_TEXTURE_BUFFER, tbo_commit_start_offset, tbo_commit_size,
GL_TRUE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
/* Set up the buffer texture's backing */
if (n_entry_point == 0)
{
m_gl.texBuffer(GL_TEXTURE_BUFFER, GL_RGBA8, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glTexBuffer() call failed.");
}
else
{
m_gl.texBufferRange(GL_TEXTURE_BUFFER, GL_RGBA8, m_sparse_bo, 0, /* offset */
m_sparse_bo_size);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glTexBufferRange() call failed.");
}
/* Set up the sparse buffer's data storage */
m_gl.copyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, /* readOffset */
0, /* writeOffset */
m_helper_bo_data_size);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glCopyBufferSubData() call failed.");
/* Run the compute program */
DE_ASSERT((m_to_width % m_po_local_wg_size) == 0);
m_gl.dispatchCompute(m_to_width / m_po_local_wg_size, 1, /* num_groups_y */
1); /* num_groups_z */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glDispatchCompute() call failed.");
/* Flush the caches */
m_gl.memoryBarrier(GL_BUFFER_UPDATE_BARRIER_BIT);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glMemoryBarrier() call failed.");
/* Map the SSBO into process space, so we can check whether the texture buffer's
* contents were found valid by the compute shader */
unsigned int current_tb_offset = 0;
const unsigned int* ssbo_data_ptr =
(const unsigned int*)m_gl.mapBuffer(GL_SHADER_STORAGE_BUFFER, GL_READ_ONLY);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glMapBuffer() call failed.");
for (unsigned int n_texel = 0; n_texel < m_to_width && result_local;
++n_texel, current_tb_offset += 4 /* rgba */)
{
/* NOTE: Since the CS uses std140 layout, we need to move by 4 ints for
* each result value */
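/* (Under std140, the array stride of "int result[]" is rounded up to 16 bytes, which is
* why each result value is read from ssbo_data_ptr[n_texel * 4] below.) */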
if (current_tb_offset >= tbo_commit_start_offset &&
current_tb_offset < (tbo_commit_start_offset + tbo_commit_size) && ssbo_data_ptr[n_texel * 4] != 1)
{
m_testCtx.getLog() << tcu::TestLog::Message << "A texel read from the texture buffer at index "
"["
<< n_texel << "]"
" was marked as invalid by the CS invocation."
<< tcu::TestLog::EndMessage;
result_local = false;
} /* if (ssbo_data_ptr[n_texel * 4] != 1) */
} /* for (all result values) */
result &= result_local;
m_gl.unmapBuffer(GL_SHADER_STORAGE_BUFFER);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glUnmapBuffer() call failed.");
/* Remove the physical backing from the sparse buffer */
m_gl.bufferPageCommitmentARB(GL_TEXTURE_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_FALSE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
/* Reset SSBO's contents */
m_gl.bufferSubData(GL_SHADER_STORAGE_BUFFER, 0, /* offset */
m_ssbo_zero_data_size, m_ssbo_zero_data);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferSubData() call failed.");
} /* for (both entry-points) */
} /* for (both iterations) */
return result;
}
/** Initializes GL objects used across all test case iterations.
*
* Called once during BufferStorage test run-time.
*/
bool BufferTextureStorageTestCase::initTestCaseGlobal()
{
/* Set up the test program */
static const char* cs_body =
"#version 430 core\n"
"\n"
"layout(local_size_x = 1024) in;\n"
"\n"
"layout(std140, binding = 0) buffer data\n"
"{\n"
" restrict writeonly int result[];\n"
"};\n"
"\n"
"uniform samplerBuffer input_texture;\n"
"\n"
"void main()\n"
"{\n"
" uint texel_index = gl_GlobalInvocationID.x;\n"
"\n"
" if (texel_index < 65536)\n"
" {\n"
" vec4 expected_texel_data = vec4 (float((texel_index) % 255) / 255.0,\n"
" float((texel_index + 35) % 255) / 255.0,\n"
" float((texel_index + 78) % 255) / 255.0,\n"
" float((texel_index + 131) % 255) / 255.0);\n"
" vec4 texel_data = texelFetch(input_texture, int(texel_index) );\n"
"\n"
" if (abs(texel_data.r - expected_texel_data.r) > 1.0 / 255.0 ||\n"
" abs(texel_data.g - expected_texel_data.g) > 1.0 / 255.0 ||\n"
" abs(texel_data.b - expected_texel_data.b) > 1.0 / 255.0 ||\n"
" abs(texel_data.a - expected_texel_data.a) > 1.0 / 255.0)\n"
" {\n"
" result[texel_index] = 0;\n"
" }\n"
" else\n"
" {\n"
" result[texel_index] = 1;\n"
" }\n"
" }\n"
"}\n";
m_po = SparseBufferTestUtilities::createComputeProgram(m_gl, &cs_body, 1); /* n_cs_body_parts */
/* Set up a data buffer we will use to initialize the SSBO with default data.
*
* CS uses a std140 layout for the SSBO, so we need to add the additional padding.
*/
m_ssbo_zero_data_size = static_cast<unsigned int>(4 * sizeof(int) * m_to_width);
m_ssbo_zero_data = new unsigned char[m_ssbo_zero_data_size];
memset(m_ssbo_zero_data, 0, m_ssbo_zero_data_size);
/* Set up the SSBO */
m_gl.genBuffers(1, &m_ssbo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glGenBuffers() call failed.");
m_gl.bindBuffer(GL_SHADER_STORAGE_BUFFER, m_ssbo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferData(GL_SHADER_STORAGE_BUFFER, m_ssbo_zero_data_size, m_ssbo_zero_data, GL_STATIC_DRAW);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferData() call failed.");
/* During execution, we will need to use a helper buffer object. The BO will hold
* data we will be copying into the sparse buffer object for each iteration.
*
* Create an array to hold the helper buffer's data and fill it with info that
* the compute shader is going to be expecting */
unsigned char* helper_bo_data_traveller_ptr = NULL;
m_helper_bo_data_size = m_to_width * 4; /* rgba */
m_helper_bo_data = new unsigned char[m_helper_bo_data_size];
helper_bo_data_traveller_ptr = m_helper_bo_data;
for (unsigned int n_texel = 0; n_texel < m_to_width; ++n_texel)
{
/* Red */
*helper_bo_data_traveller_ptr = static_cast<unsigned char>(n_texel % 255);
++helper_bo_data_traveller_ptr;
/* Green */
*helper_bo_data_traveller_ptr = static_cast<unsigned char>((n_texel + 35) % 255);
++helper_bo_data_traveller_ptr;
/* Blue */
*helper_bo_data_traveller_ptr = static_cast<unsigned char>((n_texel + 78) % 255);
++helper_bo_data_traveller_ptr;
/* Alpha */
*helper_bo_data_traveller_ptr = static_cast<unsigned char>((n_texel + 131) % 255);
++helper_bo_data_traveller_ptr;
} /* for (all texels to be accessible via the buffer texture) */
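/* For example (illustrative only), texel 0 is written as the bytes (0, 35, 78, 131),
* which is exactly the value expected_texel_data evaluates to for texel_index 0 in the
* compute shader above, after normalization by 255.0. */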
/* Set up the helper buffer object which we are going to use to copy data into
* the sparse buffer object. */
m_gl.genBuffers(1, &m_helper_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glGenBuffers() call failed.");
m_gl.bindBuffer(GL_COPY_READ_BUFFER, m_helper_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferData(GL_COPY_READ_BUFFER, m_helper_bo_data_size, m_helper_bo_data, GL_STATIC_DRAW);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferData() call failed.");
/* Set up the texture buffer object. We will attach the actual buffer storage
* in execute() */
m_gl.genTextures(1, &m_to);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glGenTextures() call failed.");
m_gl.bindTexture(GL_TEXTURE_BUFFER, m_to);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindTexture() call failed.");
/* Determine the number of bytes both the helper and the sparse buffer
* object need to be able to hold, at maximum */
m_sparse_bo_size = static_cast<unsigned int>(m_to_width * sizeof(int));
m_sparse_bo_size_rounded = SparseBufferTestUtilities::alignOffset(m_sparse_bo_size, m_page_size);
return true;
}
/** Initializes GL objects which are needed for a single test case iteration.
*
* deinitTestCaseIteration() will be called after the test case is executed in ::execute()
* to release these objects.
**/
bool BufferTextureStorageTestCase::initTestCaseIteration(glw::GLuint sparse_bo)
{
bool result = true;
/* Cache the BO id, if not cached already */
DE_ASSERT(m_sparse_bo == 0 || m_sparse_bo == sparse_bo);
m_sparse_bo = sparse_bo;
return result;
}
/** Constructor.
*
* @param gl GL entry-points container
* @param testContext CTS test context
* @param page_size Page size, as reported by implementation for the GL_SPARSE_BUFFER_PAGE_SIZE_ARB query.
*/
ClearOpsBufferStorageTestCase::ClearOpsBufferStorageTestCase(const glw::Functions& gl, tcu::TestContext& testContext,
glw::GLint page_size)
: m_gl(gl)
, m_helper_bo(0)
, m_initial_data(DE_NULL)
, m_n_pages_to_use(16)
, m_page_size(page_size)
, m_sparse_bo(0)
, m_sparse_bo_size_rounded(0)
, m_testCtx(testContext)
{
/* Left blank intentionally */
}
/** Releases all GL objects used across all test case iterations.
*
* Called once during BufferStorage test run-time.
*/
void ClearOpsBufferStorageTestCase::deinitTestCaseGlobal()
{
if (m_helper_bo != 0)
{
m_gl.deleteBuffers(1, &m_helper_bo);
m_helper_bo = 0;
}
if (m_initial_data != DE_NULL)
{
delete[] m_initial_data;
m_initial_data = DE_NULL;
}
}
/** Releases temporary GL objects, created specifically for one test case iteration. */
void ClearOpsBufferStorageTestCase::deinitTestCaseIteration()
{
if (m_sparse_bo != 0)
{
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_FALSE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
m_sparse_bo = 0;
}
}
/** Executes a single test iteration. The BufferStorage test will call this method
* repeatedly during its lifetime, testing various valid flag combinations applied
* to the tested sparse buffer object at glBufferStorage() call time.
*
* @param sparse_bo_storage_flags <flags> argument, used by the test in the glBufferStorage()
* call to set up the sparse buffer's storage.
*
* @return true if the test case executed correctly, false otherwise.
*/
bool ClearOpsBufferStorageTestCase::execute(glw::GLuint sparse_bo_storage_flags)
{
(void)sparse_bo_storage_flags;
bool result = true;
const unsigned int data_rgba8 = 0x12345678;
for (unsigned int n_clear_op_type = 0; n_clear_op_type < 2; /* glClearBufferData(), glClearBufferSubData() */
++n_clear_op_type)
{
const bool use_clear_buffer_data_call = (n_clear_op_type == 0);
/* We will run the test case in two iterations:
*
* 1) All pages will have a physical backing.
* 2) Half of the pages will have a physical backing.
*/
for (unsigned int n_iteration = 0; n_iteration < 2; ++n_iteration)
{
/* By default, for each iteration all sparse buffer pages are committed.
*
* For the last iteration, we need to de-commit the latter half before
* proceeding with the test.
*/
const bool all_pages_committed = (n_iteration == 0);
if (!all_pages_committed)
{
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, m_sparse_bo_size_rounded / 2, /* offset */
m_sparse_bo_size_rounded / 2, /* size */
GL_TRUE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
}
/* Set up the sparse buffer contents */
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_helper_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferSubData(GL_ARRAY_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, m_initial_data);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferSubData() call failed.");
m_gl.bindBuffer(GL_COPY_READ_BUFFER, m_helper_bo);
m_gl.bindBuffer(GL_COPY_WRITE_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call(s) failed.");
m_gl.copyBufferSubData(GL_COPY_READ_BUFFER, /* readTarget */
GL_COPY_WRITE_BUFFER, /* writeTarget */
0, /* readOffset */
0, /* writeOffset */
m_sparse_bo_size_rounded);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glCopyBufferSubData() call failed.");
/* Issue the clear call */
unsigned int clear_region_size = 0;
unsigned int clear_region_start_offset = 0;
if (use_clear_buffer_data_call)
{
DE_ASSERT((m_sparse_bo_size_rounded % sizeof(unsigned int)) == 0);
clear_region_size = m_sparse_bo_size_rounded;
clear_region_start_offset = 0;
m_gl.clearBufferData(GL_COPY_WRITE_BUFFER, GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, &data_rgba8);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glClearBufferData() call failed.");
}
else
{
DE_ASSERT(((m_sparse_bo_size_rounded / 2) % sizeof(unsigned int)) == 0);
DE_ASSERT(((m_sparse_bo_size_rounded) % sizeof(unsigned int)) == 0);
clear_region_size = m_sparse_bo_size_rounded / 2;
clear_region_start_offset = m_sparse_bo_size_rounded / 2;
m_gl.clearBufferSubData(GL_COPY_WRITE_BUFFER, GL_RGBA8, clear_region_start_offset, clear_region_size,
GL_RGBA, GL_UNSIGNED_BYTE, &data_rgba8);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glClearBufferSubData() call failed.");
}
/* Retrieve the modified buffer's contents */
const unsigned char* result_data = NULL;
m_gl.copyBufferSubData(GL_COPY_WRITE_BUFFER, /* readTarget */
GL_COPY_READ_BUFFER, /* writeTarget */
0, /* readOffset */
0, /* writeOffset */
m_sparse_bo_size_rounded);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glCopyBufferSubData() call failed.");
result_data = (unsigned char*)m_gl.mapBufferRange(GL_COPY_READ_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_MAP_READ_BIT);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glMapBufferRange() call failed.");
/* Verify the result data: unmodified region */
bool result_local = true;
const unsigned int unmodified_region_size = (use_clear_buffer_data_call) ? 0 : clear_region_start_offset;
const unsigned int unmodified_region_start_offset = 0;
for (unsigned int n_current_byte = unmodified_region_start_offset;
(n_current_byte < unmodified_region_start_offset + unmodified_region_size) && result_local;
++n_current_byte)
{
const unsigned int current_initial_data_offset = n_current_byte - unmodified_region_start_offset;
const unsigned char expected_value = m_initial_data[current_initial_data_offset];
const unsigned char found_value = result_data[n_current_byte];
if (expected_value != found_value)
{
m_testCtx.getLog() << tcu::TestLog::Message
<< "Unmodified buffer object region has invalid contents. Expected byte "
<< "[" << (int)expected_value << "]"
", found byte:"
"["
<< (int)found_value << "]"
" at index "
"["
<< n_current_byte << "]; "
"call type:"
"["
<< ((use_clear_buffer_data_call) ? "glClearBufferData()" :
"glClearBufferSubData()")
<< "]"
", all required pages committed?:"
"["
<< ((all_pages_committed) ? "yes" : "no") << "]" << tcu::TestLog::EndMessage;
result_local = false;
break;
}
}
result &= result_local;
result_local = true;
/* Verify the result data: modified region (clamped to the memory region
* with actual physical backing) */
const unsigned int modified_region_size = (all_pages_committed) ? clear_region_size : 0;
const unsigned int modified_region_start_offset = clear_region_start_offset;
for (unsigned int n_current_byte = modified_region_start_offset;
(n_current_byte < modified_region_start_offset + modified_region_size) && result_local;
++n_current_byte)
{
const unsigned char component_offset = n_current_byte % 4;
const unsigned char expected_value =
static_cast<unsigned char>((data_rgba8 & (0xFFu << (component_offset * 8))) >> (component_offset * 8));
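/* (For data_rgba8 == 0x12345678 this extraction yields the repeating byte pattern
* 0x78, 0x56, 0x34, 0x12, matching the bytes GL reads from &data_rgba8 on a
* little-endian host when clearing with GL_RGBA / GL_UNSIGNED_BYTE.) */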
const unsigned char found_value = result_data[n_current_byte];
if (expected_value != found_value)
{
m_testCtx.getLog() << tcu::TestLog::Message
<< "Modified buffer object region has invalid contents. Expected byte "
<< "[" << (int)expected_value << "]"
", found byte:"
"["
<< (int)found_value << "]"
" at index "
"["
<< n_current_byte << "]; "
"call type:"
"["
<< ((use_clear_buffer_data_call) ? "glClearBufferData()" :
"glClearBufferSubData()")
<< "]"
", all required pages committed?:"
"["
<< ((all_pages_committed) ? "yes" : "no") << "]" << tcu::TestLog::EndMessage;
result_local = false;
break;
}
}
result &= result_local;
/* Unmap the storage before proceeding */
m_gl.unmapBuffer(GL_COPY_READ_BUFFER);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glUnmapBuffer() call failed.");
} /* for (both iterations) */
} /* for (both clear types) */
return result;
}
/** Initializes GL objects used across all test case iterations.
*
* Called once during BufferStorage test run-time.
*/
bool ClearOpsBufferStorageTestCase::initTestCaseGlobal()
{
unsigned int n_bytes_filled = 0;
const unsigned int n_bytes_needed = m_n_pages_to_use * m_page_size;
/* Determine the number of bytes both the helper and the sparse buffer
* object need to be able to hold, at maximum */
m_sparse_bo_size_rounded = SparseBufferTestUtilities::alignOffset(n_bytes_needed, m_page_size);
/* Set up the helper BO */
DE_ASSERT(m_helper_bo == 0);
m_gl.genBuffers(1, &m_helper_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glGenBuffers() call failed.");
m_gl.bindBuffer(GL_COPY_READ_BUFFER, m_helper_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferStorage(GL_COPY_READ_BUFFER, m_sparse_bo_size_rounded, DE_NULL,
GL_DYNAMIC_STORAGE_BIT | GL_MAP_READ_BIT); /* flags */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferStorage() call failed.");
/* Set up a client-side data buffer we will use to fill the sparse BO with data,
* to be later cleared with the clear ops */
DE_ASSERT(m_initial_data == DE_NULL);
m_initial_data = new unsigned char[m_sparse_bo_size_rounded];
while (n_bytes_filled < m_sparse_bo_size_rounded)
{
m_initial_data[n_bytes_filled] = static_cast<unsigned char>(n_bytes_filled % 256);
++n_bytes_filled;
}
return true;
}
/** Initializes GL objects which are needed for a single test case iteration.
*
* deinitTestCaseIteration() will be called after the test case is executed in ::execute()
* to release these objects.
**/
bool ClearOpsBufferStorageTestCase::initTestCaseIteration(glw::GLuint sparse_bo)
{
bool result = true;
/* Cache the BO id, if not cached already */
DE_ASSERT(m_sparse_bo == 0 || m_sparse_bo == sparse_bo);
m_sparse_bo = sparse_bo;
/* Set up the sparse buffer. */
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_TRUE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
return result;
}
/** Constructor.
*
* @param gl GL entry-points container
* @param testContext CTS test context
* @param page_size Page size, as reported by implementation for the GL_SPARSE_BUFFER_PAGE_SIZE_ARB query.
*/
CopyOpsBufferStorageTestCase::CopyOpsBufferStorageTestCase(const glw::Functions& gl, tcu::TestContext& testContext,
glw::GLint page_size)
: m_gl(gl)
, m_helper_bo(0)
, m_immutable_bo(0)
, m_page_size(page_size)
, m_sparse_bo_size(0)
, m_sparse_bo_size_rounded(0)
, m_testCtx(testContext)
{
m_ref_data[0] = DE_NULL;
m_ref_data[1] = DE_NULL;
m_ref_data[2] = DE_NULL;
m_sparse_bos[0] = 0;
m_sparse_bos[1] = 0;
}
/** Releases all GL objects used across all test case iterations.
*
* Called once during BufferStorage test run-time.
*/
void CopyOpsBufferStorageTestCase::deinitTestCaseGlobal()
{
if (m_helper_bo != 0)
{
m_gl.deleteBuffers(1, &m_helper_bo);
m_helper_bo = 0;
}
if (m_immutable_bo != 0)
{
m_gl.deleteBuffers(1, &m_immutable_bo);
m_immutable_bo = 0;
}
for (unsigned int n_ref_data_buffer = 0; n_ref_data_buffer < sizeof(m_ref_data) / sizeof(m_ref_data[0]);
++n_ref_data_buffer)
{
if (m_ref_data[n_ref_data_buffer] != DE_NULL)
{
delete[] m_ref_data[n_ref_data_buffer];
m_ref_data[n_ref_data_buffer] = DE_NULL;
}
}
/* Only release the test case-owned BO */
if (m_sparse_bos[1] != 0)
{
m_gl.deleteBuffers(1, m_sparse_bos + 1);
m_sparse_bos[1] = 0;
}
}
/** Releases temporary GL objects, created specifically for one test case iteration. */
void CopyOpsBufferStorageTestCase::deinitTestCaseIteration()
{
for (unsigned int n_sparse_bo = 0; n_sparse_bo < sizeof(m_sparse_bos) / sizeof(m_sparse_bos[0]); ++n_sparse_bo)
{
const glw::GLuint sparse_bo_id = m_sparse_bos[n_sparse_bo];
if (sparse_bo_id != 0)
{
m_gl.bindBuffer(GL_ARRAY_BUFFER, sparse_bo_id);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_FALSE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
} /* if (sparse_bo_id != 0) */
} /* for (both BOs) */
}
/** Executes a single test iteration. The BufferStorage test will call this method
* numerous times during its lifetime, testing various valid flag combinations applied
* to the tested sparse buffer object at glBufferStorage() call time.
*
* @param sparse_bo_storage_flags <flags> argument, used by the test in the glBufferStorage()
* call to set up the sparse buffer's storage.
*
* @return true if the test case executed correctly, false otherwise.
*/
bool CopyOpsBufferStorageTestCase::execute(glw::GLuint sparse_bo_storage_flags)
{
(void)sparse_bo_storage_flags;
bool result = true;
/* Iterate over all test cases */
DE_ASSERT(m_immutable_bo != 0);
DE_ASSERT(m_sparse_bos[0] != 0);
DE_ASSERT(m_sparse_bos[1] != 0);
for (_test_cases_const_iterator test_iterator = m_test_cases.begin(); test_iterator != m_test_cases.end();
++test_iterator)
{
bool result_local = true;
const _test_case& test_case = *test_iterator;
const glw::GLuint dst_bo_id =
test_case.dst_bo_is_sparse ? m_sparse_bos[test_case.dst_bo_sparse_id] : m_immutable_bo;
const glw::GLuint src_bo_id =
test_case.src_bo_is_sparse ? m_sparse_bos[test_case.src_bo_sparse_id] : m_immutable_bo;
/* Initialize immutable BO data (if used) */
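/* (A previous test case may have used the immutable BO as a copy destination, so its
 * reference contents are restored before every iteration.) */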
if (dst_bo_id == m_immutable_bo || src_bo_id == m_immutable_bo)
{
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_immutable_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferSubData(GL_ARRAY_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, m_ref_data[0]);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferSubData() call failed.");
}
/* Initialize sparse BO data storage */
for (unsigned int n_sparse_bo = 0; n_sparse_bo < sizeof(m_sparse_bos) / sizeof(m_sparse_bos[0]); ++n_sparse_bo)
{
const bool is_dst_bo = (dst_bo_id == m_sparse_bos[n_sparse_bo]);
const bool is_src_bo = (src_bo_id == m_sparse_bos[n_sparse_bo]);
if (!is_dst_bo && !is_src_bo)
continue;
m_gl.bindBuffer(GL_COPY_READ_BUFFER, m_helper_bo);
m_gl.bindBuffer(GL_COPY_WRITE_BUFFER, m_sparse_bos[n_sparse_bo]);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call(s) failed.");
if (is_dst_bo)
{
m_gl.bufferPageCommitmentARB(GL_COPY_WRITE_BUFFER, test_case.dst_bo_commit_start_offset,
test_case.dst_bo_commit_size, GL_TRUE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
}
if (is_src_bo)
{
m_gl.bufferPageCommitmentARB(GL_COPY_WRITE_BUFFER, test_case.src_bo_commit_start_offset,
test_case.src_bo_commit_size, GL_TRUE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
}
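/* The reference data is staged through the helper BO and then copied into the sparse BO
 * with glCopyBufferSubData(). Any portion of the copy that targets uncommitted pages has
 * no defined effect (but raises no error), which is why the verification code below only
 * inspects offsets that fall inside the committed regions. */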
m_gl.bufferSubData(GL_COPY_READ_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, m_ref_data[1 + n_sparse_bo]);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferSubData() call failed.");
m_gl.copyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, /* readOffset */
0, /* writeOffset */
m_sparse_bo_size_rounded);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glCopyBufferSubData() call failed.");
} /* for (both sparse BOs) */
/* Set up the bindings */
m_gl.bindBuffer(GL_COPY_READ_BUFFER, src_bo_id);
m_gl.bindBuffer(GL_COPY_WRITE_BUFFER, dst_bo_id);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
/* Issue the copy op */
m_gl.copyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, test_case.src_bo_start_offset,
test_case.dst_bo_start_offset, test_case.n_bytes_to_copy);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glCopyBufferSubData() call failed.");
/* Retrieve the destination buffer's contents. The BO used for the previous copy op might have
* been a sparse BO, so copy its storage to a helper immutable BO */
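/* (ARB_sparse_buffer does not allow sparse buffer storage to be created with the map flags,
 * so the sparse BO contents cannot be mapped directly; hence the extra copy into the
 * mappable helper BO.) */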
const unsigned short* dst_bo_data_ptr = NULL;
m_gl.bindBuffer(GL_COPY_READ_BUFFER, dst_bo_id);
m_gl.bindBuffer(GL_COPY_WRITE_BUFFER, m_helper_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.copyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, /* readOffset */
0, /* writeOffset */
m_sparse_bo_size_rounded);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glCopyBufferSubData() call failed.");
dst_bo_data_ptr = (const unsigned short*)m_gl.mapBufferRange(GL_COPY_WRITE_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_MAP_READ_BIT);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glMapBufferRange() call failed.");
/* Verify the retrieved data:
*
* 1. Check the bytes which precede the copy op dst offset. These should be equal to
* the destination buffer's reference data within the committed memory region.
**/
if (test_case.dst_bo_start_offset != 0 && test_case.dst_bo_commit_start_offset < test_case.dst_bo_start_offset)
{
DE_ASSERT(((test_case.dst_bo_start_offset - test_case.dst_bo_commit_start_offset) % sizeof(short)) == 0);
const unsigned int n_valid_values = static_cast<unsigned int>(
(test_case.dst_bo_start_offset - test_case.dst_bo_commit_start_offset) / sizeof(short));
for (unsigned int n_value = 0; n_value < n_valid_values && result_local; ++n_value)
{
const int dst_data_offset = static_cast<int>(sizeof(short) * n_value);
if (dst_data_offset >= test_case.dst_bo_commit_start_offset &&
dst_data_offset < test_case.dst_bo_commit_start_offset + test_case.dst_bo_commit_size)
{
const unsigned short expected_short_value =
*(unsigned short*)((unsigned char*)test_case.dst_bo_ref_data + dst_data_offset);
const unsigned short found_short_value =
*(unsigned short*)((unsigned char*)dst_bo_data_ptr + dst_data_offset);
if (expected_short_value != found_short_value)
{
m_testCtx.getLog()
<< tcu::TestLog::Message << "Malformed data found in the copy op's destination BO, "
"preceding the region modified by the copy op. "
<< "Destination BO id:" << dst_bo_id << " ("
<< ((test_case.dst_bo_is_sparse) ? "sparse buffer)" : "immutable buffer)")
<< ", commited region: " << test_case.dst_bo_commit_start_offset << ":"
<< (test_case.dst_bo_commit_start_offset + test_case.dst_bo_commit_size)
<< ", copy region: " << test_case.dst_bo_start_offset << ":"
<< (test_case.dst_bo_start_offset + test_case.n_bytes_to_copy)
<< ". Source BO id:" << src_bo_id << " ("
<< ((test_case.src_bo_is_sparse) ? "sparse buffer)" : "immutable buffer)")
<< ", commited region: " << test_case.src_bo_commit_start_offset << ":"
<< (test_case.src_bo_commit_start_offset + test_case.src_bo_commit_size)
<< ", copy region: " << test_case.src_bo_start_offset << ":"
<< (test_case.src_bo_start_offset + test_case.n_bytes_to_copy) << ". Expected value of "
<< expected_short_value << ", found value of " << found_short_value
<< " at dst data offset of " << dst_data_offset << "." << tcu::TestLog::EndMessage;
result_local = false;
}
}
} /* for (all preceding values which should not have been affected by the copy op) */
} /* if (copy op did not modify the beginning of the destination buffer storage) */
/* 2. Check if the data written to the destination buffer object is correct. */
for (unsigned int n_copied_short_value = 0;
n_copied_short_value < test_case.n_bytes_to_copy / sizeof(short) && result_local; ++n_copied_short_value)
{
const int src_data_offset =
static_cast<unsigned int>(test_case.src_bo_start_offset + sizeof(short) * n_copied_short_value);
const int dst_data_offset =
static_cast<unsigned int>(test_case.dst_bo_start_offset + sizeof(short) * n_copied_short_value);
if (dst_data_offset >= test_case.dst_bo_commit_start_offset &&
dst_data_offset < test_case.dst_bo_commit_start_offset + test_case.dst_bo_commit_size &&
src_data_offset >= test_case.src_bo_commit_start_offset &&
src_data_offset < test_case.src_bo_commit_start_offset + test_case.src_bo_commit_size)
{
const unsigned short expected_short_value =
*(unsigned short*)((unsigned char*)test_case.src_bo_ref_data + src_data_offset);
const unsigned short found_short_value =
*(unsigned short*)((unsigned char*)dst_bo_data_ptr + dst_data_offset);
if (expected_short_value != found_short_value)
{
m_testCtx.getLog() << tcu::TestLog::Message
<< "Malformed data found in the copy op's destination BO. "
<< "Destination BO id:" << dst_bo_id << " ("
<< ((test_case.dst_bo_is_sparse) ? "sparse buffer)" : "immutable buffer)")
<< ", commited region: " << test_case.dst_bo_commit_start_offset << ":"
<< (test_case.dst_bo_commit_start_offset + test_case.dst_bo_commit_size)
<< ", copy region: " << test_case.dst_bo_start_offset << ":"
<< (test_case.dst_bo_start_offset + test_case.n_bytes_to_copy)
<< ". Source BO id:" << src_bo_id << " ("
<< ((test_case.src_bo_is_sparse) ? "sparse buffer)" : "immutable buffer)")
<< ", commited region: " << test_case.src_bo_commit_start_offset << ":"
<< (test_case.src_bo_commit_start_offset + test_case.src_bo_commit_size)
<< ", copy region: " << test_case.src_bo_start_offset << ":"
<< (test_case.src_bo_start_offset + test_case.n_bytes_to_copy)
<< ". Expected value of " << expected_short_value << ", found value of "
<< found_short_value << " at dst data offset of " << dst_data_offset << "."
<< tcu::TestLog::EndMessage;
result_local = false;
}
}
}
/* 3. Verify the remaining data in the committed part of the destination buffer object is left intact. */
const unsigned int commit_region_end_offset =
test_case.dst_bo_commit_start_offset + test_case.dst_bo_commit_size;
const unsigned int copy_region_end_offset = test_case.dst_bo_start_offset + test_case.n_bytes_to_copy;
if (commit_region_end_offset > copy_region_end_offset)
{
DE_ASSERT(((commit_region_end_offset - copy_region_end_offset) % sizeof(short)) == 0);
const unsigned int n_valid_values =
static_cast<unsigned int>((commit_region_end_offset - copy_region_end_offset) / sizeof(short));
for (unsigned int n_value = 0; n_value < n_valid_values && result_local; ++n_value)
{
const int dst_data_offset = static_cast<int>(copy_region_end_offset + sizeof(short) * n_value);
if (dst_data_offset >= test_case.dst_bo_commit_start_offset &&
dst_data_offset < test_case.dst_bo_commit_start_offset + test_case.dst_bo_commit_size)
{
const unsigned short expected_short_value =
*(unsigned short*)((unsigned char*)test_case.dst_bo_ref_data + dst_data_offset);
const unsigned short found_short_value =
*(unsigned short*)((unsigned char*)dst_bo_data_ptr + dst_data_offset);
if (expected_short_value != found_short_value)
{
m_testCtx.getLog()
<< tcu::TestLog::Message << "Malformed data found in the copy op's destination BO, "
"following the region modified by the copy op. "
<< "Destination BO id:" << dst_bo_id << " ("
<< ((test_case.dst_bo_is_sparse) ? "sparse buffer)" : "immutable buffer)")
<< ", commited region: " << test_case.dst_bo_commit_start_offset << ":"
<< (test_case.dst_bo_commit_start_offset + test_case.dst_bo_commit_size)
<< ", copy region: " << test_case.dst_bo_start_offset << ":"
<< (test_case.dst_bo_start_offset + test_case.n_bytes_to_copy)
<< ". Source BO id:" << src_bo_id << " ("
<< ((test_case.src_bo_is_sparse) ? "sparse buffer)" : "immutable buffer)")
<< ", commited region: " << test_case.src_bo_commit_start_offset << ":"
<< (test_case.src_bo_commit_start_offset + test_case.src_bo_commit_size)
<< ", copy region: " << test_case.src_bo_start_offset << ":"
<< (test_case.src_bo_start_offset + test_case.n_bytes_to_copy) << ". Expected value of "
<< expected_short_value << ", found value of " << found_short_value
<< " at dst data offset of " << dst_data_offset << "." << tcu::TestLog::EndMessage;
result_local = false;
}
}
} /* for (all trailing values which should not have been affected by the copy op) */
} /* if (copy op did not modify the end of the committed destination buffer region) */
/* Unmap the buffer storage */
m_gl.unmapBuffer(GL_COPY_WRITE_BUFFER);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glUnmapBuffer() call failed.");
/* Clean up */
for (unsigned int n_sparse_bo = 0; n_sparse_bo < sizeof(m_sparse_bos) / sizeof(m_sparse_bos[0]); ++n_sparse_bo)
{
const bool is_dst_bo = (dst_bo_id == m_sparse_bos[n_sparse_bo]);
const bool is_src_bo = (src_bo_id == m_sparse_bos[n_sparse_bo]);
if (is_dst_bo || is_src_bo)
{
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bos[n_sparse_bo]);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, m_sparse_bo_size_rounded, GL_FALSE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
}
}
result &= result_local;
} /* for (all test cases) */
return result;
}
/** Allocates reference buffers, fills them with data and updates the m_ref_data array. */
void CopyOpsBufferStorageTestCase::initReferenceData()
{
DE_ASSERT(m_sparse_bo_size_rounded != 0);
DE_ASSERT((m_sparse_bo_size_rounded % 2) == 0);
DE_ASSERT(sizeof(short) == 2);
for (unsigned int n_ref_data_buffer = 0; n_ref_data_buffer < sizeof(m_ref_data) / sizeof(m_ref_data[0]);
++n_ref_data_buffer)
{
DE_ASSERT(m_ref_data[n_ref_data_buffer] == DE_NULL);
m_ref_data[n_ref_data_buffer] = new unsigned short[m_sparse_bo_size_rounded / 2];
/* Write reference values. */
for (unsigned int n_short_value = 0; n_short_value < m_sparse_bo_size_rounded / 2; ++n_short_value)
{
m_ref_data[n_ref_data_buffer][n_short_value] =
(unsigned short)((n_ref_data_buffer + 1) * (n_short_value + 1));
}
} /* for (all reference data buffers) */
}
/** Initializes GL objects used across all test case iterations.
*
* Called once during BufferStorage test run-time.
*/
bool CopyOpsBufferStorageTestCase::initTestCaseGlobal()
{
m_sparse_bo_size = 2 * 3 * 4 * m_page_size;
m_sparse_bo_size_rounded = SparseBufferTestUtilities::alignOffset(m_sparse_bo_size, m_page_size);
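/* m_sparse_bo_size amounts to exactly 24 memory pages, so the half- and quarter-buffer
 * offsets used by the commitment configurations in initTestCases() are always page-aligned. */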
initReferenceData();
/* Initialize the sparse buffer object */
m_gl.genBuffers(1, m_sparse_bos + 1);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glGenBuffers() call failed.");
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bos[1]);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferStorage(GL_ARRAY_BUFFER, m_sparse_bo_size_rounded, DE_NULL, /* data */
GL_SPARSE_STORAGE_BIT_ARB);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferStorage() call failed.");
/* Initialize the immutable buffer objects used by the test */
for (unsigned int n_bo = 0; n_bo < 2; /* helper + immutable BO used for the copy ops */
++n_bo)
{
glw::GLuint* bo_id_ptr = (n_bo == 0) ? &m_helper_bo : &m_immutable_bo;
glw::GLbitfield flags = GL_DYNAMIC_STORAGE_BIT;
if (n_bo == 0)
{
flags |= GL_MAP_READ_BIT;
}
/* Initialize the immutable buffer object */
m_gl.genBuffers(1, bo_id_ptr);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glGenBuffers() call failed.");
m_gl.bindBuffer(GL_ARRAY_BUFFER, *bo_id_ptr);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferStorage(GL_ARRAY_BUFFER, m_sparse_bo_size_rounded, m_ref_data[0], flags);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferStorage() call failed.");
}
return true;
}
/** Initializes GL objects which are needed for a single test case iteration.
*
* deinitTestCaseIteration() will be called after the test case is executed in ::execute()
* to release these objects.
**/
bool CopyOpsBufferStorageTestCase::initTestCaseIteration(glw::GLuint sparse_bo)
{
bool result = true;
/* Remember the BO id */
m_sparse_bos[0] = sparse_bo;
/* Initialize test cases, if this is the first call to initTestCaseIteration() */
if (m_test_cases.size() == 0)
{
initTestCases();
}
/* Make sure all pages of the provided sparse BO are de-committed before
* ::execute() is called. */
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bos[0]);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_FALSE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
return result;
}
/** Fills m_test_cases with test case descriptors. Each such descriptor defines
* a single copy op use case.
*
* The descriptors are then iterated over in ::execute(), defining the behavior
* of the copy ops buffer storage test case.
*/
void CopyOpsBufferStorageTestCase::initTestCases()
{
/* We need to use the following destination & source BO configurations:
*
* Dst: sparse BO 1; Src: sparse BO 2
* Dst: sparse BO 1; Src: immutable BO
* Dst: immutable BO; Src: sparse BO 1
* Dst: sparse BO 1; Src: sparse BO 1
*/
unsigned int n_test_case = 0;
for (unsigned int n_bo_configuration = 0; n_bo_configuration < 4; /* as per the comment */
++n_bo_configuration, ++n_test_case)
{
glw::GLuint dst_bo_sparse_id = 0;
bool dst_bo_is_sparse = false;
unsigned short* dst_bo_ref_data = DE_NULL;
glw::GLuint src_bo_sparse_id = 0;
bool src_bo_is_sparse = false;
unsigned short* src_bo_ref_data = DE_NULL;
switch (n_bo_configuration)
{
case 0:
{
dst_bo_sparse_id = 0;
dst_bo_is_sparse = true;
dst_bo_ref_data = m_ref_data[1];
src_bo_sparse_id = 1;
src_bo_is_sparse = true;
src_bo_ref_data = m_ref_data[2];
break;
}
case 1:
{
dst_bo_sparse_id = 0;
dst_bo_is_sparse = true;
dst_bo_ref_data = m_ref_data[1];
src_bo_is_sparse = false;
src_bo_ref_data = m_ref_data[0];
break;
}
case 2:
{
dst_bo_is_sparse = false;
dst_bo_ref_data = m_ref_data[0];
src_bo_sparse_id = 0;
src_bo_is_sparse = true;
src_bo_ref_data = m_ref_data[1];
break;
}
case 3:
{
dst_bo_sparse_id = 0;
dst_bo_is_sparse = true;
dst_bo_ref_data = m_ref_data[1];
src_bo_sparse_id = 0;
src_bo_is_sparse = true;
src_bo_ref_data = m_ref_data[1];
break;
}
default:
{
TCU_FAIL("Invalid BO configuration index");
}
} /* switch (n_bo_configuration) */
/* Need to test the copy operation in three different scenarios,
* in regard to the destination buffer:
*
* a) All pages of the destination region are committed.
* b) Half of the pages of the destination region are committed.
* c) None of the pages of the destination region are committed.
*
* Destination region spans from 0 to half of the memory we use
* for the testing purposes.
*/
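/* For example, assuming a 64 KB page size, the sparse BO spans 24 pages (1.5 MB):
 * case a) commits pages 0..11, case b) commits pages 6..11, and case c) commits nothing. */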
DE_ASSERT((m_sparse_bo_size_rounded % m_page_size) == 0);
DE_ASSERT((m_sparse_bo_size_rounded % 2) == 0);
DE_ASSERT((m_sparse_bo_size_rounded % 4) == 0);
for (unsigned int n_dst_region = 0; n_dst_region < 3; /* as per the comment */
++n_dst_region)
{
glw::GLuint dst_bo_commit_size = 0;
glw::GLuint dst_bo_commit_start_offset = 0;
switch (n_dst_region)
{
case 0:
{
dst_bo_commit_start_offset = 0;
dst_bo_commit_size = m_sparse_bo_size_rounded / 2;
break;
}
case 1:
{
dst_bo_commit_start_offset = m_sparse_bo_size_rounded / 4;
dst_bo_commit_size = m_sparse_bo_size_rounded / 4;
break;
}
case 2:
{
dst_bo_commit_start_offset = 0;
dst_bo_commit_size = 0;
break;
}
default:
{
TCU_FAIL("Invalid destination region configuration index");
}
} /* switch (n_dst_region) */
/* Same goes for the source region.
*
* Source region spans from m_sparse_bo_size_rounded / 2 to
* m_sparse_bo_size_rounded.
*
**/
for (unsigned int n_src_region = 0; n_src_region < 3; /* as per the comment */
++n_src_region)
{
glw::GLuint src_bo_commit_size = 0;
glw::GLuint src_bo_commit_start_offset = 0;
switch (n_src_region)
{
case 0:
{
src_bo_commit_start_offset = m_sparse_bo_size_rounded / 2;
src_bo_commit_size = m_sparse_bo_size_rounded / 2;
break;
}
case 1:
{
src_bo_commit_start_offset = 3 * m_sparse_bo_size_rounded / 4;
src_bo_commit_size = m_sparse_bo_size_rounded / 4;
break;
}
case 2:
{
src_bo_commit_start_offset = m_sparse_bo_size_rounded / 2;
src_bo_commit_size = 0;
break;
}
default:
{
TCU_FAIL("Invalid source region configuration index");
}
} /* switch (n_src_region) */
/* Initialize the test case descriptor */
_test_case test_case;
test_case.dst_bo_commit_size = dst_bo_commit_size;
test_case.dst_bo_commit_start_offset = dst_bo_commit_start_offset;
test_case.dst_bo_sparse_id = dst_bo_sparse_id;
test_case.dst_bo_is_sparse = dst_bo_is_sparse;
test_case.dst_bo_ref_data = dst_bo_ref_data;
test_case.dst_bo_start_offset = static_cast<glw::GLint>(sizeof(short) * n_test_case);
test_case.n_bytes_to_copy = static_cast<glw::GLint>(
m_sparse_bo_size_rounded / 2 - test_case.dst_bo_start_offset - sizeof(short) * n_test_case);
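/* Note: since dst_bo_start_offset equals sizeof(short) * n_test_case, the destination
 * region ends at (m_sparse_bo_size_rounded / 2 - sizeof(short) * n_test_case) and the
 * source region ends at (m_sparse_bo_size_rounded - 2 * sizeof(short) * n_test_case),
 * so both regions always stay within their respective halves of the buffer. */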
test_case.src_bo_commit_size = src_bo_commit_size;
test_case.src_bo_commit_start_offset = src_bo_commit_start_offset;
test_case.src_bo_sparse_id = src_bo_sparse_id;
test_case.src_bo_is_sparse = src_bo_is_sparse;
test_case.src_bo_ref_data = src_bo_ref_data;
test_case.src_bo_start_offset = m_sparse_bo_size_rounded / 2;
DE_ASSERT(test_case.dst_bo_commit_size >= 0);
DE_ASSERT(test_case.dst_bo_commit_start_offset >= 0);
DE_ASSERT(test_case.dst_bo_ref_data != DE_NULL);
DE_ASSERT(test_case.dst_bo_start_offset >= 0);
DE_ASSERT(test_case.n_bytes_to_copy >= 0);
DE_ASSERT(test_case.src_bo_commit_size >= 0);
DE_ASSERT(test_case.src_bo_commit_start_offset >= 0);
DE_ASSERT(test_case.src_bo_ref_data != DE_NULL);
DE_ASSERT(test_case.src_bo_start_offset >= 0);
m_test_cases.push_back(test_case);
} /* for (all source region commit configurations) */
} /* for (all destination region commit configurations) */
} /* for (all BO configurations which need to be tested) */
}
/** Constructor.
*
* @param gl GL entry-points container
* @param testContext CTS test context
* @param page_size Page size, as reported by implementation for the GL_SPARSE_BUFFER_PAGE_SIZE_ARB query.
*/
IndirectDispatchBufferStorageTestCase::IndirectDispatchBufferStorageTestCase(const glw::Functions& gl,
tcu::TestContext& testContext,
glw::GLint page_size)
: m_dispatch_draw_call_args_start_offset(-1)
, m_expected_ac_value(0)
, m_gl(gl)
, m_global_wg_size_x(2048)
, m_helper_bo(0)
, m_local_wg_size_x(1023) /* must stay in sync with the local work-group size hardcoded in m_po's body! */
, m_page_size(page_size)
, m_po(0)
, m_sparse_bo(0)
, m_sparse_bo_size(0)
, m_sparse_bo_size_rounded(0)
, m_testCtx(testContext)
{
/* Left blank intentionally */
}
/** Releases all GL objects used across all test case iterations.
*
* Called once during BufferStorage test run-time.
*/
void IndirectDispatchBufferStorageTestCase::deinitTestCaseGlobal()
{
if (m_helper_bo != 0)
{
m_gl.deleteBuffers(1, &m_helper_bo);
m_helper_bo = 0;
}
if (m_po != 0)
{
m_gl.deleteProgram(m_po);
m_po = 0;
}
}
/** Releases temporary GL objects, created specifically for one test case iteration. */
void IndirectDispatchBufferStorageTestCase::deinitTestCaseIteration()
{
if (m_sparse_bo != 0)
{
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_FALSE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
m_sparse_bo = 0;
}
}
/** Executes a single test iteration. The BufferStorage test will call this method
* numerous times during its lifetime, testing various valid flag combinations applied
* to the tested sparse buffer object at glBufferStorage() call time.
*
* @param sparse_bo_storage_flags <flags> argument, used by the test in the glBufferStorage()
* call to set up the sparse buffer's storage.
*
* @return true if the test case executed correctly, false otherwise.
*/
bool IndirectDispatchBufferStorageTestCase::execute(glw::GLuint sparse_bo_storage_flags)
{
(void)sparse_bo_storage_flags;
bool result = true;
/* Set up the buffer bindings */
m_gl.bindBuffer(GL_ATOMIC_COUNTER_BUFFER, m_helper_bo);
m_gl.bindBuffer(GL_DISPATCH_INDIRECT_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call(s) failed");
m_gl.bindBufferRange(GL_ATOMIC_COUNTER_BUFFER, 0, /* index */
m_helper_bo, 12, /* offset */
4); /* size */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBufferRange() call failed.");
/* Bind the compute program */
m_gl.useProgram(m_po);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glUseProgram() call failed.");
/* Zero out atomic counter value. */
const unsigned int zero_ac_value = 0;
m_gl.bufferSubData(GL_ATOMIC_COUNTER_BUFFER, 12, /* offset */
4, /* size */
&zero_ac_value);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferSubData() call failed.");
m_expected_ac_value = zero_ac_value;
/* Run the test only in a configuration where all arguments reside in
* committed memory page(s): reading arguments from uncommitted pages means
* reading undefined data, which can result in huge dispatches that
* effectively hang the test.
*/
m_gl.bufferPageCommitmentARB(GL_DISPATCH_INDIRECT_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_TRUE); /* commit */
m_expected_ac_value += m_global_wg_size_x * m_local_wg_size_x;
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call(s) failed.");
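/* Each of the m_global_wg_size_x work groups runs m_local_wg_size_x invocations, and every
 * invocation increments the atomic counter exactly once, hence the expected value of
 * 2048 * 1023 increments. */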
/* Copy the indirect dispatch call args data from the helper BO to the sparse BO */
m_gl.bindBuffer(GL_COPY_READ_BUFFER, m_helper_bo);
m_gl.bindBuffer(GL_COPY_WRITE_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.copyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, /* readOffset */
m_dispatch_draw_call_args_start_offset, sizeof(unsigned int) * 3);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glCopyBufferSubData() call failed.");
/* Run the program */
m_gl.dispatchComputeIndirect(m_dispatch_draw_call_args_start_offset);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glDispatchComputeIndirect() call failed.");
/* Extract the AC value and verify it */
const unsigned int* ac_data_ptr =
(const unsigned int*)m_gl.mapBufferRange(GL_ATOMIC_COUNTER_BUFFER, 12, /* offset */
4, /* length */
GL_MAP_READ_BIT);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glMapBufferRange() call failed.");
if (*ac_data_ptr != m_expected_ac_value && result)
{
m_testCtx.getLog() << tcu::TestLog::Message << "Invalid atomic counter value encountered. "
"Expected value: ["
<< m_expected_ac_value << "]"
", found:"
"["
<< *ac_data_ptr << "]." << tcu::TestLog::EndMessage;
result = false;
}
/* Unmap the buffer before we move on with the next iteration */
m_gl.unmapBuffer(GL_ATOMIC_COUNTER_BUFFER);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glUnmapBuffer() call failed.");
return result;
}
/** Initializes GL objects used across all test case iterations.
*
* Called once during BufferStorage test run-time.
*/
bool IndirectDispatchBufferStorageTestCase::initTestCaseGlobal()
{
bool result = true;
/* One of the cases the test case implementation needs to support is the scenario
* where the indirect call arguments are located on the boundary of two (or more) memory pages,
* and some of the pages are not committed.
*
* There are two scenarios which can happen:
*
* a) page size >= sizeof(uint) * 3: Allocate two pages, arg start offset: (page_size - 4) aligned to 4.
* The alignment is a must, since we'll be feeding the offset to an indirect dispatch call.
* b) page size < sizeof(uint) * 3: Allocate as many pages as needed, disable some of the pages.
*
* For code clarity, the two cases are handled by separate branches, although they could be easily
* merged.
*/
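/* For example, assuming a 64 KB page size, scenario a) applies: the args start at offset
 * 65532, so the three unsigned ints (12 bytes) straddle the boundary between the first and
 * the second memory page, and the sparse BO is rounded up to two pages. */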
const int n_indirect_dispatch_call_arg_bytes = sizeof(unsigned int) * 3;
if (m_page_size >= n_indirect_dispatch_call_arg_bytes)
{
/* Indirect dispatch call args must be aligned to 4 */
DE_ASSERT(m_page_size >= 4);
m_dispatch_draw_call_args_start_offset = SparseBufferTestUtilities::alignOffset(m_page_size - 4, 4);
m_sparse_bo_size = m_dispatch_draw_call_args_start_offset + n_indirect_dispatch_call_arg_bytes;
}
else
{
m_dispatch_draw_call_args_start_offset = 0;
m_sparse_bo_size = n_indirect_dispatch_call_arg_bytes;
}
m_sparse_bo_size_rounded = SparseBufferTestUtilities::alignOffset(m_sparse_bo_size, m_page_size);
/* Set up the helper buffer object. Its structure is as follows:
*
* [ 0-11]: Indirect dispatch call args
* [12-15]: Atomic counter value storage
*/
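/* Note: the 12-byte offset used with glBindBufferRange(GL_ATOMIC_COUNTER_BUFFER, ..) and
 * glBufferSubData() in execute() refers to the atomic counter slot described above. */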
unsigned int helper_bo_data[4] = { 0 };
const unsigned int n_helper_bo_bytes = sizeof(helper_bo_data);
helper_bo_data[0] = m_global_wg_size_x; /* num_groups_x */
helper_bo_data[1] = 1; /* num_groups_y */
helper_bo_data[2] = 1; /* num_groups_z */
helper_bo_data[3] = 0; /* default atomic counter value */
m_gl.genBuffers(1, &m_helper_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glGenBuffers() call failed.");
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_helper_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferData(GL_ARRAY_BUFFER, n_helper_bo_bytes, helper_bo_data, GL_STATIC_DRAW);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferData() call failed.");
/* Set up the test compute program object */
static const char* cs_body = "#version 430 core\n"
"\n"
"layout(local_size_x = 1023) in;\n"
"layout(binding = 0, offset = 0) uniform atomic_uint ac;\n"
"\n"
"void main()\n"
"{\n"
" atomicCounterIncrement(ac);\n"
"}\n";
m_po = SparseBufferTestUtilities::createComputeProgram(m_gl, &cs_body, 1); /* n_cs_body_parts */
result = (m_po != 0);
return result;
}
/** Initializes GL objects which are needed for a single test case iteration.
*
* deinitTestCaseIteration() will be called after the test case is executed in ::execute()
* to release these objects.
**/
bool IndirectDispatchBufferStorageTestCase::initTestCaseIteration(glw::GLuint sparse_bo)
{
bool result = true;
/* Cache the BO id, if not cached already */
DE_ASSERT(m_sparse_bo == 0 || m_sparse_bo == sparse_bo);
m_sparse_bo = sparse_bo;
/* Set up the sparse buffer. */
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_TRUE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
return result;
}
/** Constructor.
*
* @param gl GL entry-points container
* @param testContext CTS test context
* @param page_size Page size, as reported by implementation for the GL_SPARSE_BUFFER_PAGE_SIZE_ARB query.
*/
InvalidateBufferStorageTestCase::InvalidateBufferStorageTestCase(const glw::Functions& gl,
tcu::TestContext& testContext, glw::GLint page_size)
: m_gl(gl)
, m_n_pages_to_use(4)
, m_page_size(page_size)
, m_sparse_bo(0)
, m_sparse_bo_size(0)
, m_sparse_bo_size_rounded(0)
{
(void)testContext;
DE_ASSERT((m_n_pages_to_use % 2) == 0);
}
/** Releases all GL objects used across all test case iterations.
*
* Called once during BufferStorage test run-time.
*/
void InvalidateBufferStorageTestCase::deinitTestCaseGlobal()
{
/* Stub */
}
/** Releases temporary GL objects, created specifically for one test case iteration. */
void InvalidateBufferStorageTestCase::deinitTestCaseIteration()
{
if (m_sparse_bo != 0)
{
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_FALSE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
m_sparse_bo = 0;
}
}
/** Executes a single test iteration. The BufferStorage test will call this method
* numerous times during its lifetime, testing various valid flag combinations applied
* to the tested sparse buffer object at glBufferStorage() call time.
*
* @param sparse_bo_storage_flags <flags> argument, used by the test in the glBufferStorage()
* call to set up the sparse buffer's storage.
*
* @return true if the test case executed correctly, false otherwise.
*/
bool InvalidateBufferStorageTestCase::execute(glw::GLuint sparse_bo_storage_flags)
{
(void)sparse_bo_storage_flags;
bool result = true;
/* Since we cannot really perform any validation related to whether buffer
* storage invalidation works correctly, all this test can really do is verify
* that the implementation does not crash when both entry-points are used against
* a sparse buffer object.
*/
for (unsigned int n_entry_point = 0; n_entry_point < 2; /* glInvalidateBuffer(), glInvalidateBufferSubData() */
++n_entry_point)
{
const bool should_test_invalidate_buffer = (n_entry_point == 0);
/* For glInvalidateBufferSubData(), we need to test two different ranges. */
for (int n_iteration = 0; n_iteration < ((should_test_invalidate_buffer) ? 1 : 2); ++n_iteration)
{
if (should_test_invalidate_buffer)
{
m_gl.invalidateBufferData(m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glInvalidateBufferData() call failed.");
}
else
{
m_gl.invalidateBufferSubData(m_sparse_bo, 0, /* offset */
m_sparse_bo_size_rounded / ((n_iteration == 0) ? 1 : 2));
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glInvalidateBufferSubData() call failed.");
}
} /* for (all iterations) */
} /* for (both entry-points) */
return result;
}
/** Initializes GL objects used across all test case iterations.
*
* Called once during BufferStorage test run-time.
*/
bool InvalidateBufferStorageTestCase::initTestCaseGlobal()
{
const unsigned int n_bytes_needed = m_n_pages_to_use * m_page_size;
/* Determine the number of bytes both the helper and the sparse buffer
* object need to be able to hold, at maximum */
m_sparse_bo_size = n_bytes_needed;
m_sparse_bo_size_rounded = SparseBufferTestUtilities::alignOffset(n_bytes_needed, m_page_size);
return true;
}
/** Initializes GL objects which are needed for a single test case iteration.
*
* deinitTestCaseIteration() will be called after the test case is executed in ::execute()
* to release these objects.
**/
bool InvalidateBufferStorageTestCase::initTestCaseIteration(glw::GLuint sparse_bo)
{
bool result = true;
/* Cache the BO id, if not cached already */
DE_ASSERT(m_sparse_bo == 0 || m_sparse_bo == sparse_bo);
m_sparse_bo = sparse_bo;
/* Set up the sparse buffer. */
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_TRUE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
return result;
}
/** Constructor.
*
* @param gl GL entry-points container
* @param testContext CTS test context
* @param page_size Page size, as reported by implementation for the GL_SPARSE_BUFFER_PAGE_SIZE_ARB query.
*/
PixelPackBufferStorageTestCase::PixelPackBufferStorageTestCase(const glw::Functions& gl, tcu::TestContext& testContext,
glw::GLint page_size)
: m_color_rb(0)
, m_color_rb_height(1024)
, m_color_rb_width(1024)
, m_fbo(0)
, m_gl(gl)
, m_helper_bo(0)
, m_page_size(page_size)
, m_po(0)
, m_ref_data_ptr(DE_NULL)
, m_ref_data_size(0)
, m_sparse_bo(0)
, m_sparse_bo_size(0)
, m_sparse_bo_size_rounded(0)
, m_testCtx(testContext)
, m_vao(0)
{
m_ref_data_size = m_color_rb_width * m_color_rb_height * 4; /* rgba */
}
/** Releases all GL objects used across all test case iterations.
*
* Called once during BufferStorage test run-time.
*/
void PixelPackBufferStorageTestCase::deinitTestCaseGlobal()
{
if (m_color_rb != 0)
{
m_gl.deleteRenderbuffers(1, &m_color_rb);
m_color_rb = 0;
}
if (m_fbo != 0)
{
m_gl.deleteFramebuffers(1, &m_fbo);
m_fbo = 0;
}
if (m_helper_bo != 0)
{
m_gl.deleteBuffers(1, &m_helper_bo);
m_helper_bo = 0;
}
if (m_ref_data_ptr != DE_NULL)
{
delete[] m_ref_data_ptr;
m_ref_data_ptr = DE_NULL;
}
if (m_po != 0)
{
m_gl.deleteProgram(m_po);
m_po = 0;
}
if (m_vao != 0)
{
m_gl.deleteVertexArrays(1, &m_vao);
m_vao = 0;
}
}
/** Releases temporary GL objects, created specifically for one test case iteration. */
void PixelPackBufferStorageTestCase::deinitTestCaseIteration()
{
if (m_sparse_bo != 0)
{
m_gl.bindBuffer(GL_ARRAY_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call failed.");
m_gl.bufferPageCommitmentARB(GL_ARRAY_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_FALSE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
m_sparse_bo = 0;
}
}
/** Executes a single test iteration. The BufferStorage test will call this method
* numerous times during its lifetime, testing various valid flag combinations applied
* to the tested sparse buffer object at glBufferStorage() call time.
*
* @param sparse_bo_storage_flags <flags> argument, used by the test in the glBufferStorage()
* call to set up the sparse buffer's storage.
*
* @return true if the test case executed correctly, false otherwise.
*/
bool PixelPackBufferStorageTestCase::execute(glw::GLuint sparse_bo_storage_flags)
{
(void)sparse_bo_storage_flags;
bool result = true;
m_gl.bindBuffer(GL_COPY_READ_BUFFER, m_helper_bo);
m_gl.bindBuffer(GL_PIXEL_PACK_BUFFER, m_sparse_bo);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBindBuffer() call(s) failed.");
/* Run three separate iterations:
*
* a) All pages that are going to hold the texture data are committed.
* b) Use a zig-zag memory page commitment layout pattern.
* c) No pages are committed.
*/
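/* In the zig-zag case, every even-numbered page is committed and every odd-numbered page
 * is left uncommitted. */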
for (unsigned int n_iteration = 0; n_iteration < 3; ++n_iteration)
{
bool result_local = true;
/* Set up the memory page commitment & the storage contents*/
switch (n_iteration)
{
case 0:
{
m_gl.bufferPageCommitmentARB(GL_PIXEL_PACK_BUFFER, 0, /* offset */
m_sparse_bo_size_rounded, GL_TRUE); /* commit */
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
break;
}
case 1:
{
const unsigned int n_pages = 1 + m_ref_data_size / m_page_size;
DE_ASSERT((m_ref_data_size % m_page_size) == 0);
for (unsigned int n_page = 0; n_page < n_pages; ++n_page)
{
const bool should_commit = ((n_page % 2) == 0);
m_gl.bufferPageCommitmentARB(GL_PIXEL_PACK_BUFFER, m_page_size * n_page, m_page_size,
should_commit ? GL_TRUE : GL_FALSE);
GLU_EXPECT_NO_ERROR(m_gl.getError(), "glBufferPageCommitmentARB() call failed.");
} /* for (all relevant memory pages) */