/*
 * Copyright (c) 2012-2017 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <VX/vx.h>
#include <VX/vxu.h>
#include "test_engine/test.h"
#define VALID_SHIFT_MIN 0
// #define VALID_SHIFT_MIN -64
#define VALID_SHIFT_MAX 7
#define CT_EXECUTE_ASYNC
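/*
 * Host-side reference implementation of ConvertDepth used to validate
 * kernel output bit-exactly: U8 -> S16 up-conversion shifts left by
 * 'shift' (the policy has no effect), S16 -> U8 down-conversion shifts
 * right and either wraps or saturates to [0, 255]. A negative shift
 * reverses the shift direction.
 */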
static void referenceConvertDepth(CT_Image src, CT_Image dst, int shift, vx_enum policy)
{
    uint32_t i, j;
    ASSERT(src && dst);
    ASSERT(src->width == dst->width);
    ASSERT(src->height == dst->height);
    ASSERT((src->format == VX_DF_IMAGE_U8 && dst->format == VX_DF_IMAGE_S16) ||
           (src->format == VX_DF_IMAGE_S16 && dst->format == VX_DF_IMAGE_U8));
    ASSERT(policy == VX_CONVERT_POLICY_WRAP || policy == VX_CONVERT_POLICY_SATURATE);

    if (shift > 16) shift = 16;
    if (shift < -16) shift = -16;

    if (src->format == VX_DF_IMAGE_U8)
    {
        // according to the spec, the policy is ignored for up-conversion
        // if (policy == VX_CONVERT_POLICY_WRAP)
        {
            // up-conversion (identical result for both policies)
            if (shift < 0)
            {
                for (i = 0; i < dst->height; ++i)
                    for (j = 0; j < dst->width; ++j)
                        dst->data.s16[i * dst->stride + j] = ((unsigned)src->data.y[i * src->stride + j]) >> (-shift);
            }
            else
            {
                for (i = 0; i < dst->height; ++i)
                    for (j = 0; j < dst->width; ++j)
                        dst->data.s16[i * dst->stride + j] = ((unsigned)src->data.y[i * src->stride + j]) << shift;
            }
        }
        // else if (policy == VX_CONVERT_POLICY_SATURATE)
        // {
        //     // up-conversion + saturate
        //     if (shift < 0)
        //     {
        //         for (i = 0; i < dst->height; ++i)
        //             for (j = 0; j < dst->width; ++j)
        //                 dst->data.s16[i * dst->stride + j] = ((unsigned)src->data.y[i * src->stride + j]) >> (-shift);
        //     }
        //     else
        //     {
        //         for (i = 0; i < dst->height; ++i)
        //             for (j = 0; j < dst->width; ++j)
        //             {
        //                 unsigned v = ((unsigned)src->data.y[i * src->stride + j]) << shift;
        //                 if (v > 32767) v = 32767;
        //                 dst->data.s16[i * dst->stride + j] = v;
        //             }
        //     }
        // }
    }
    else if (policy == VX_CONVERT_POLICY_WRAP)
    {
        // down-conversion + wrap
        if (shift < 0)
        {
            for (i = 0; i < dst->height; ++i)
                for (j = 0; j < dst->width; ++j)
                    dst->data.y[i * dst->stride + j] = src->data.s16[i * src->stride + j] << (-shift);
        }
        else
        {
            for (i = 0; i < dst->height; ++i)
                for (j = 0; j < dst->width; ++j)
                    dst->data.y[i * dst->stride + j] = src->data.s16[i * src->stride + j] >> shift;
        }
    }
    else if (policy == VX_CONVERT_POLICY_SATURATE)
    {
        // down-conversion + saturate
        if (shift < 0)
        {
            for (i = 0; i < dst->height; ++i)
                for (j = 0; j < dst->width; ++j)
                {
                    int32_t v = src->data.s16[i * src->stride + j] << (-shift);
                    if (v > 255) v = 255;
                    if (v < 0) v = 0;
                    dst->data.y[i * dst->stride + j] = v;
                }
        }
        else
        {
            for (i = 0; i < dst->height; ++i)
                for (j = 0; j < dst->width; ++j)
                {
                    int32_t v = src->data.s16[i * src->stride + j] >> shift;
                    if (v > 255) v = 255;
                    if (v < 0) v = 0;
                    dst->data.y[i * dst->stride + j] = v;
                }
        }
    }
}
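/*
 * Fill an image with a deterministic incrementing sequence starting at
 * seq_init + 1; values are truncated to the element width (8 or 16 bits).
 */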
static void fillSequence(CT_Image dst, uint32_t seq_init)
{
    uint32_t i, j;
    uint32_t val = seq_init;

    ASSERT(dst);
    ASSERT(dst->format == VX_DF_IMAGE_U8 || dst->format == VX_DF_IMAGE_S16);

    if (dst->format == VX_DF_IMAGE_U8)
    {
        for (i = 0; i < dst->height; ++i)
            for (j = 0; j < dst->width; ++j)
                dst->data.y[i * dst->stride + j] = ++val;
    }
    else
    {
        for (i = 0; i < dst->height; ++i)
            for (j = 0; j < dst->width; ++j)
                dst->data.s16[i * dst->stride + j] = ++val;
    }
}
TESTCASE(vxuConvertDepth, CT_VXContext, ct_setup_vx_context, 0)
TESTCASE(vxConvertDepth, CT_VXContext, ct_setup_vx_context, 0)
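/*
 * NegativeSizes: the input and output images deliberately disagree in
 * resolution, so the immediate-mode call (or graph verification) is
 * expected to fail for every policy/shift combination.
 */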
TEST(vxuConvertDepth, NegativeSizes)
{
    vx_image img16x88, img88x16, img16x16;
    vx_int32 shift_zero = 0;
    vx_int32 shift_one = 1;
    vx_context context = context_->vx_context_;

    ASSERT_VX_OBJECT(img16x88 = vxCreateImage(context, 16, 88, VX_DF_IMAGE_U8), VX_TYPE_IMAGE);
    ASSERT_VX_OBJECT(img88x16 = vxCreateImage(context, 88, 16, VX_DF_IMAGE_S16), VX_TYPE_IMAGE);
    ASSERT_VX_OBJECT(img16x16 = vxCreateImage(context, 16, 16, VX_DF_IMAGE_U8), VX_TYPE_IMAGE);

    // initialize to guarantee that images are allocated
    ASSERT_NO_FAILURE(ct_fill_image_random(img16x88, &CT()->seed_));
    ASSERT_NO_FAILURE(ct_fill_image_random(img88x16, &CT()->seed_));
    ASSERT_NO_FAILURE(ct_fill_image_random(img16x16, &CT()->seed_));

    EXPECT_NE_VX_STATUS(VX_SUCCESS, vxuConvertDepth(context, img16x88, img88x16, VX_CONVERT_POLICY_SATURATE, shift_zero));
    EXPECT_NE_VX_STATUS(VX_SUCCESS, vxuConvertDepth(context, img16x88, img88x16, VX_CONVERT_POLICY_WRAP, shift_zero));
    EXPECT_NE_VX_STATUS(VX_SUCCESS, vxuConvertDepth(context, img16x88, img88x16, VX_CONVERT_POLICY_SATURATE, shift_one));
    EXPECT_NE_VX_STATUS(VX_SUCCESS, vxuConvertDepth(context, img16x88, img88x16, VX_CONVERT_POLICY_WRAP, shift_one));
    EXPECT_NE_VX_STATUS(VX_SUCCESS, vxuConvertDepth(context, img88x16, img16x16, VX_CONVERT_POLICY_SATURATE, shift_zero));
    EXPECT_NE_VX_STATUS(VX_SUCCESS, vxuConvertDepth(context, img88x16, img16x16, VX_CONVERT_POLICY_WRAP, shift_zero));
    EXPECT_NE_VX_STATUS(VX_SUCCESS, vxuConvertDepth(context, img88x16, img16x16, VX_CONVERT_POLICY_SATURATE, shift_one));
    EXPECT_NE_VX_STATUS(VX_SUCCESS, vxuConvertDepth(context, img88x16, img16x16, VX_CONVERT_POLICY_WRAP, shift_one));

    VX_CALL(vxReleaseImage(&img16x88));
    VX_CALL(vxReleaseImage(&img88x16));
    VX_CALL(vxReleaseImage(&img16x16));
}
TEST(vxConvertDepth, NegativeSizes)
{
    vx_image img16x88, img88x16, img16x16;
    vx_graph graph;
    vx_node node;
    vx_scalar shift;
    vx_int32 sh = 1;
    vx_context context = context_->vx_context_;

    ASSERT_VX_OBJECT(shift = vxCreateScalar(context, VX_TYPE_INT32, &sh), VX_TYPE_SCALAR);

    ASSERT_VX_OBJECT(img16x88 = vxCreateImage(context, 16, 88, VX_DF_IMAGE_U8), VX_TYPE_IMAGE);
    ASSERT_VX_OBJECT(img88x16 = vxCreateImage(context, 88, 16, VX_DF_IMAGE_S16), VX_TYPE_IMAGE);
    ASSERT_VX_OBJECT(img16x16 = vxCreateImage(context, 16, 16, VX_DF_IMAGE_U8), VX_TYPE_IMAGE);

    ASSERT_VX_OBJECT(graph = vxCreateGraph(context), VX_TYPE_GRAPH);
    ASSERT_VX_OBJECT(node = vxConvertDepthNode(graph, img16x88, img88x16, VX_CONVERT_POLICY_SATURATE, shift), VX_TYPE_NODE);
    EXPECT_NE_VX_STATUS(VX_SUCCESS, vxVerifyGraph(graph));
    VX_CALL(vxReleaseNode(&node));
    VX_CALL(vxReleaseGraph(&graph));

    ASSERT_VX_OBJECT(graph = vxCreateGraph(context), VX_TYPE_GRAPH);
    ASSERT_VX_OBJECT(node = vxConvertDepthNode(graph, img16x88, img88x16, VX_CONVERT_POLICY_WRAP, shift), VX_TYPE_NODE);
    EXPECT_NE_VX_STATUS(VX_SUCCESS, vxVerifyGraph(graph));
    VX_CALL(vxReleaseNode(&node));
    VX_CALL(vxReleaseGraph(&graph));

    ASSERT_VX_OBJECT(graph = vxCreateGraph(context), VX_TYPE_GRAPH);
    ASSERT_VX_OBJECT(node = vxConvertDepthNode(graph, img88x16, img16x16, VX_CONVERT_POLICY_SATURATE, shift), VX_TYPE_NODE);
    EXPECT_NE_VX_STATUS(VX_SUCCESS, vxVerifyGraph(graph));
    VX_CALL(vxReleaseNode(&node));
    VX_CALL(vxReleaseGraph(&graph));

    ASSERT_VX_OBJECT(graph = vxCreateGraph(context), VX_TYPE_GRAPH);
    ASSERT_VX_OBJECT(node = vxConvertDepthNode(graph, img88x16, img16x16, VX_CONVERT_POLICY_WRAP, shift), VX_TYPE_NODE);
    EXPECT_NE_VX_STATUS(VX_SUCCESS, vxVerifyGraph(graph));
    VX_CALL(vxReleaseNode(&node));
    VX_CALL(vxReleaseGraph(&graph));

    VX_CALL(vxReleaseImage(&img16x88));
    VX_CALL(vxReleaseImage(&img88x16));
    VX_CALL(vxReleaseImage(&img16x16));
    VX_CALL(vxReleaseScalar(&shift));
}
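/*
 * Parameterization for the BitExact tests: each case fixes an image
 * size, a conversion direction (U8 -> S16 or S16 -> U8), and an
 * overflow policy; CVT_ARGS expands to the full cross product.
 */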
typedef struct {
    const char* name;
    uint32_t width;
    uint32_t height;
    vx_df_image format_from;
    vx_df_image format_to;
    vx_enum policy;
} cvt_depth_arg;

#define CVT_ARG(w,h,from,to,p) ARG(#p"/"#w"x"#h" "#from"->"#to, w, h, VX_DF_IMAGE_##from, VX_DF_IMAGE_##to, VX_CONVERT_POLICY_##p)

#define PREPEND_SIZE(macro, ...) \
    CT_EXPAND(macro(1, 1, __VA_ARGS__)), \
    CT_EXPAND(macro(15, 17, __VA_ARGS__)), \
    CT_EXPAND(macro(32, 32, __VA_ARGS__)), \
    CT_EXPAND(macro(640, 480, __VA_ARGS__)), \
    CT_EXPAND(macro(1231, 1234, __VA_ARGS__))
/*,
    CT_EXPAND(macro(1280, 720, __VA_ARGS__)),
    CT_EXPAND(macro(1920, 1080, __VA_ARGS__))*/

#define CVT_ARGS \
    PREPEND_SIZE(CVT_ARG, U8, S16, SATURATE), \
    PREPEND_SIZE(CVT_ARG, U8, S16, WRAP), \
    PREPEND_SIZE(CVT_ARG, S16, U8, SATURATE), \
    PREPEND_SIZE(CVT_ARG, S16, U8, WRAP)
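/*
 * BitExact: convert a deterministic source image for every valid shift
 * value and require the result to match referenceConvertDepth exactly,
 * first through the immediate-mode API, then through a graph with the
 * shift supplied via a scalar.
 */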
TEST_WITH_ARG(vxuConvertDepth, BitExact, cvt_depth_arg, CVT_ARGS)
{
    vx_image src, dst;
    CT_Image ref_src, refdst, vxdst;
    vx_int32 shift_val;
    vx_context context = context_->vx_context_;

    ASSERT_NO_FAILURE({
        ref_src = ct_allocate_image(arg_->width, arg_->height, arg_->format_from);
        fillSequence(ref_src, (uint32_t)CT()->seed_);
        src = ct_image_to_vx_image(ref_src, context);
    });

    ASSERT_VX_OBJECT(dst = vxCreateImage(context, arg_->width, arg_->height, arg_->format_to), VX_TYPE_IMAGE);

    refdst = ct_allocate_image(arg_->width, arg_->height, arg_->format_to);
    vxdst = ct_allocate_image(arg_->width, arg_->height, arg_->format_to);

    for (shift_val = VALID_SHIFT_MIN; shift_val <= VALID_SHIFT_MAX; ++shift_val)
    {
        ct_update_progress(shift_val - VALID_SHIFT_MIN, VALID_SHIFT_MAX - VALID_SHIFT_MIN + 1);
        EXPECT_EQ_VX_STATUS(VX_SUCCESS, vxuConvertDepth(context, src, dst, arg_->policy, shift_val));
        ASSERT_NO_FAILURE({
            ct_image_copyfrom_vx_image(vxdst, dst);
            referenceConvertDepth(ref_src, refdst, shift_val, arg_->policy);
        });
        EXPECT_EQ_CTIMAGE(refdst, vxdst);
        if (CT_HasFailure())
        {
            printf("Shift value is %d\n", shift_val);
            break;
        }
    }

    // release the vx images and check that the handles are cleared
    VX_CALL(vxReleaseImage(&dst));
    VX_CALL(vxReleaseImage(&src));
    EXPECT_EQ_PTR(NULL, dst);
    EXPECT_EQ_PTR(NULL, src);
}
TEST_WITH_ARG(vxConvertDepth, BitExact, cvt_depth_arg, CVT_ARGS)
{
    vx_image src, dst;
    CT_Image ref_src, refdst, vxdst;
    vx_graph graph;
    vx_node node;
    vx_scalar scalar_shift;
    vx_int32 shift = 0;
    vx_int32 tmp = 0;
    vx_context context = context_->vx_context_;

    ASSERT_NO_FAILURE({
        ref_src = ct_allocate_image(arg_->width, arg_->height, arg_->format_from);
        fillSequence(ref_src, (uint32_t)CT()->seed_);
        src = ct_image_to_vx_image(ref_src, context);
    });

    ASSERT_VX_OBJECT(dst = vxCreateImage(context, arg_->width, arg_->height, arg_->format_to), VX_TYPE_IMAGE);
    ASSERT_VX_OBJECT(scalar_shift = vxCreateScalar(context, VX_TYPE_INT32, &tmp), VX_TYPE_SCALAR);
    ASSERT_VX_OBJECT(graph = vxCreateGraph(context), VX_TYPE_GRAPH);
    ASSERT_VX_OBJECT(node = vxConvertDepthNode(graph, src, dst, arg_->policy, scalar_shift), VX_TYPE_NODE);

    refdst = ct_allocate_image(arg_->width, arg_->height, arg_->format_to);
    vxdst = ct_allocate_image(arg_->width, arg_->height, arg_->format_to);

    for (shift = VALID_SHIFT_MIN; shift <= VALID_SHIFT_MAX; ++shift)
    {
        ct_update_progress(shift - VALID_SHIFT_MIN, VALID_SHIFT_MAX - VALID_SHIFT_MIN + 1);
        ASSERT_EQ_VX_STATUS(VX_SUCCESS, vxCopyScalar(scalar_shift, &shift, VX_WRITE_ONLY, VX_MEMORY_TYPE_HOST));

        // run the graph (asynchronously when CT_EXECUTE_ASYNC is defined)
#ifdef CT_EXECUTE_ASYNC
        EXPECT_EQ_VX_STATUS(VX_SUCCESS, vxScheduleGraph(graph));
        EXPECT_EQ_VX_STATUS(VX_SUCCESS, vxWaitGraph(graph));
#else
        EXPECT_EQ_VX_STATUS(VX_SUCCESS, vxProcessGraph(graph));
#endif

        ASSERT_NO_FAILURE({
            ct_image_copyfrom_vx_image(vxdst, dst);
            referenceConvertDepth(ref_src, refdst, shift, arg_->policy);
        });
        EXPECT_EQ_CTIMAGE(refdst, vxdst);
        if (CT_HasFailure())
        {
            printf("Shift value is %d\n", shift);
            break;
        }
    }

    VX_CALL(vxReleaseImage(&dst));
    VX_CALL(vxReleaseImage(&src));
    VX_CALL(vxReleaseScalar(&scalar_shift));
    VX_CALL(vxReleaseNode(&node));
    VX_CALL(vxReleaseGraph(&graph));
}
TESTCASE_TESTS(vxuConvertDepth, DISABLED_NegativeSizes, BitExact)
TESTCASE_TESTS(vxConvertDepth, DISABLED_NegativeSizes, BitExact)