Merge pull request #537 from KhronosGroup/fix-535

Unsigned integers are disallowed on legacy targets.

Legacy GLSL (desktop GLSL older than 1.30, GLSL ES older than 3.00) has no
unsigned integer types, so instead of emitting shaders that reference the
nonexistent uint type, the compiler now throws when it encounters unsigned
SPIR-V opcodes, unsigned GLSL.std.450 operations or unsigned built-in types.
Unsigned constant literals are emitted as plain signed literals, and the
compiler throws if the value does not fit in a signed 32-bit integer.
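
Below is a minimal sketch (not part of this change) of how the new check
surfaces to API users. The shader file name, loader helper and option values
are illustrative only, and depending on the SPIRV-Cross revision the options
setter may be named set_options() instead of set_common_options():

    #include "spirv_glsl.hpp"

    #include <cstdint>
    #include <cstdio>
    #include <fstream>
    #include <vector>

    // Reads a SPIR-V binary from disk; assumes a well-formed, word-aligned file.
    static std::vector<uint32_t> load_spirv(const char *path)
    {
        std::ifstream file(path, std::ios::binary | std::ios::ate);
        std::vector<uint32_t> words(static_cast<size_t>(file.tellg()) / sizeof(uint32_t));
        file.seekg(0);
        file.read(reinterpret_cast<char *>(words.data()),
                  static_cast<std::streamsize>(words.size() * sizeof(uint32_t)));
        return words;
    }

    int main()
    {
        // A module that uses OpUDiv, OpShiftRightLogical or another unsigned opcode.
        spirv_cross::CompilerGLSL compiler(load_spirv("shader.spv"));

        spirv_cross::CompilerGLSL::Options opts;
        opts.es = false;
        opts.version = 120; // below 130 desktop (or 300 ES), so is_legacy() is true
        compiler.set_common_options(opts);

        try
        {
            std::puts(compiler.compile().c_str());
        }
        catch (const spirv_cross::CompilerError &e)
        {
            // e.g. "Unsigned integers are not supported on legacy targets."
            std::fprintf(stderr, "%s\n", e.what());
            return 1;
        }
        return 0;
    }
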
diff --git a/spirv_glsl.cpp b/spirv_glsl.cpp
index 7adda14..0c79892 100644
--- a/spirv_glsl.cpp
+++ b/spirv_glsl.cpp
@@ -26,6 +26,45 @@
 using namespace spirv_cross;
 using namespace std;
 
+static bool is_unsigned_opcode(Op op)
+{
+	// Doesn't have to be exhaustive; only relevant for legacy target checking.
+	switch (op)
+	{
+	case OpShiftRightLogical:
+	case OpUGreaterThan:
+	case OpUGreaterThanEqual:
+	case OpULessThan:
+	case OpULessThanEqual:
+	case OpUConvert:
+	case OpUDiv:
+	case OpUMod:
+	case OpUMulExtended:
+	case OpConvertUToF:
+	case OpConvertFToU:
+		return true;
+
+	default:
+		return false;
+	}
+}
+
+static bool is_unsigned_glsl_opcode(GLSLstd450 op)
+{
+	// Doesn't have to be exhaustive; only relevant for legacy target checking.
+	switch (op)
+	{
+	case GLSLstd450UClamp:
+	case GLSLstd450UMin:
+	case GLSLstd450UMax:
+	case GLSLstd450FindUMsb:
+		return true;
+
+	default:
+		return false;
+	}
+}
+
 static bool packing_is_vec4_padded(BufferPackingStandard packing)
 {
 	switch (packing)
@@ -2442,6 +2481,9 @@
 	bool unary = false;
 	string op;
 
+	if (is_legacy() && is_unsigned_opcode(cop.opcode))
+		SPIRV_CROSS_THROW("Unsigned integers are not supported on legacy targets.");
+
 	// TODO: Find a clean way to reuse emit_instruction.
 	switch (cop.opcode)
 	{
@@ -3013,7 +3055,14 @@
 		if (splat)
 		{
 			res += convert_to_string(c.scalar(vector, 0));
-			if (backend.uint32_t_literal_suffix)
+			if (is_legacy())
+			{
+				// Fake unsigned constant literals with signed ones if possible.
+				// Things like array sizes, etc., tend to be unsigned even though they could just as easily be signed.
+				if (c.scalar_i32(vector, 0) < 0)
+					SPIRV_CROSS_THROW("Tried to convert uint literal into int, but this made the literal negative.");
+			}
+			else if (backend.uint32_t_literal_suffix)
 				res += "u";
 		}
 		else
@@ -3025,7 +3074,15 @@
 				else
 				{
 					res += convert_to_string(c.scalar(vector, i));
-					if (backend.uint32_t_literal_suffix)
+					if (is_legacy())
+					{
+						// Fake unsigned constant literals with signed ones if possible.
+						// Things like array sizes, etc., tend to be unsigned even though they could just as easily be signed.
+						if (c.scalar_i32(vector, i) < 0)
+							SPIRV_CROSS_THROW(
+							    "Tried to convert uint literal into int, but this made the literal negative.");
+					}
+					else if (backend.uint32_t_literal_suffix)
 						res += "u";
 				}
 
@@ -4044,7 +4101,10 @@
 
 void CompilerGLSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop, const uint32_t *args, uint32_t)
 {
-	GLSLstd450 op = static_cast<GLSLstd450>(eop);
+	auto op = static_cast<GLSLstd450>(eop);
+
+	if (is_legacy() && is_unsigned_glsl_opcode(op))
+		SPIRV_CROSS_THROW("Unsigned integers are not supported on legacy GLSL targets.");
 
 	switch (op)
 	{
@@ -4109,8 +4169,8 @@
 	}
 
 	// Minmax
-	case GLSLstd450FMin:
 	case GLSLstd450UMin:
+	case GLSLstd450FMin:
 	case GLSLstd450SMin:
 		emit_binary_func_op(result_type, id, args[0], args[1], "min");
 		break;
@@ -4530,8 +4590,7 @@
 		{
 			require_extension_internal("GL_KHR_shader_subgroup_clustered");
 		}
-		else if (operation == GroupOperationExclusiveScan ||
-		         operation == GroupOperationInclusiveScan ||
+		else if (operation == GroupOperationExclusiveScan || operation == GroupOperationInclusiveScan ||
 		         operation == GroupOperationReduce)
 		{
 			require_extension_internal("GL_KHR_shader_subgroup_arithmetic");
@@ -4633,6 +4692,7 @@
 		emit_unary_func_op(result_type, id, ops[3], "subgroupAllEqual");
 		break;
 
+		// clang-format off
 #define GROUP_OP(op, glsl_op) \
 case OpGroupNonUniform##op: \
 	{ \
@@ -4663,6 +4723,7 @@
 	GROUP_OP(BitwiseOr, Or)
 	GROUP_OP(BitwiseXor, Xor)
 #undef GROUP_OP
+		// clang-format on
 
 	case OpGroupNonUniformQuadSwap:
 	{
@@ -7518,8 +7579,8 @@
 		}
 		else if (memory == ScopeSubgroup)
 		{
-			const uint32_t all_barriers = MemorySemanticsWorkgroupMemoryMask | MemorySemanticsUniformMemoryMask |
-			                              MemorySemanticsImageMemoryMask;
+			const uint32_t all_barriers =
+			    MemorySemanticsWorkgroupMemoryMask | MemorySemanticsUniformMemoryMask | MemorySemanticsImageMemoryMask;
 
 			if (semantics & (MemorySemanticsCrossWorkgroupMemoryMask | MemorySemanticsSubgroupMemoryMask))
 			{
@@ -8318,6 +8379,9 @@
 		break;
 	}
 
+	if (type.basetype == SPIRType::UInt && is_legacy())
+		SPIRV_CROSS_THROW("Unsigned integers are not supported on legacy targets.");
+
 	if (type.vecsize == 1 && type.columns == 1) // Scalar builtin
 	{
 		switch (type.basetype)
diff --git a/spirv_hlsl.cpp b/spirv_hlsl.cpp
index 1704dae..f10686e 100644
--- a/spirv_hlsl.cpp
+++ b/spirv_hlsl.cpp
@@ -3696,8 +3696,10 @@
 		if (operation == GroupOperationReduce)
 		{
 			bool forward = should_forward(ops[4]);
-			auto left = join("countbits(", to_enclosed_expression(ops[4]), ".x) + countbits(", to_enclosed_expression(ops[4]), ".y)");
-			auto right = join("countbits(", to_enclosed_expression(ops[4]), ".z) + countbits(", to_enclosed_expression(ops[4]), ".w)");
+			auto left = join("countbits(", to_enclosed_expression(ops[4]), ".x) + countbits(",
+			                 to_enclosed_expression(ops[4]), ".y)");
+			auto right = join("countbits(", to_enclosed_expression(ops[4]), ".z) + countbits(",
+			                  to_enclosed_expression(ops[4]), ".w)");
 			emit_op(result_type, id, join(left, " + ", right), forward);
 			inherit_expression_dependencies(id, ops[4]);
 		}
@@ -3735,6 +3737,7 @@
 		break;
 	}
 
+	// clang-format off
 #define GROUP_OP(op, hlsl_op, supports_scan) \
 case OpGroupNonUniform##op: \
 	{ \
@@ -3769,6 +3772,7 @@
 	GROUP_OP(BitwiseOr, BitOr, false)
 	GROUP_OP(BitwiseXor, BitXor, false)
 #undef GROUP_OP
+		// clang-format on
 
 	case OpGroupNonUniformQuadSwap:
 	{