x86: fix immediate operand for AND instruction in ATT mode (issue #1047)
diff --git a/arch/X86/X86ATTInstPrinter.c b/arch/X86/X86ATTInstPrinter.c
index 517a546..4bec478 100644
--- a/arch/X86/X86ATTInstPrinter.c
+++ b/arch/X86/X86ATTInstPrinter.c
@@ -493,9 +493,7 @@
 
 static void printOperand(MCInst *MI, unsigned OpNo, SStream *O)
 {
-	uint8_t opsize = 0;
 	MCOperand *Op  = MCInst_getOperand(MI, OpNo);
-
 	if (MCOperand_isReg(Op)) {
 		unsigned int reg = MCOperand_getReg(Op);
 		printRegName(O, reg);
@@ -506,88 +504,16 @@
 				MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].type = X86_OP_REG;
 				MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].reg = reg;
 				MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size = MI->csh->regsize_map[reg];
+
 				MI->flat_insn->detail->x86.op_count++;
 			}
 		}
 	} else if (MCOperand_isImm(Op)) {
 		// Print X86 immediates as signed values.
 		int64_t imm = MCOperand_getImm(Op);
-
-		switch(MCInst_getOpcode(MI)) {
-			default:
-				break;
-
-			case X86_AAD8i8:
-			case X86_AAM8i8:
-			case X86_ADC8i8:
-			case X86_ADD8i8:
-			case X86_AND8i8:
-			case X86_CMP8i8:
-			case X86_OR8i8:
-			case X86_SBB8i8:
-			case X86_SUB8i8:
-			case X86_TEST8i8:
-			case X86_XOR8i8:
-			case X86_ROL8ri:
-			case X86_ADC8ri:
-			case X86_ADD8ri:
-			case X86_ADD8ri8:
-			case X86_AND8ri:
-			case X86_AND8ri8:
-			case X86_CMP8ri:
-			case X86_MOV8ri:
-			case X86_MOV8ri_alt:
-			case X86_OR8ri:
-			case X86_OR8ri8:
-			case X86_RCL8ri:
-			case X86_RCR8ri:
-			case X86_ROR8ri:
-			case X86_SAL8ri:
-			case X86_SAR8ri:
-			case X86_SBB8ri:
-			case X86_SHL8ri:
-			case X86_SHR8ri:
-			case X86_SUB8ri:
-			case X86_SUB8ri8:
-			case X86_TEST8ri:
-			case X86_TEST8ri_NOREX:
-			case X86_TEST8ri_alt:
-			case X86_XOR8ri:
-			case X86_XOR8ri8:
-			case X86_OUT8ir:
-
-			case X86_ADC8mi:
-			case X86_ADD8mi:
-			case X86_AND8mi:
-			case X86_CMP8mi:
-			case X86_LOCK_ADD8mi:
-			case X86_LOCK_AND8mi:
-			case X86_LOCK_OR8mi:
-			case X86_LOCK_SUB8mi:
-			case X86_LOCK_XOR8mi:
-			case X86_MOV8mi:
-			case X86_OR8mi:
-			case X86_RCL8mi:
-			case X86_RCR8mi:
-			case X86_ROL8mi:
-			case X86_ROR8mi:
-			case X86_SAL8mi:
-			case X86_SAR8mi:
-			case X86_SBB8mi:
-			case X86_SHL8mi:
-			case X86_SHR8mi:
-			case X86_SUB8mi:
-			case X86_TEST8mi:
-			case X86_TEST8mi_alt:
-			case X86_XOR8mi:
-			case X86_PUSH64i8:
-			case X86_CMP32ri8:
-			case X86_CMP64ri8:
-
-				imm = imm & 0xff;
-				opsize = 1;     // immediate of 1 byte
-				break;
-		}
+		int opsize = X86_immediate_size(MCInst_getOpcode(MI));
+		if (opsize == 1)    // print 1 byte immediate in positive form
+			imm = imm & 0xff;
 
 		switch(MI->flat_insn->id) {
 			default:
@@ -597,13 +523,22 @@
 					else
 						SStream_concat(O, "$%"PRIu64, imm);
 				} else {
-					if (imm < -HEX_THRESHOLD)
+					if (imm == 0x8000000000000000LL)  // imm == -imm
+						SStream_concat0(O, "$0x8000000000000000");
+					else if (imm < -HEX_THRESHOLD)
 						SStream_concat(O, "$-0x%"PRIx64, -imm);
 					else
 						SStream_concat(O, "$-%"PRIu64, -imm);
 				}
 				break;
 
+			case X86_INS_MOVABS:
+				// do not print number in negative form
+				SStream_concat(O, "$0x%"PRIx64, imm);
+				break;
+
+			case X86_INS_IN:
+			case X86_INS_OUT:
 			case X86_INS_INT:
 				// do not print number in negative form
 				imm = imm & 0xff;
@@ -617,7 +552,7 @@
 			case X86_INS_LCALL:
 			case X86_INS_LJMP:
 				// always print address in positive form
-				if (OpNo == 1) { // selector is ptr16
+				if (OpNo == 1) {	// selector is ptr16
 					imm = imm & 0xffff;
 					opsize = 2;
 				}
@@ -631,7 +566,7 @@
 				if (imm >= 0 && imm <= HEX_THRESHOLD)
 					SStream_concat(O, "$%u", imm);
 				else {
-					imm = arch_masks[MI->op1_size? MI->op1_size : MI->imm_size] & imm;
+					imm = arch_masks[opsize? opsize : MI->imm_size] & imm;
 					SStream_concat(O, "$0x%"PRIx64, imm);
 				}
 				break;
@@ -657,7 +592,7 @@
 				MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].imm = imm;
 
 				if (opsize > 0)
-					MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size = opsize;
+					MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size = (uint8_t)opsize;
 				else if (MI->op1_size > 0)
 					MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size = MI->op1_size;
 				else
diff --git a/arch/X86/X86ImmSize.inc b/arch/X86/X86ImmSize.inc
new file mode 100644
index 0000000..da0f360
--- /dev/null
+++ b/arch/X86/X86ImmSize.inc
@@ -0,0 +1,312 @@
+{1, X86_AAD8i8},
+{1, X86_AAM8i8},
+{2, X86_ADC16i16},
+{2, X86_ADC16mi},
+{2, X86_ADC16mi8},
+{2, X86_ADC16ri},
+{2, X86_ADC16ri8},
+{4, X86_ADC32i32},
+{4, X86_ADC32mi},
+{4, X86_ADC32mi8},
+{4, X86_ADC32ri},
+{4, X86_ADC32ri8},
+{8, X86_ADC64i32},
+{8, X86_ADC64mi32},
+{8, X86_ADC64mi8},
+{8, X86_ADC64ri32},
+{8, X86_ADC64ri8},
+{1, X86_ADC8i8},
+{1, X86_ADC8mi},
+{1, X86_ADC8ri},
+{2, X86_ADD16i16},
+{2, X86_ADD16mi},
+{2, X86_ADD16mi8},
+{2, X86_ADD16ri},
+{2, X86_ADD16ri8},
+{4, X86_ADD32i32},
+{4, X86_ADD32mi},
+{4, X86_ADD32mi8},
+{4, X86_ADD32ri},
+{4, X86_ADD32ri8},
+{8, X86_ADD64i32},
+{8, X86_ADD64mi32},
+{8, X86_ADD64mi8},
+{8, X86_ADD64ri32},
+{8, X86_ADD64ri8},
+{1, X86_ADD8i8},
+{1, X86_ADD8mi},
+{1, X86_ADD8ri},
+{1, X86_ADD8ri8},
+{2, X86_AND16i16},
+{2, X86_AND16mi},
+{2, X86_AND16mi8},
+{2, X86_AND16ri},
+{2, X86_AND16ri8},
+{4, X86_AND32i32},
+{4, X86_AND32mi},
+{4, X86_AND32mi8},
+{4, X86_AND32ri},
+{4, X86_AND32ri8},
+{8, X86_AND64i32},
+{8, X86_AND64mi32},
+{8, X86_AND64mi8},
+{8, X86_AND64ri32},
+{8, X86_AND64ri8},
+{1, X86_AND8i8},
+{1, X86_AND8mi},
+{1, X86_AND8ri},
+{1, X86_AND8ri8},
+{2, X86_BT16mi8},
+{2, X86_BT16ri8},
+{4, X86_BT32mi8},
+{4, X86_BT32ri8},
+{8, X86_BT64mi8},
+{8, X86_BT64ri8},
+{2, X86_BTC16mi8},
+{2, X86_BTC16ri8},
+{4, X86_BTC32mi8},
+{4, X86_BTC32ri8},
+{8, X86_BTC64mi8},
+{8, X86_BTC64ri8},
+{2, X86_BTR16mi8},
+{2, X86_BTR16ri8},
+{4, X86_BTR32mi8},
+{4, X86_BTR32ri8},
+{8, X86_BTR64mi8},
+{8, X86_BTR64ri8},
+{2, X86_BTS16mi8},
+{2, X86_BTS16ri8},
+{4, X86_BTS32mi8},
+{4, X86_BTS32ri8},
+{8, X86_BTS64mi8},
+{8, X86_BTS64ri8},
+{2, X86_CALLpcrel16},
+{2, X86_CMP16i16},
+{2, X86_CMP16mi},
+{2, X86_CMP16mi8},
+{2, X86_CMP16ri},
+{2, X86_CMP16ri8},
+{4, X86_CMP32i32},
+{4, X86_CMP32mi},
+{4, X86_CMP32mi8},
+{4, X86_CMP32ri},
+{4, X86_CMP32ri8},
+{8, X86_CMP64i32},
+{8, X86_CMP64mi32},
+{8, X86_CMP64mi8},
+{8, X86_CMP64ri32},
+{8, X86_CMP64ri8},
+{1, X86_CMP8i8},
+{1, X86_CMP8mi},
+{1, X86_CMP8ri},
+{2, X86_IMUL16rmi8},
+{2, X86_IMUL16rri8},
+{4, X86_IMUL32rmi8},
+{4, X86_IMUL32rri8},
+{8, X86_IMUL64rmi32},
+{8, X86_IMUL64rmi8},
+{8, X86_IMUL64rri32},
+{8, X86_IMUL64rri8},
+{2, X86_IN16ri},
+{4, X86_IN32ri},
+{1, X86_IN8ri},
+{2, X86_JMP_2},
+{2, X86_MOV16mi},
+{2, X86_MOV16ri},
+{2, X86_MOV16ri_alt},
+{4, X86_MOV32mi},
+{4, X86_MOV32ri},
+{8, X86_MOV32ri64},
+{4, X86_MOV32ri_alt},
+{8, X86_MOV64mi32},
+{8, X86_MOV64ri},
+{8, X86_MOV64ri32},
+{1, X86_MOV8mi},
+{1, X86_MOV8ri},
+{1, X86_MOV8ri_alt},
+{2, X86_OR16i16},
+{2, X86_OR16mi},
+{2, X86_OR16mi8},
+{2, X86_OR16ri},
+{2, X86_OR16ri8},
+{4, X86_OR32i32},
+{4, X86_OR32mi},
+{4, X86_OR32mi8},
+{4, X86_OR32ri},
+{4, X86_OR32ri8},
+{8, X86_OR64i32},
+{8, X86_OR64mi32},
+{8, X86_OR64mi8},
+{8, X86_OR64ri32},
+{8, X86_OR64ri8},
+{1, X86_OR8i8},
+{1, X86_OR8mi},
+{1, X86_OR8ri},
+{1, X86_OR8ri8},
+{2, X86_PUSH16i8},
+{4, X86_PUSH32i8},
+{8, X86_PUSH64i16},
+{8, X86_PUSH64i32},
+{8, X86_PUSH64i8},
+{2, X86_PUSHi16},
+{4, X86_PUSHi32},
+{2, X86_RCL16mi},
+{2, X86_RCL16ri},
+{4, X86_RCL32mi},
+{4, X86_RCL32ri},
+{8, X86_RCL64mi},
+{8, X86_RCL64ri},
+{1, X86_RCL8mi},
+{1, X86_RCL8ri},
+{2, X86_RCR16mi},
+{2, X86_RCR16ri},
+{4, X86_RCR32mi},
+{4, X86_RCR32ri},
+{8, X86_RCR64mi},
+{8, X86_RCR64ri},
+{1, X86_RCR8mi},
+{1, X86_RCR8ri},
+{2, X86_ROL16mi},
+{2, X86_ROL16ri},
+{4, X86_ROL32mi},
+{4, X86_ROL32ri},
+{8, X86_ROL64mi},
+{8, X86_ROL64ri},
+{1, X86_ROL8mi},
+{1, X86_ROL8ri},
+{2, X86_ROR16mi},
+{2, X86_ROR16ri},
+{4, X86_ROR32mi},
+{4, X86_ROR32ri},
+{8, X86_ROR64mi},
+{8, X86_ROR64ri},
+{1, X86_ROR8mi},
+{1, X86_ROR8ri},
+{4, X86_RORX32mi},
+{4, X86_RORX32ri},
+{8, X86_RORX64mi},
+{8, X86_RORX64ri},
+{2, X86_SAL16mi},
+{2, X86_SAL16ri},
+{4, X86_SAL32mi},
+{4, X86_SAL32ri},
+{8, X86_SAL64mi},
+{8, X86_SAL64ri},
+{1, X86_SAL8mi},
+{1, X86_SAL8ri},
+{2, X86_SAR16mi},
+{2, X86_SAR16ri},
+{4, X86_SAR32mi},
+{4, X86_SAR32ri},
+{8, X86_SAR64mi},
+{8, X86_SAR64ri},
+{1, X86_SAR8mi},
+{1, X86_SAR8ri},
+{2, X86_SBB16i16},
+{2, X86_SBB16mi},
+{2, X86_SBB16mi8},
+{2, X86_SBB16ri},
+{2, X86_SBB16ri8},
+{4, X86_SBB32i32},
+{4, X86_SBB32mi},
+{4, X86_SBB32mi8},
+{4, X86_SBB32ri},
+{4, X86_SBB32ri8},
+{8, X86_SBB64i32},
+{8, X86_SBB64mi32},
+{8, X86_SBB64mi8},
+{8, X86_SBB64ri32},
+{8, X86_SBB64ri8},
+{1, X86_SBB8i8},
+{1, X86_SBB8mi},
+{1, X86_SBB8ri},
+{2, X86_SHL16mi},
+{2, X86_SHL16ri},
+{4, X86_SHL32mi},
+{4, X86_SHL32ri},
+{8, X86_SHL64mi},
+{8, X86_SHL64ri},
+{1, X86_SHL8mi},
+{1, X86_SHL8ri},
+{1, X86_SHLD16mri8},
+{2, X86_SHLD16rri8},
+{1, X86_SHLD32mri8},
+{4, X86_SHLD32rri8},
+{1, X86_SHLD64mri8},
+{8, X86_SHLD64rri8},
+{2, X86_SHR16mi},
+{2, X86_SHR16ri},
+{4, X86_SHR32mi},
+{4, X86_SHR32ri},
+{8, X86_SHR64mi},
+{8, X86_SHR64ri},
+{1, X86_SHR8mi},
+{1, X86_SHR8ri},
+{1, X86_SHRD16mri8},
+{2, X86_SHRD16rri8},
+{1, X86_SHRD32mri8},
+{4, X86_SHRD32rri8},
+{1, X86_SHRD64mri8},
+{8, X86_SHRD64rri8},
+{2, X86_SUB16i16},
+{2, X86_SUB16mi},
+{2, X86_SUB16mi8},
+{2, X86_SUB16ri},
+{2, X86_SUB16ri8},
+{4, X86_SUB32i32},
+{4, X86_SUB32mi},
+{4, X86_SUB32mi8},
+{4, X86_SUB32ri},
+{4, X86_SUB32ri8},
+{8, X86_SUB64i32},
+{8, X86_SUB64mi32},
+{8, X86_SUB64mi8},
+{8, X86_SUB64ri32},
+{8, X86_SUB64ri8},
+{1, X86_SUB8i8},
+{1, X86_SUB8mi},
+{1, X86_SUB8ri},
+{1, X86_SUB8ri8},
+{8, X86_TCRETURNdi64},
+{8, X86_TCRETURNmi64},
+{8, X86_TCRETURNri64},
+{2, X86_TEST16i16},
+{2, X86_TEST16mi},
+{2, X86_TEST16mi_alt},
+{2, X86_TEST16ri},
+{2, X86_TEST16ri_alt},
+{4, X86_TEST32i32},
+{4, X86_TEST32mi},
+{4, X86_TEST32mi_alt},
+{4, X86_TEST32ri},
+{4, X86_TEST32ri_alt},
+{8, X86_TEST64i32},
+{8, X86_TEST64mi32},
+{4, X86_TEST64mi32_alt},
+{8, X86_TEST64ri32},
+{4, X86_TEST64ri32_alt},
+{1, X86_TEST8i8},
+{1, X86_TEST8mi},
+{1, X86_TEST8mi_alt},
+{1, X86_TEST8ri},
+{1, X86_TEST8ri_NOREX},
+{1, X86_TEST8ri_alt},
+{2, X86_XOR16i16},
+{2, X86_XOR16mi},
+{2, X86_XOR16mi8},
+{2, X86_XOR16ri},
+{2, X86_XOR16ri8},
+{4, X86_XOR32i32},
+{4, X86_XOR32mi},
+{4, X86_XOR32mi8},
+{4, X86_XOR32ri},
+{4, X86_XOR32ri8},
+{8, X86_XOR64i32},
+{8, X86_XOR64mi32},
+{8, X86_XOR64mi8},
+{8, X86_XOR64ri32},
+{8, X86_XOR64ri8},
+{1, X86_XOR8i8},
+{1, X86_XOR8mi},
+{1, X86_XOR8ri},
+{1, X86_XOR8ri8},
diff --git a/arch/X86/X86IntelInstPrinter.c b/arch/X86/X86IntelInstPrinter.c
index f2420c0..0e1b89f 100644
--- a/arch/X86/X86IntelInstPrinter.c
+++ b/arch/X86/X86IntelInstPrinter.c
@@ -585,9 +585,37 @@
 	}
 }
 
+static void printImm(int syntax, SStream *O, int64_t imm, bool positive)	// print an immediate to O; positive = never use a minus sign (NOTE(review): 'syntax' is unused here — presumably reserved for syntax variants; confirm)
+{
+	if (positive) {
+		if (imm < 0) {	// negative bit pattern: print its raw 64-bit two's-complement hex form
+			SStream_concat(O, "0x%"PRIx64, imm);
+		} else {
+			if (imm > HEX_THRESHOLD)	// large values in hex, small ones in decimal
+				SStream_concat(O, "0x%"PRIx64, imm);
+			else
+				SStream_concat(O, "%"PRIu64, imm);
+		}
+	} else {
+		if (imm < 0) {
+			if (imm == 0x8000000000000000LL)  // INT64_MIN: -imm overflows (imm == -imm), so print the constant directly
+				SStream_concat0(O, "0x8000000000000000");
+			else if (imm < -HEX_THRESHOLD)	// large magnitude: signed hex
+				SStream_concat(O, "-0x%"PRIx64, -imm);
+			else	// small magnitude: signed decimal
+				SStream_concat(O, "-%"PRIu64, -imm);
+
+		} else {
+			if (imm > HEX_THRESHOLD)	// large values in hex, small ones in decimal
+				SStream_concat(O, "0x%"PRIx64, imm);
+			else
+				SStream_concat(O, "%"PRIu64, imm);
+		}
+	}
+}
+
 static void printOperand(MCInst *MI, unsigned OpNo, SStream *O)
 {
-	uint8_t opsize = 0;
 	MCOperand *Op  = MCInst_getOperand(MI, OpNo);
 
 	if (MCOperand_isReg(Op)) {
@@ -601,6 +629,7 @@
 				MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].type = X86_OP_REG;
 				MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].reg = reg;
 				MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size = MI->csh->regsize_map[reg];
+
 				MI->flat_insn->detail->x86.op_count++;
 			}
 		}
@@ -609,110 +638,37 @@
 			MI->op1_size = MI->csh->regsize_map[reg];
 	} else if (MCOperand_isImm(Op)) {
 		int64_t imm = MCOperand_getImm(Op);
+		int opsize = X86_immediate_size(MCInst_getOpcode(MI));
+		if (opsize == 1)    // print 1 byte immediate in positive form
+			imm = imm & 0xff;
 
-		switch(MCInst_getOpcode(MI)) {
-			default:
-				break;
-
-			case X86_AAD8i8:
-			case X86_AAM8i8:
-			case X86_ADC8i8:
-			case X86_ADD8i8:
-			case X86_AND8i8:
-			case X86_CMP8i8:
-			case X86_OR8i8:
-			case X86_SBB8i8:
-			case X86_SUB8i8:
-			case X86_TEST8i8:
-			case X86_XOR8i8:
-			case X86_ROL8ri:
-			case X86_ADC8ri:
-			case X86_ADD8ri:
-			case X86_ADD8ri8:
-			case X86_AND8ri:
-			case X86_AND8ri8:
-			case X86_CMP8ri:
-			case X86_MOV8ri:
-			case X86_MOV8ri_alt:
-			case X86_OR8ri:
-			case X86_OR8ri8:
-			case X86_RCL8ri:
-			case X86_RCR8ri:
-			case X86_ROR8ri:
-			case X86_SAL8ri:
-			case X86_SAR8ri:
-			case X86_SBB8ri:
-			case X86_SHL8ri:
-			case X86_SHR8ri:
-			case X86_SUB8ri:
-			case X86_SUB8ri8:
-			case X86_TEST8ri:
-			case X86_TEST8ri_NOREX:
-			case X86_TEST8ri_alt:
-			case X86_XOR8ri:
-			case X86_XOR8ri8:
-			case X86_OUT8ir:
-
-			case X86_ADC8mi:
-			case X86_ADD8mi:
-			case X86_AND8mi:
-			case X86_CMP8mi:
-			case X86_LOCK_ADD8mi:
-			case X86_LOCK_AND8mi:
-			case X86_LOCK_OR8mi:
-			case X86_LOCK_SUB8mi:
-			case X86_LOCK_XOR8mi:
-			case X86_MOV8mi:
-			case X86_OR8mi:
-			case X86_RCL8mi:
-			case X86_RCR8mi:
-			case X86_ROL8mi:
-			case X86_ROR8mi:
-			case X86_SAL8mi:
-			case X86_SAR8mi:
-			case X86_SBB8mi:
-			case X86_SHL8mi:
-			case X86_SHR8mi:
-			case X86_SUB8mi:
-			case X86_TEST8mi:
-			case X86_TEST8mi_alt:
-			case X86_XOR8mi:
-			case X86_PUSH64i8:
-			case X86_CMP32ri8:
-			case X86_CMP64ri8:
-
-				imm = imm & 0xff;
-				opsize = 1;     // immediate of 1 byte
-				break;
-		}
-
+		// printf(">>> id = %u\n", MI->flat_insn->id);
 		switch(MI->flat_insn->id) {
 			default:
-				if (imm >= 0) {
-					if (imm > HEX_THRESHOLD)
-						SStream_concat(O, "0x%"PRIx64, imm);
-					else
-						SStream_concat(O, "%"PRIu64, imm);
-				} else {
-					if (imm < -HEX_THRESHOLD)
-						SStream_concat(O, "-0x%"PRIx64, -imm);
-					else
-						SStream_concat(O, "-%"PRIu64, -imm);
-				}
+				printImm(MI->csh->syntax, O, imm, false);
+				break;
 
+			case X86_INS_MOVABS:
+				// do not print number in negative form
+				printImm(MI->csh->syntax, O, imm, true);
+				break;
+
+			case X86_INS_IN:
+			case X86_INS_OUT:
+			case X86_INS_INT:
+				// do not print number in negative form
+				imm = imm & 0xff;
+				printImm(MI->csh->syntax, O, imm, true);
 				break;
 
 			case X86_INS_LCALL:
 			case X86_INS_LJMP:
 				// always print address in positive form
-				if (OpNo == 1) {	// selector is ptr16
+				if (OpNo == 1) {	// ptr16 part
 					imm = imm & 0xffff;
 					opsize = 2;
 				}
-				if (imm > HEX_THRESHOLD)
-					SStream_concat(O, "0x%"PRIx64, imm);
-				else
-					SStream_concat(O, "%"PRIu64, imm);
+				printImm(MI->csh->syntax, O, imm, true);
 				break;
 
 			case X86_INS_AND:
@@ -720,19 +676,20 @@
 			case X86_INS_XOR:
 				// do not print number in negative form
 				if (imm >= 0 && imm <= HEX_THRESHOLD)
-					SStream_concat(O, "%u", imm);
+					printImm(MI->csh->syntax, O, imm, true);
 				else {
-					imm = arch_masks[MI->op1_size? MI->op1_size : MI->imm_size] & imm;
-					SStream_concat(O, "0x%"PRIx64, imm);
+					imm = arch_masks[opsize? opsize : MI->imm_size] & imm;
+					printImm(MI->csh->syntax, O, imm, true);
 				}
 				break;
+
 			case X86_INS_RET:
 				// RET imm16
 				if (imm >= 0 && imm <= HEX_THRESHOLD)
-					SStream_concat(O, "%u", imm);
+					printImm(MI->csh->syntax, O, imm, true);
 				else {
 					imm = 0xffff & imm;
-					SStream_concat(O, "0x%x", 0xffff & imm);
+					printImm(MI->csh->syntax, O, imm, true);
 				}
 				break;
 		}
@@ -743,7 +700,7 @@
 			} else {
 				MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].type = X86_OP_IMM;
 				if (opsize > 0)
-					MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size = opsize;
+					MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size = (uint8_t)opsize;
 				else if (MI->flat_insn->detail->x86.op_count > 0) {
 					if (MI->flat_insn->id != X86_INS_LCALL && MI->flat_insn->id != X86_INS_LJMP) {
 						MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size =
@@ -752,14 +709,11 @@
 						MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size = MI->imm_size;
 				} else
 					MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size = MI->imm_size;
-
 				MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].imm = imm;
+
 				MI->flat_insn->detail->x86.op_count++;
 			}
 		}
-
-		//if (MI->op1_size == 0)
-		//	MI->op1_size = MI->imm_size;
 	}
 }
 
diff --git a/arch/X86/X86Mapping.c b/arch/X86/X86Mapping.c
index fcccaa9..5129952 100644
--- a/arch/X86/X86Mapping.c
+++ b/arch/X86/X86Mapping.c
@@ -47942,4 +47942,36 @@
 	}
 }
 
+// map instruction id to the size of its immediate operand
+static struct size_id {
+	unsigned char size;
+	unsigned short id;
+} x86_imm_size[] = {
+#include "X86ImmSize.inc"
+};
+
+// given the instruction id, return the size in bytes of its immediate operand (or 0 if none)
+int X86_immediate_size(unsigned int id)
+{
+	// binary search: x86_imm_size[] is sorted by instruction id.
+	// Indices are signed on purpose: if id is below the first entry,
+	// "right = m - 1" becomes -1 and ends the loop, whereas an
+	// unsigned index would wrap to UINT_MAX and read out of bounds.
+	int left = 0, right = ARR_SIZE(x86_imm_size) - 1;
+
+	while(left <= right) {
+		int m = (left + right) / 2;
+		if (id == x86_imm_size[m].id)
+			return x86_imm_size[m].size;
+
+		if (id < x86_imm_size[m].id)
+			right = m - 1;
+		else
+			left = m + 1;
+	}
+
+	// no entry: this opcode has no immediate-size information
+	return 0;
+}
+
 #endif
diff --git a/arch/X86/X86Mapping.h b/arch/X86/X86Mapping.h
index 513631e..b33917d 100644
--- a/arch/X86/X86Mapping.h
+++ b/arch/X86/X86Mapping.h
@@ -60,4 +60,6 @@
 
 void op_addAvxRoundingMode(MCInst *MI, int v);
 
+int X86_immediate_size(unsigned int id);
+
 #endif