Merges encoding to next (#1194)
* merge encoding branch into next branch
* added python bindings and updated test to support encoding
* fix python import
* fix py binding fields
* fix disp size printing
* fixed py binding, again
* Update CREDITS.TXT
* fixed formatting and a cast
* Changed param from int to uint8_t, fixed warnings
diff --git a/CREDITS.TXT b/CREDITS.TXT
index f57d312..912736c 100644
--- a/CREDITS.TXT
+++ b/CREDITS.TXT
@@ -67,4 +67,6 @@
Fotis Loukos: TMS320C64x architecture.
Wolfgang Schwotzer: M680X architecture.
Philippe Antoine: Integration with oss-fuzz and various fixes.
+Martin (obs1dium): x86 encoding features
+Stephen Eckels (stevemk14ebr): x86 encoding features
diff --git a/arch/X86/X86ATTInstPrinter.c b/arch/X86/X86ATTInstPrinter.c
index f1d4a06..7daf406 100644
--- a/arch/X86/X86ATTInstPrinter.c
+++ b/arch/X86/X86ATTInstPrinter.c
@@ -657,7 +657,8 @@
} else if (MCOperand_isImm(Op)) {
// Print X86 immediates as signed values.
int64_t imm = MCOperand_getImm(Op);
- int opsize = X86_immediate_size(MCInst_getOpcode(MI));
+ uint8_t encsize;
+ int opsize = X86_immediate_size(MCInst_getOpcode(MI), &encsize);
if (opsize == 1) // print 1 byte immediate in positive form
imm = imm & 0xff;
@@ -738,7 +739,10 @@
MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].imm = imm;
if (opsize > 0)
+ {
MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size = (uint8_t)opsize;
+ MI->flat_insn->detail->x86.encoding.imm_size = encsize;
+ }
else if (MI->op1_size > 0)
MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size = MI->op1_size;
else
diff --git a/arch/X86/X86Disassembler.c b/arch/X86/X86Disassembler.c
index 1c9d559..0dea5ce 100644
--- a/arch/X86/X86Disassembler.c
+++ b/arch/X86/X86Disassembler.c
@@ -883,12 +883,23 @@
pub->detail->x86.addr_size = inter->addressSize;
pub->detail->x86.modrm = inter->orgModRM;
- pub->detail->x86.sib = inter->sib;
- pub->detail->x86.disp = inter->displacement;
+ pub->detail->x86.encoding.modrm_offset = inter->modRMOffset;
+ pub->detail->x86.sib = inter->sib;
pub->detail->x86.sib_index = x86_map_sib_index(inter->sibIndex);
pub->detail->x86.sib_scale = inter->sibScale;
pub->detail->x86.sib_base = x86_map_sib_base(inter->sibBase);
+
+ pub->detail->x86.disp = inter->displacement;
+ if (inter->consumedDisplacement)
+ {
+ pub->detail->x86.encoding.disp_offset = inter->displacementOffset;
+ pub->detail->x86.encoding.disp_size = inter->displacementSize;
+ }
+
+ pub->detail->x86.encoding.imm_offset = inter->immediateOffset;
+ if (pub->detail->x86.encoding.imm_size == 0 && inter->immediateOffset != 0)
+ pub->detail->x86.encoding.imm_size = inter->immediateSize;
}
void X86_init(MCRegisterInfo *MRI)
diff --git a/arch/X86/X86DisassemblerDecoder.c b/arch/X86/X86DisassemblerDecoder.c
index 788b797..eace92f 100644
--- a/arch/X86/X86DisassemblerDecoder.c
+++ b/arch/X86/X86DisassemblerDecoder.c
@@ -1584,6 +1584,8 @@
if (insn->consumedModRM)
return 0;
+ insn->modRMOffset = (uint8_t)(insn->readerCursor - insn->startLocation);
+
if (consumeByte(insn, &insn->modRM))
return -1;
@@ -2043,7 +2045,7 @@
return -1;
// Apply the AVX512 compressed displacement scaling factor.
if (x86OperandSets[insn->spec->operands][index].encoding != ENCODING_REG && insn->eaDisplacement == EA_DISP_8)
- insn->displacement *= 1 << (x86OperandSets[insn->spec->operands][index].encoding - ENCODING_RM);
+ insn->displacement *= (int64_t)1 << (x86OperandSets[insn->spec->operands][index].encoding - ENCODING_RM);
break;
case ENCODING_CB:
case ENCODING_CW:
@@ -2087,6 +2089,15 @@
case ENCODING_Ia:
if (readImmediate(insn, insn->addressSize))
return -1;
+			/* A direct memory-offset (moffset) immediate is mapped
+			   to a memory operand later on. Adjust the encoding info
+			   here so that it reflects that as well. */
+ insn->displacementOffset = insn->immediateOffset;
+ insn->consumedDisplacement = true;
+ insn->displacementSize = insn->immediateSize;
+ insn->displacement = insn->immediates[insn->numImmediatesConsumed - 1];
+ insn->immediateOffset = 0;
+ insn->immediateSize = 0;
break;
case ENCODING_RB:
if (readOpcodeRegister(insn, 1))
diff --git a/arch/X86/X86DisassemblerDecoder.h b/arch/X86/X86DisassemblerDecoder.h
index 9099711..7061276 100644
--- a/arch/X86/X86DisassemblerDecoder.h
+++ b/arch/X86/X86DisassemblerDecoder.h
@@ -594,7 +594,7 @@
uint8_t sib;
/* The displacement, used for memory operands */
bool consumedDisplacement;
- int32_t displacement;
+ int64_t displacement;
/* The value of the two-byte escape prefix (usually 0x0f) */
uint8_t twoByteEscape;
/* The value of the three-byte escape prefix (usually 0x38 or 0x3a) */
@@ -614,6 +614,8 @@
needed to find relocation entries for adding symbolic operands */
uint8_t displacementOffset;
uint8_t immediateOffset;
+ uint8_t modRMOffset;
+
// end-of-zero-members
diff --git a/arch/X86/X86ImmSize.inc b/arch/X86/X86ImmSize.inc
index 4abab5c..6fe829e 100644
--- a/arch/X86/X86ImmSize.inc
+++ b/arch/X86/X86ImmSize.inc
@@ -1,339 +1,339 @@
-{1, X86_AAD8i8},
-{1, X86_AAM8i8},
-{2, X86_ADC16i16},
-{2, X86_ADC16mi},
-{2, X86_ADC16mi8},
-{2, X86_ADC16ri},
-{2, X86_ADC16ri8},
-{4, X86_ADC32i32},
-{4, X86_ADC32mi},
-{4, X86_ADC32mi8},
-{4, X86_ADC32ri},
-{4, X86_ADC32ri8},
-{8, X86_ADC64i32},
-{8, X86_ADC64mi32},
-{8, X86_ADC64mi8},
-{8, X86_ADC64ri32},
-{8, X86_ADC64ri8},
-{1, X86_ADC8i8},
-{1, X86_ADC8mi},
-{1, X86_ADC8mi8},
-{1, X86_ADC8ri},
-{1, X86_ADC8ri8},
-{2, X86_ADD16i16},
-{2, X86_ADD16mi},
-{2, X86_ADD16mi8},
-{2, X86_ADD16ri},
-{2, X86_ADD16ri8},
-{4, X86_ADD32i32},
-{4, X86_ADD32mi},
-{4, X86_ADD32mi8},
-{4, X86_ADD32ri},
-{4, X86_ADD32ri8},
-{8, X86_ADD64i32},
-{8, X86_ADD64mi32},
-{8, X86_ADD64mi8},
-{8, X86_ADD64ri32},
-{8, X86_ADD64ri8},
-{1, X86_ADD8i8},
-{1, X86_ADD8mi},
-{1, X86_ADD8mi8},
-{1, X86_ADD8ri},
-{1, X86_ADD8ri8},
-{2, X86_AND16i16},
-{2, X86_AND16mi},
-{2, X86_AND16mi8},
-{2, X86_AND16ri},
-{2, X86_AND16ri8},
-{4, X86_AND32i32},
-{4, X86_AND32mi},
-{4, X86_AND32mi8},
-{4, X86_AND32ri},
-{4, X86_AND32ri8},
-{8, X86_AND64i32},
-{8, X86_AND64mi32},
-{8, X86_AND64mi8},
-{8, X86_AND64ri32},
-{8, X86_AND64ri8},
-{1, X86_AND8i8},
-{1, X86_AND8mi},
-{1, X86_AND8mi8},
-{1, X86_AND8ri},
-{1, X86_AND8ri8},
-{2, X86_BT16mi8},
-{2, X86_BT16ri8},
-{4, X86_BT32mi8},
-{4, X86_BT32ri8},
-{8, X86_BT64mi8},
-{8, X86_BT64ri8},
-{2, X86_BTC16mi8},
-{2, X86_BTC16ri8},
-{4, X86_BTC32mi8},
-{4, X86_BTC32ri8},
-{8, X86_BTC64mi8},
-{8, X86_BTC64ri8},
-{2, X86_BTR16mi8},
-{2, X86_BTR16ri8},
-{4, X86_BTR32mi8},
-{4, X86_BTR32ri8},
-{8, X86_BTR64mi8},
-{8, X86_BTR64ri8},
-{2, X86_BTS16mi8},
-{2, X86_BTS16ri8},
-{4, X86_BTS32mi8},
-{4, X86_BTS32ri8},
-{8, X86_BTS64mi8},
-{8, X86_BTS64ri8},
-{2, X86_CALLpcrel16},
-{2, X86_CMP16i16},
-{2, X86_CMP16mi},
-{2, X86_CMP16mi8},
-{2, X86_CMP16ri},
-{2, X86_CMP16ri8},
-{4, X86_CMP32i32},
-{4, X86_CMP32mi},
-{4, X86_CMP32mi8},
-{4, X86_CMP32ri},
-{4, X86_CMP32ri8},
-{8, X86_CMP64i32},
-{8, X86_CMP64mi32},
-{8, X86_CMP64mi8},
-{8, X86_CMP64ri32},
-{8, X86_CMP64ri8},
-{1, X86_CMP8i8},
-{1, X86_CMP8mi},
-{1, X86_CMP8mi8},
-{1, X86_CMP8ri},
-{1, X86_CMP8ri8},
-{2, X86_IMUL16rmi8},
-{2, X86_IMUL16rri8},
-{4, X86_IMUL32rmi8},
-{4, X86_IMUL32rri8},
-{8, X86_IMUL64rmi32},
-{8, X86_IMUL64rmi8},
-{8, X86_IMUL64rri32},
-{8, X86_IMUL64rri8},
-{2, X86_IN16ri},
-{4, X86_IN32ri},
-{1, X86_IN8ri},
-{2, X86_JMP_2},
-{2, X86_MOV16mi},
-{2, X86_MOV16ri},
-{2, X86_MOV16ri_alt},
-{4, X86_MOV32mi},
-{4, X86_MOV32ri},
-{8, X86_MOV32ri64},
-{4, X86_MOV32ri_alt},
-{8, X86_MOV64mi32},
-{8, X86_MOV64ri},
-{8, X86_MOV64ri32},
-{1, X86_MOV8mi},
-{1, X86_MOV8ri},
-{1, X86_MOV8ri_alt},
-{2, X86_OR16i16},
-{2, X86_OR16mi},
-{2, X86_OR16mi8},
-{2, X86_OR16ri},
-{2, X86_OR16ri8},
-{4, X86_OR32i32},
-{4, X86_OR32mi},
-{4, X86_OR32mi8},
-{4, X86_OR32ri},
-{4, X86_OR32ri8},
-{8, X86_OR64i32},
-{8, X86_OR64mi32},
-{8, X86_OR64mi8},
-{8, X86_OR64ri32},
-{8, X86_OR64ri8},
-{1, X86_OR8i8},
-{1, X86_OR8mi},
-{1, X86_OR8mi8},
-{1, X86_OR8ri},
-{1, X86_OR8ri8},
-{2, X86_PUSH16i8},
-{4, X86_PUSH32i8},
-{8, X86_PUSH64i16},
-{8, X86_PUSH64i32},
-{8, X86_PUSH64i8},
-{2, X86_PUSHi16},
-{4, X86_PUSHi32},
-{2, X86_RCL16mi},
-{2, X86_RCL16ri},
-{4, X86_RCL32mi},
-{4, X86_RCL32ri},
-{8, X86_RCL64mi},
-{8, X86_RCL64ri},
-{1, X86_RCL8mi},
-{1, X86_RCL8ri},
-{2, X86_RCR16mi},
-{2, X86_RCR16ri},
-{4, X86_RCR32mi},
-{4, X86_RCR32ri},
-{8, X86_RCR64mi},
-{8, X86_RCR64ri},
-{1, X86_RCR8mi},
-{1, X86_RCR8ri},
-{4, X86_RELEASE_ADD32mi},
-{8, X86_RELEASE_ADD64mi32},
-{1, X86_RELEASE_ADD8mi},
-{4, X86_RELEASE_AND32mi},
-{8, X86_RELEASE_AND64mi32},
-{1, X86_RELEASE_AND8mi},
-{2, X86_RELEASE_MOV16mi},
-{4, X86_RELEASE_MOV32mi},
-{8, X86_RELEASE_MOV64mi32},
-{1, X86_RELEASE_MOV8mi},
-{4, X86_RELEASE_OR32mi},
-{8, X86_RELEASE_OR64mi32},
-{1, X86_RELEASE_OR8mi},
-{4, X86_RELEASE_XOR32mi},
-{8, X86_RELEASE_XOR64mi32},
-{1, X86_RELEASE_XOR8mi},
-{2, X86_ROL16mi},
-{2, X86_ROL16ri},
-{4, X86_ROL32mi},
-{4, X86_ROL32ri},
-{8, X86_ROL64mi},
-{8, X86_ROL64ri},
-{1, X86_ROL8mi},
-{1, X86_ROL8ri},
-{2, X86_ROR16mi},
-{2, X86_ROR16ri},
-{4, X86_ROR32mi},
-{4, X86_ROR32ri},
-{8, X86_ROR64mi},
-{8, X86_ROR64ri},
-{1, X86_ROR8mi},
-{1, X86_ROR8ri},
-{4, X86_RORX32mi},
-{4, X86_RORX32ri},
-{8, X86_RORX64mi},
-{8, X86_RORX64ri},
-{2, X86_SAL16mi},
-{2, X86_SAL16ri},
-{4, X86_SAL32mi},
-{4, X86_SAL32ri},
-{8, X86_SAL64mi},
-{8, X86_SAL64ri},
-{1, X86_SAL8mi},
-{1, X86_SAL8ri},
-{2, X86_SAR16mi},
-{2, X86_SAR16ri},
-{4, X86_SAR32mi},
-{4, X86_SAR32ri},
-{8, X86_SAR64mi},
-{8, X86_SAR64ri},
-{1, X86_SAR8mi},
-{1, X86_SAR8ri},
-{2, X86_SBB16i16},
-{2, X86_SBB16mi},
-{2, X86_SBB16mi8},
-{2, X86_SBB16ri},
-{2, X86_SBB16ri8},
-{4, X86_SBB32i32},
-{4, X86_SBB32mi},
-{4, X86_SBB32mi8},
-{4, X86_SBB32ri},
-{4, X86_SBB32ri8},
-{8, X86_SBB64i32},
-{8, X86_SBB64mi32},
-{8, X86_SBB64mi8},
-{8, X86_SBB64ri32},
-{8, X86_SBB64ri8},
-{1, X86_SBB8i8},
-{1, X86_SBB8mi},
-{1, X86_SBB8mi8},
-{1, X86_SBB8ri},
-{1, X86_SBB8ri8},
-{2, X86_SHL16mi},
-{2, X86_SHL16ri},
-{4, X86_SHL32mi},
-{4, X86_SHL32ri},
-{8, X86_SHL64mi},
-{8, X86_SHL64ri},
-{1, X86_SHL8mi},
-{1, X86_SHL8ri},
-{1, X86_SHLD16mri8},
-{2, X86_SHLD16rri8},
-{1, X86_SHLD32mri8},
-{4, X86_SHLD32rri8},
-{1, X86_SHLD64mri8},
-{8, X86_SHLD64rri8},
-{2, X86_SHR16mi},
-{2, X86_SHR16ri},
-{4, X86_SHR32mi},
-{4, X86_SHR32ri},
-{8, X86_SHR64mi},
-{8, X86_SHR64ri},
-{1, X86_SHR8mi},
-{1, X86_SHR8ri},
-{1, X86_SHRD16mri8},
-{2, X86_SHRD16rri8},
-{1, X86_SHRD32mri8},
-{4, X86_SHRD32rri8},
-{1, X86_SHRD64mri8},
-{8, X86_SHRD64rri8},
-{2, X86_SUB16i16},
-{2, X86_SUB16mi},
-{2, X86_SUB16mi8},
-{2, X86_SUB16ri},
-{2, X86_SUB16ri8},
-{4, X86_SUB32i32},
-{4, X86_SUB32mi},
-{4, X86_SUB32mi8},
-{4, X86_SUB32ri},
-{4, X86_SUB32ri8},
-{8, X86_SUB64i32},
-{8, X86_SUB64mi32},
-{8, X86_SUB64mi8},
-{8, X86_SUB64ri32},
-{8, X86_SUB64ri8},
-{1, X86_SUB8i8},
-{1, X86_SUB8mi},
-{1, X86_SUB8mi8},
-{1, X86_SUB8ri},
-{1, X86_SUB8ri8},
-{8, X86_TCRETURNdi64},
-{8, X86_TCRETURNmi64},
-{8, X86_TCRETURNri64},
-{2, X86_TEST16i16},
-{2, X86_TEST16mi},
-{2, X86_TEST16mi_alt},
-{2, X86_TEST16ri},
-{2, X86_TEST16ri_alt},
-{4, X86_TEST32i32},
-{4, X86_TEST32mi},
-{4, X86_TEST32mi_alt},
-{4, X86_TEST32ri},
-{4, X86_TEST32ri_alt},
-{8, X86_TEST64i32},
-{8, X86_TEST64mi32},
-{4, X86_TEST64mi32_alt},
-{8, X86_TEST64ri32},
-{4, X86_TEST64ri32_alt},
-{1, X86_TEST8i8},
-{1, X86_TEST8mi},
-{1, X86_TEST8mi_alt},
-{1, X86_TEST8ri},
-{1, X86_TEST8ri_NOREX},
-{1, X86_TEST8ri_alt},
-{2, X86_XOR16i16},
-{2, X86_XOR16mi},
-{2, X86_XOR16mi8},
-{2, X86_XOR16ri},
-{2, X86_XOR16ri8},
-{4, X86_XOR32i32},
-{4, X86_XOR32mi},
-{4, X86_XOR32mi8},
-{4, X86_XOR32ri},
-{4, X86_XOR32ri8},
-{8, X86_XOR64i32},
-{8, X86_XOR64mi32},
-{8, X86_XOR64mi8},
-{8, X86_XOR64ri32},
-{8, X86_XOR64ri8},
-{1, X86_XOR8i8},
-{1, X86_XOR8mi},
-{1, X86_XOR8mi8},
-{1, X86_XOR8ri},
-{1, X86_XOR8ri8},
+{1, 1, X86_AAD8i8},
+{1, 1, X86_AAM8i8},
+{2, 2, X86_ADC16i16},
+{2, 2, X86_ADC16mi},
+{1, 2, X86_ADC16mi8},
+{2, 2, X86_ADC16ri},
+{1, 2, X86_ADC16ri8},
+{4, 4, X86_ADC32i32},
+{4, 4, X86_ADC32mi},
+{1, 4, X86_ADC32mi8},
+{4, 4, X86_ADC32ri},
+{1, 4, X86_ADC32ri8},
+{4, 8, X86_ADC64i32},
+{4, 8, X86_ADC64mi32},
+{1, 8, X86_ADC64mi8},
+{4, 8, X86_ADC64ri32},
+{1, 8, X86_ADC64ri8},
+{1, 1, X86_ADC8i8},
+{1, 1, X86_ADC8mi},
+{1, 1, X86_ADC8mi8},
+{1, 1, X86_ADC8ri},
+{1, 1, X86_ADC8ri8},
+{2, 2, X86_ADD16i16},
+{2, 2, X86_ADD16mi},
+{1, 2, X86_ADD16mi8},
+{2, 2, X86_ADD16ri},
+{1, 2, X86_ADD16ri8},
+{4, 4, X86_ADD32i32},
+{4, 4, X86_ADD32mi},
+{1, 4, X86_ADD32mi8},
+{4, 4, X86_ADD32ri},
+{1, 4, X86_ADD32ri8},
+{4, 8, X86_ADD64i32},
+{4, 8, X86_ADD64mi32},
+{1, 8, X86_ADD64mi8},
+{4, 8, X86_ADD64ri32},
+{1, 8, X86_ADD64ri8},
+{1, 1, X86_ADD8i8},
+{1, 1, X86_ADD8mi},
+{1, 1, X86_ADD8mi8},
+{1, 1, X86_ADD8ri},
+{1, 1, X86_ADD8ri8},
+{2, 2, X86_AND16i16},
+{2, 2, X86_AND16mi},
+{1, 2, X86_AND16mi8},
+{2, 2, X86_AND16ri},
+{1, 2, X86_AND16ri8},
+{4, 4, X86_AND32i32},
+{4, 4, X86_AND32mi},
+{1, 4, X86_AND32mi8},
+{4, 4, X86_AND32ri},
+{1, 4, X86_AND32ri8},
+{4, 8, X86_AND64i32},
+{4, 8, X86_AND64mi32},
+{1, 8, X86_AND64mi8},
+{4, 8, X86_AND64ri32},
+{1, 8, X86_AND64ri8},
+{1, 1, X86_AND8i8},
+{1, 1, X86_AND8mi},
+{1, 1, X86_AND8mi8},
+{1, 1, X86_AND8ri},
+{1, 1, X86_AND8ri8},
+{1, 1, X86_BT16mi8},
+{1, 1, X86_BT16ri8},
+{1, 1, X86_BT32mi8},
+{1, 1, X86_BT32ri8},
+{1, 1, X86_BT64mi8},
+{1, 1, X86_BT64ri8},
+{1, 1, X86_BTC16mi8},
+{1, 1, X86_BTC16ri8},
+{1, 1, X86_BTC32mi8},
+{1, 1, X86_BTC32ri8},
+{1, 1, X86_BTC64mi8},
+{1, 1, X86_BTC64ri8},
+{1, 1, X86_BTR16mi8},
+{1, 1, X86_BTR16ri8},
+{1, 1, X86_BTR32mi8},
+{1, 1, X86_BTR32ri8},
+{1, 1, X86_BTR64mi8},
+{1, 1, X86_BTR64ri8},
+{1, 1, X86_BTS16mi8},
+{1, 1, X86_BTS16ri8},
+{1, 1, X86_BTS32mi8},
+{1, 1, X86_BTS32ri8},
+{1, 1, X86_BTS64mi8},
+{1, 1, X86_BTS64ri8},
+{2, 2, X86_CALLpcrel16},
+{2, 2, X86_CMP16i16},
+{2, 2, X86_CMP16mi},
+{1, 2, X86_CMP16mi8},
+{2, 2, X86_CMP16ri},
+{1, 2, X86_CMP16ri8},
+{4, 4, X86_CMP32i32},
+{4, 4, X86_CMP32mi},
+{1, 4, X86_CMP32mi8},
+{4, 4, X86_CMP32ri},
+{1, 4, X86_CMP32ri8},
+{4, 8, X86_CMP64i32},
+{4, 8, X86_CMP64mi32},
+{1, 8, X86_CMP64mi8},
+{4, 8, X86_CMP64ri32},
+{1, 8, X86_CMP64ri8},
+{1, 1, X86_CMP8i8},
+{1, 1, X86_CMP8mi},
+{1, 1, X86_CMP8mi8},
+{1, 1, X86_CMP8ri},
+{1, 1, X86_CMP8ri8},
+{1, 2, X86_IMUL16rmi8},
+{1, 2, X86_IMUL16rri8},
+{1, 4, X86_IMUL32rmi8},
+{1, 4, X86_IMUL32rri8},
+{4, 8, X86_IMUL64rmi32},
+{1, 8, X86_IMUL64rmi8},
+{4, 8, X86_IMUL64rri32},
+{1, 8, X86_IMUL64rri8},
+{2, 2, X86_IN16ri},
+{4, 4, X86_IN32ri},
+{1, 1, X86_IN8ri},
+{2, 2, X86_JMP_2},
+{2, 2, X86_MOV16mi},
+{2, 2, X86_MOV16ri},
+{2, 2, X86_MOV16ri_alt},
+{4, 4, X86_MOV32mi},
+{4, 4, X86_MOV32ri},
+{8, 8, X86_MOV32ri64},
+{4, 4, X86_MOV32ri_alt},
+{4, 8, X86_MOV64mi32},
+{8, 8, X86_MOV64ri},
+{4, 8, X86_MOV64ri32},
+{1, 1, X86_MOV8mi},
+{1, 1, X86_MOV8ri},
+{1, 1, X86_MOV8ri_alt},
+{2, 2, X86_OR16i16},
+{2, 2, X86_OR16mi},
+{1, 2, X86_OR16mi8},
+{2, 2, X86_OR16ri},
+{1, 2, X86_OR16ri8},
+{4, 4, X86_OR32i32},
+{4, 4, X86_OR32mi},
+{1, 4, X86_OR32mi8},
+{4, 4, X86_OR32ri},
+{1, 4, X86_OR32ri8},
+{4, 8, X86_OR64i32},
+{4, 8, X86_OR64mi32},
+{1, 8, X86_OR64mi8},
+{4, 8, X86_OR64ri32},
+{1, 8, X86_OR64ri8},
+{1, 1, X86_OR8i8},
+{1, 1, X86_OR8mi},
+{1, 1, X86_OR8mi8},
+{1, 1, X86_OR8ri},
+{1, 1, X86_OR8ri8},
+{1, 2, X86_PUSH16i8},
+{1, 4, X86_PUSH32i8},
+{2, 8, X86_PUSH64i16},
+{4, 8, X86_PUSH64i32},
+{1, 8, X86_PUSH64i8},
+{2, 2, X86_PUSHi16},
+{4, 4, X86_PUSHi32},
+{1, 1, X86_RCL16mi},
+{1, 1, X86_RCL16ri},
+{1, 1, X86_RCL32mi},
+{1, 1, X86_RCL32ri},
+{1, 1, X86_RCL64mi},
+{1, 1, X86_RCL64ri},
+{1, 1, X86_RCL8mi},
+{1, 1, X86_RCL8ri},
+{1, 1, X86_RCR16mi},
+{1, 1, X86_RCR16ri},
+{1, 1, X86_RCR32mi},
+{1, 1, X86_RCR32ri},
+{1, 1, X86_RCR64mi},
+{1, 1, X86_RCR64ri},
+{1, 1, X86_RCR8mi},
+{1, 1, X86_RCR8ri},
+{4, 4, X86_RELEASE_ADD32mi},
+{4, 8, X86_RELEASE_ADD64mi32},
+{1, 1, X86_RELEASE_ADD8mi},
+{4, 4, X86_RELEASE_AND32mi},
+{4, 8, X86_RELEASE_AND64mi32},
+{1, 1, X86_RELEASE_AND8mi},
+{2, 2, X86_RELEASE_MOV16mi},
+{4, 4, X86_RELEASE_MOV32mi},
+{4, 8, X86_RELEASE_MOV64mi32},
+{1, 1, X86_RELEASE_MOV8mi},
+{4, 4, X86_RELEASE_OR32mi},
+{4, 8, X86_RELEASE_OR64mi32},
+{1, 1, X86_RELEASE_OR8mi},
+{4, 4, X86_RELEASE_XOR32mi},
+{4, 8, X86_RELEASE_XOR64mi32},
+{1, 1, X86_RELEASE_XOR8mi},
+{1, 1, X86_ROL16mi},
+{1, 1, X86_ROL16ri},
+{1, 1, X86_ROL32mi},
+{1, 1, X86_ROL32ri},
+{1, 1, X86_ROL64mi},
+{1, 1, X86_ROL64ri},
+{1, 1, X86_ROL8mi},
+{1, 1, X86_ROL8ri},
+{1, 1, X86_ROR16mi},
+{1, 1, X86_ROR16ri},
+{1, 1, X86_ROR32mi},
+{1, 1, X86_ROR32ri},
+{1, 1, X86_ROR64mi},
+{1, 1, X86_ROR64ri},
+{1, 1, X86_ROR8mi},
+{1, 1, X86_ROR8ri},
+{4, 4, X86_RORX32mi},
+{4, 4, X86_RORX32ri},
+{8, 8, X86_RORX64mi},
+{8, 8, X86_RORX64ri},
+{1, 1, X86_SAL16mi},
+{1, 1, X86_SAL16ri},
+{1, 1, X86_SAL32mi},
+{1, 1, X86_SAL32ri},
+{1, 1, X86_SAL64mi},
+{1, 1, X86_SAL64ri},
+{1, 1, X86_SAL8mi},
+{1, 1, X86_SAL8ri},
+{1, 1, X86_SAR16mi},
+{1, 1, X86_SAR16ri},
+{1, 1, X86_SAR32mi},
+{1, 1, X86_SAR32ri},
+{1, 1, X86_SAR64mi},
+{1, 1, X86_SAR64ri},
+{1, 1, X86_SAR8mi},
+{1, 1, X86_SAR8ri},
+{2, 2, X86_SBB16i16},
+{2, 2, X86_SBB16mi},
+{1, 2, X86_SBB16mi8},
+{2, 2, X86_SBB16ri},
+{1, 2, X86_SBB16ri8},
+{4, 4, X86_SBB32i32},
+{4, 4, X86_SBB32mi},
+{1, 4, X86_SBB32mi8},
+{4, 4, X86_SBB32ri},
+{1, 4, X86_SBB32ri8},
+{4, 8, X86_SBB64i32},
+{4, 8, X86_SBB64mi32},
+{1, 8, X86_SBB64mi8},
+{4, 8, X86_SBB64ri32},
+{1, 8, X86_SBB64ri8},
+{1, 1, X86_SBB8i8},
+{1, 1, X86_SBB8mi},
+{1, 1, X86_SBB8mi8},
+{1, 1, X86_SBB8ri},
+{1, 1, X86_SBB8ri8},
+{1, 1, X86_SHL16mi},
+{1, 1, X86_SHL16ri},
+{1, 1, X86_SHL32mi},
+{1, 1, X86_SHL32ri},
+{1, 1, X86_SHL64mi},
+{1, 1, X86_SHL64ri},
+{1, 1, X86_SHL8mi},
+{1, 1, X86_SHL8ri},
+{1, 1, X86_SHLD16mri8},
+{1, 1, X86_SHLD16rri8},
+{1, 1, X86_SHLD32mri8},
+{1, 1, X86_SHLD32rri8},
+{1, 1, X86_SHLD64mri8},
+{1, 1, X86_SHLD64rri8},
+{1, 1, X86_SHR16mi},
+{1, 1, X86_SHR16ri},
+{1, 1, X86_SHR32mi},
+{1, 1, X86_SHR32ri},
+{1, 1, X86_SHR64mi},
+{1, 1, X86_SHR64ri},
+{1, 1, X86_SHR8mi},
+{1, 1, X86_SHR8ri},
+{1, 1, X86_SHRD16mri8},
+{1, 1, X86_SHRD16rri8},
+{1, 1, X86_SHRD32mri8},
+{1, 1, X86_SHRD32rri8},
+{1, 1, X86_SHRD64mri8},
+{1, 1, X86_SHRD64rri8},
+{2, 2, X86_SUB16i16},
+{2, 2, X86_SUB16mi},
+{1, 2, X86_SUB16mi8},
+{2, 2, X86_SUB16ri},
+{1, 2, X86_SUB16ri8},
+{4, 4, X86_SUB32i32},
+{4, 4, X86_SUB32mi},
+{1, 4, X86_SUB32mi8},
+{4, 4, X86_SUB32ri},
+{1, 4, X86_SUB32ri8},
+{4, 8, X86_SUB64i32},
+{4, 8, X86_SUB64mi32},
+{1, 8, X86_SUB64mi8},
+{4, 8, X86_SUB64ri32},
+{1, 8, X86_SUB64ri8},
+{1, 1, X86_SUB8i8},
+{1, 1, X86_SUB8mi},
+{1, 1, X86_SUB8mi8},
+{1, 1, X86_SUB8ri},
+{1, 1, X86_SUB8ri8},
+{8, 8, X86_TCRETURNdi64},
+{8, 8, X86_TCRETURNmi64},
+{8, 8, X86_TCRETURNri64},
+{2, 2, X86_TEST16i16},
+{2, 2, X86_TEST16mi},
+{2, 2, X86_TEST16mi_alt},
+{2, 2, X86_TEST16ri},
+{2, 2, X86_TEST16ri_alt},
+{4, 4, X86_TEST32i32},
+{4, 4, X86_TEST32mi},
+{4, 4, X86_TEST32mi_alt},
+{4, 4, X86_TEST32ri},
+{4, 4, X86_TEST32ri_alt},
+{4, 8, X86_TEST64i32},
+{4, 8, X86_TEST64mi32},
+{4, 4, X86_TEST64mi32_alt},
+{4, 8, X86_TEST64ri32},
+{4, 4, X86_TEST64ri32_alt},
+{1, 1, X86_TEST8i8},
+{1, 1, X86_TEST8mi},
+{1, 1, X86_TEST8mi_alt},
+{1, 1, X86_TEST8ri},
+{1, 1, X86_TEST8ri_NOREX},
+{1, 1, X86_TEST8ri_alt},
+{2, 2, X86_XOR16i16},
+{2, 2, X86_XOR16mi},
+{1, 2, X86_XOR16mi8},
+{2, 2, X86_XOR16ri},
+{1, 2, X86_XOR16ri8},
+{4, 4, X86_XOR32i32},
+{4, 4, X86_XOR32mi},
+{1, 4, X86_XOR32mi8},
+{4, 4, X86_XOR32ri},
+{1, 4, X86_XOR32ri8},
+{4, 8, X86_XOR64i32},
+{4, 8, X86_XOR64mi32},
+{1, 8, X86_XOR64mi8},
+{4, 8, X86_XOR64ri32},
+{1, 8, X86_XOR64ri8},
+{1, 1, X86_XOR8i8},
+{1, 1, X86_XOR8mi},
+{1, 1, X86_XOR8mi8},
+{1, 1, X86_XOR8ri},
+{1, 1, X86_XOR8ri8},
diff --git a/arch/X86/X86IntelInstPrinter.c b/arch/X86/X86IntelInstPrinter.c
index 29a1f15..a9ca606 100644
--- a/arch/X86/X86IntelInstPrinter.c
+++ b/arch/X86/X86IntelInstPrinter.c
@@ -803,7 +803,7 @@
MCOperand *Op = MCInst_getOperand(MI, OpNo);
if (MCOperand_isImm(Op)) {
int64_t imm = MCOperand_getImm(Op) + MI->flat_insn->size + MI->address;
- int opsize = X86_immediate_size(MI->Opcode);
+ int opsize = X86_immediate_size(MI->Opcode, NULL);
// truncat imm for non-64bit
if (MI->csh->mode != CS_MODE_64) {
@@ -886,7 +886,8 @@
MI->op1_size = MI->csh->regsize_map[reg];
} else if (MCOperand_isImm(Op)) {
int64_t imm = MCOperand_getImm(Op);
- int opsize = X86_immediate_size(MCInst_getOpcode(MI));
+ uint8_t encsize;
+ int opsize = X86_immediate_size(MCInst_getOpcode(MI), &encsize);
if (opsize == 1) // print 1 byte immediate in positive form
imm = imm & 0xff;
@@ -952,7 +953,10 @@
MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].type = X86_OP_IMM;
if (opsize > 0)
- MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size = (uint8_t)opsize;
+ {
+ MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size = opsize;
+ MI->flat_insn->detail->x86.encoding.imm_size = encsize;
+ }
else if (MI->flat_insn->detail->x86.op_count > 0) {
if (MI->flat_insn->id != X86_INS_LCALL && MI->flat_insn->id != X86_INS_LJMP) {
MI->flat_insn->detail->x86.operands[MI->flat_insn->detail->x86.op_count].size =
diff --git a/arch/X86/X86Mapping.c b/arch/X86/X86Mapping.c
index 0c3116a..7431ad1 100644
--- a/arch/X86/X86Mapping.c
+++ b/arch/X86/X86Mapping.c
@@ -3564,14 +3564,15 @@
// map immediate size to instruction id
static struct size_id {
- unsigned char size;
- unsigned short id;
+ uint8_t enc_size;
+ uint8_t size;
+ uint16_t id;
} x86_imm_size[] = {
#include "X86ImmSize.inc"
};
// given the instruction name, return the size of its immediate operand (or 0)
-int X86_immediate_size(unsigned int id)
+int X86_immediate_size(unsigned int id, uint8_t *enc_size)
{
#if 0
// linear searching
@@ -3584,7 +3585,7 @@
}
#endif
- // binary searching since the IDs is sorted in order
+ // binary searching since the IDs are sorted in order
unsigned int left, right, m;
left = 0;
@@ -3593,7 +3594,11 @@
while(left <= right) {
m = (left + right) / 2;
if (id == x86_imm_size[m].id)
+ {
+ if (enc_size != NULL)
+ *enc_size = x86_imm_size[m].enc_size;
return x86_imm_size[m].size;
+ }
if (id < x86_imm_size[m].id)
right = m - 1;
diff --git a/arch/X86/X86Mapping.h b/arch/X86/X86Mapping.h
index a60ea4f..26ab9e3 100644
--- a/arch/X86/X86Mapping.h
+++ b/arch/X86/X86Mapping.h
@@ -69,6 +69,6 @@
cs_regs regs_write, uint8_t *regs_write_count);
// given the instruction id, return the size of its immediate operand (or 0)
-int X86_immediate_size(unsigned int id);
+int X86_immediate_size(unsigned int id, uint8_t *enc_size);
#endif
diff --git a/bindings/python/capstone/__init__.py b/bindings/python/capstone/__init__.py
index bddc7e7..eff46c1 100644
--- a/bindings/python/capstone/__init__.py
+++ b/bindings/python/capstone/__init__.py
@@ -631,7 +631,9 @@
(self.prefix, self.opcode, self.rex, self.addr_size, \
self.modrm, self.sib, self.disp, \
self.sib_index, self.sib_scale, self.sib_base, self.xop_cc, self.sse_cc, \
- self.avx_cc, self.avx_sae, self.avx_rm, self.eflags, self.operands) = x86.get_arch_info(self._raw.detail.contents.arch.x86)
+ self.avx_cc, self.avx_sae, self.avx_rm, self.eflags, \
+ self.modrm_offset, self.disp_offset, self.disp_size, self.imm_offset, self.imm_size, \
+ self.operands) = x86.get_arch_info(self._raw.detail.contents.arch.x86)
elif arch == CS_ARCH_M68K:
(self.operands, self.op_size) = m68k.get_arch_info(self._raw.detail.contents.arch.m68k)
elif arch == CS_ARCH_MIPS:
diff --git a/bindings/python/capstone/x86.py b/bindings/python/capstone/x86.py
index 59aaa4b..63bcd99 100644
--- a/bindings/python/capstone/x86.py
+++ b/bindings/python/capstone/x86.py
@@ -44,6 +44,15 @@
return self.value.mem
+class CsX86Encoding(ctypes.Structure):
+ _fields_ = (
+ ('modrm_offset', ctypes.c_uint8),
+ ('disp_offset', ctypes.c_uint8),
+ ('disp_size', ctypes.c_uint8),
+ ('imm_offset', ctypes.c_uint8),
+ ('imm_size', ctypes.c_uint8),
+ )
+
class CsX86(ctypes.Structure):
_fields_ = (
('prefix', ctypes.c_uint8 * 4),
@@ -52,7 +61,7 @@
('addr_size', ctypes.c_uint8),
('modrm', ctypes.c_uint8),
('sib', ctypes.c_uint8),
- ('disp', ctypes.c_int32),
+ ('disp', ctypes.c_int64),
('sib_index', ctypes.c_uint),
('sib_scale', ctypes.c_int8),
('sib_base', ctypes.c_uint),
@@ -64,11 +73,13 @@
('eflags', ctypes.c_uint64),
('op_count', ctypes.c_uint8),
('operands', X86Op * 8),
+ ('encoding', CsX86Encoding),
)
def get_arch_info(a):
return (a.prefix[:], a.opcode[:], a.rex, a.addr_size, \
a.modrm, a.sib, a.disp, a.sib_index, a.sib_scale, \
a.sib_base, a.xop_cc, a.sse_cc, a.avx_cc, a.avx_sae, a.avx_rm, a.eflags, \
+ a.encoding.modrm_offset, a.encoding.disp_offset, a.encoding.disp_size, a.encoding.imm_offset, a.encoding.imm_size, \
copy_ctypes_list(a.operands[:a.op_count]))
diff --git a/bindings/python/test_x86.py b/bindings/python/test_x86.py
index 9c1a0e1..2dd74a1 100755
--- a/bindings/python/test_x86.py
+++ b/bindings/python/test_x86.py
@@ -7,9 +7,9 @@
from xprint import to_hex, to_x, to_x_32
-X86_CODE64 = b"\x55\x48\x8b\x05\xb8\x13\x00\x00\x8f\xe8\x60\xcd\xe2\x07"
-X86_CODE16 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00\x05\x23\x01\x00\x00\x36\x8b\x84\x91\x23\x01\x00\x00\x41\x8d\x84\x39\x89\x67\x00\x00\x8d\x87\x89\x67\x00\x00\xb4\xc6"
-X86_CODE32 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00\x05\x23\x01\x00\x00\x36\x8b\x84\x91\x23\x01\x00\x00\x41\x8d\x84\x39\x89\x67\x00\x00\x8d\x87\x89\x67\x00\x00\xb4\xc6"
+X86_CODE64 = b"\x55\x48\x8b\x05\xb8\x13\x00\x00\xe9\xea\xbe\xad\xde\xff\x25\x23\x01\x00\x00\xe8\xdf\xbe\xad\xde\x74\xff"
+X86_CODE16 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00\x05\x23\x01\x00\x00\x36\x8b\x84\x91\x23\x01\x00\x00\x41\x8d\x84\x39\x89\x67\x00\x00\x8d\x87\x89\x67\x00\x00\xb4\xc6\x66\xe9\xb8\x00\x00\x00\x67\xff\xa0\x23\x01\x00\x00\x66\xe8\xcb\x00\x00\x00\x74\xfc"
+X86_CODE32 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00\x05\x23\x01\x00\x00\x36\x8b\x84\x91\x23\x01\x00\x00\x41\x8d\x84\x39\x89\x67\x00\x00\x8d\x87\x89\x67\x00\x00\xb4\xc6\xe9\xea\xbe\xad\xde\xff\xa0\x23\x01\x00\x00\xe8\xdf\xbe\xad\xde\x74\xff"
all_tests = (
(CS_ARCH_X86, CS_MODE_16, X86_CODE16, "X86 16bit (Intel syntax)", None),
@@ -145,9 +145,21 @@
# print modRM byte
print("\tmodrm: 0x%x" % (insn.modrm))
+ # print modRM offset
+ if insn.modrm_offset != 0:
+ print("\tmodrm_offset: 0x%x" % (insn.modrm_offset))
+
# print displacement value
print("\tdisp: 0x%s" % to_x_32(insn.disp))
+ # print displacement offset (offset into instruction bytes)
+ if insn.disp_offset != 0:
+ print("\tdisp_offset: 0x%x" % (insn.disp_offset))
+
+ # print displacement size
+ if insn.disp_size != 0:
+ print("\tdisp_size: 0x%x" % (insn.disp_size))
+
# SIB is not available in 16-bit mode
if (mode & CS_MODE_16 == 0):
# print SIB byte
@@ -186,6 +198,10 @@
for i in range(count):
op = insn.op_find(X86_OP_IMM, i + 1)
print("\t\timms[%u]: 0x%s" % (i + 1, to_x(op.imm)))
+ if insn.imm_offset != 0:
+ print("\timm_offset: 0x%x" % (insn.imm_offset))
+ if insn.imm_size != 0:
+ print("\timm_size: 0x%x" % (insn.imm_size))
if len(insn.operands) > 0:
print("\top_count: %u" % len(insn.operands))
diff --git a/cstool/cstool_x86.c b/cstool/cstool_x86.c
index ae55162..f5d2bab 100644
--- a/cstool/cstool_x86.c
+++ b/cstool/cstool_x86.c
@@ -195,7 +195,7 @@
printf("\trex: 0x%x\n", x86->rex);
printf("\taddr_size: %u\n", x86->addr_size);
printf("\tmodrm: 0x%x\n", x86->modrm);
- printf("\tdisp: 0x%x\n", x86->disp);
+ printf("\tdisp: 0x%" PRIx64 "\n", x86->disp);
// SIB is not available in 16-bit mode
if ((mode & CS_MODE_16) == 0) {
diff --git a/include/capstone/x86.h b/include/capstone/x86.h
index 7b280b6..713b461 100644
--- a/include/capstone/x86.h
+++ b/include/capstone/x86.h
@@ -295,6 +295,19 @@
bool avx_zero_opmask;
} cs_x86_op;
+typedef struct cs_x86_encoding {
+ // ModR/M offset, or 0 when irrelevant
+ uint8_t modrm_offset;
+
+ // Displacement offset, or 0 when irrelevant.
+ uint8_t disp_offset;
+ uint8_t disp_size;
+
+ // Immediate offset, or 0 when irrelevant.
+ uint8_t imm_offset;
+ uint8_t imm_size;
+} cs_x86_encoding;
+
// Instruction structure
typedef struct cs_x86 {
// Instruction prefix, which can be up to 4 bytes.
@@ -323,13 +336,12 @@
// SIB value, or 0 when irrelevant.
uint8_t sib;
- // Displacement value, or 0 when irrelevant.
- int32_t disp;
+	// Displacement value, only valid when encoding.disp_offset != 0.
+ int64_t disp;
- /* SIB state */
// SIB index register, or X86_REG_INVALID when irrelevant.
x86_reg sib_index;
- // SIB scale. only applicable if sib_index is relevant.
+ // SIB scale, only applicable if sib_index is valid.
int8_t sib_scale;
// SIB base register, or X86_REG_INVALID when irrelevant.
x86_reg sib_base;
@@ -364,6 +376,8 @@
uint8_t op_count;
cs_x86_op operands[8]; // operands for this instruction.
+
+ cs_x86_encoding encoding; // encoding information
} cs_x86;
//> X86 instructions
diff --git a/tests/test_x86.c b/tests/test_x86.c
index 0281fa9..1fa1b13 100644
--- a/tests/test_x86.c
+++ b/tests/test_x86.c
@@ -152,8 +152,19 @@
printf("\taddr_size: %u\n", x86->addr_size);
printf("\tmodrm: 0x%x\n", x86->modrm);
- printf("\tdisp: 0x%x\n", x86->disp);
-
+ if (x86->encoding.modrm_offset != 0) {
+ printf("\tmodrm_offset: 0x%x\n", x86->encoding.modrm_offset);
+ }
+
+ printf("\tdisp: 0x%" PRIx64 "\n", x86->disp);
+ if (x86->encoding.disp_offset != 0) {
+ printf("\tdisp_offset: 0x%x\n", x86->encoding.disp_offset);
+ }
+
+ if (x86->encoding.disp_size != 0) {
+ printf("\tdisp_size: 0x%x\n", x86->encoding.disp_size);
+ }
+
// SIB is not available in 16-bit mode
if ((mode & CS_MODE_16) == 0) {
printf("\tsib: 0x%x\n", x86->sib);
@@ -197,6 +208,13 @@
for (i = 1; i < count + 1; i++) {
int index = cs_op_index(ud, ins, X86_OP_IMM, i);
printf("\t\timms[%u]: 0x%" PRIx64 "\n", i, x86->operands[index].imm);
+ if (x86->encoding.imm_offset != 0) {
+ printf("\timm_offset: 0x%x\n", x86->encoding.imm_offset);
+ }
+
+ if (x86->encoding.imm_size != 0) {
+ printf("\timm_size: 0x%x\n", x86->encoding.imm_size);
+ }
}
}
@@ -299,11 +317,11 @@
//#define X86_CODE32 "\xa1\x13\x48\x6d\x3a\x8b\x81\x23\x01\x00\x00\x8b\x84\x39\x23\x01\x00\x00"
//#define X86_CODE32 "\xb4\xc6" // mov ah, 0x6c
//#define X86_CODE32 "\x77\x04" // ja +6
-#define X86_CODE64 "\x55\x48\x8b\x05\xb8\x13\x00\x00\x8f\xe8\x60\xcd\xe2\x07"
+#define X86_CODE64 "\x55\x48\x8b\x05\xb8\x13\x00\x00\xe9\xea\xbe\xad\xde\xff\x25\x23\x01\x00\x00\xe8\xdf\xbe\xad\xde\x74\xff"
//#define X86_CODE64 "\xe9\x79\xff\xff\xff" // jmp 0xf7e
-#define X86_CODE16 "\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00\x05\x23\x01\x00\x00\x36\x8b\x84\x91\x23\x01\x00\x00\x41\x8d\x84\x39\x89\x67\x00\x00\x8d\x87\x89\x67\x00\x00\xb4\xc6"
-#define X86_CODE32 "\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00\x05\x23\x01\x00\x00\x36\x8b\x84\x91\x23\x01\x00\x00\x41\x8d\x84\x39\x89\x67\x00\x00\x8d\x87\x89\x67\x00\x00\xb4\xc6"
+#define X86_CODE16 "\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00\x05\x23\x01\x00\x00\x36\x8b\x84\x91\x23\x01\x00\x00\x41\x8d\x84\x39\x89\x67\x00\x00\x8d\x87\x89\x67\x00\x00\xb4\xc6\x66\xe9\xb8\x00\x00\x00\x67\xff\xa0\x23\x01\x00\x00\x66\xe8\xcb\x00\x00\x00\x74\xfc"
+#define X86_CODE32 "\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00\x05\x23\x01\x00\x00\x36\x8b\x84\x91\x23\x01\x00\x00\x41\x8d\x84\x39\x89\x67\x00\x00\x8d\x87\x89\x67\x00\x00\xb4\xc6\xe9\xea\xbe\xad\xde\xff\xa0\x23\x01\x00\x00\xe8\xdf\xbe\xad\xde\x74\xff"
//#define X86_CODE32 "\x05\x23\x01\x00\x00\x0f\x01\xda"
//#define X86_CODE32 "\x0f\xa7\xc0" // xstorerng
//#define X86_CODE32 "\x64\xa1\x18\x00\x00\x00" // mov eax, dword ptr fs:[18]