/* tc-i386.c -- Assemble code for the Intel 80386
Copyright (C) 1989-2016 Free Software Foundation, Inc.
This file is part of GAS, the GNU Assembler.
GAS is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GAS; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
02110-1301, USA. */
/* Intel 80386 machine specific gas.
Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
x86_64 support by Jan Hubicka (jh@suse.cz)
VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
Bugs & suggestions are completely welcome. This is free software.
Please help us make it better. */
#include "as.h"
#include "safe-ctype.h"
#include "subsegs.h"
#include "dwarf2dbg.h"
#include "dw2gencfi.h"
#include "elf/x86-64.h"
#include "opcodes/i386-init.h"
#ifndef REGISTER_WARNINGS
#define REGISTER_WARNINGS 1
#endif
#ifndef INFER_ADDR_PREFIX
#define INFER_ADDR_PREFIX 1
#endif
#ifndef DEFAULT_ARCH
#define DEFAULT_ARCH "i386"
#endif
#ifndef INLINE
#if __GNUC__ >= 2
#define INLINE __inline__
#else
#define INLINE
#endif
#endif
/* Prefixes will be emitted in the order defined below.
WAIT_PREFIX must be the first prefix since FWAIT is really an
instruction, and so must come before any prefixes.
The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
#define WAIT_PREFIX 0
#define SEG_PREFIX 1
#define ADDR_PREFIX 2
#define DATA_PREFIX 3
#define REP_PREFIX 4
#define HLE_PREFIX REP_PREFIX
#define BND_PREFIX REP_PREFIX
#define LOCK_PREFIX 5
#define REX_PREFIX 6 /* must come last. */
#define MAX_PREFIXES 7 /* max prefixes per opcode */
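/* Illustrative sketch (not lifted from the code below): for
   "lock addw %ax, %fs:(%esi)" in 32-bit mode the parser ends up with
   0x64 in prefix[SEG_PREFIX], 0x66 in prefix[DATA_PREFIX] and 0xf0 in
   prefix[LOCK_PREFIX]; emitting the slots in ascending index order then
   produces the preferred 64 66 f0 sequence in front of the opcode.  */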
/* we define the syntax here (modulo base,index,scale syntax) */
#define REGISTER_PREFIX '%'
#define IMMEDIATE_PREFIX '$'
#define ABSOLUTE_PREFIX '*'
/* these are the instruction mnemonic suffixes in AT&T syntax or
memory operand size in Intel syntax. */
#define WORD_MNEM_SUFFIX 'w'
#define BYTE_MNEM_SUFFIX 'b'
#define SHORT_MNEM_SUFFIX 's'
#define LONG_MNEM_SUFFIX 'l'
#define QWORD_MNEM_SUFFIX 'q'
#define XMMWORD_MNEM_SUFFIX 'x'
#define YMMWORD_MNEM_SUFFIX 'y'
#define ZMMWORD_MNEM_SUFFIX 'z'
/* Intel Syntax. Use a non-ascii letter since it never appears
in instructions. */
#define LONG_DOUBLE_MNEM_SUFFIX '\1'
#define END_OF_INSN '\0'
/*
'templates' is for grouping together 'template' structures for opcodes
of the same name. This is only used for storing the insns in the grand
ole hash table of insns.
The templates themselves start at START and range up to (but not including)
END.
*/
typedef struct
{
const insn_template *start;
const insn_template *end;
}
templates;
/* 386 operand encoding bytes: see 386 book for details of this. */
typedef struct
{
unsigned int regmem; /* codes register or memory operand */
unsigned int reg; /* codes register operand (or extended opcode) */
unsigned int mode; /* how to interpret regmem & reg */
}
modrm_byte;
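/* For reference, the hardware packs these fields as
   (mode << 6) | (reg << 3) | regmem.  E.g. "movl %eax,(%ebx)" uses
   mode 0, reg 0 (%eax) and regmem 3 (%ebx), i.e. ModRM byte 0x03
   following the 0x89 opcode.  */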
/* x86-64 extension prefix. */
typedef int rex_byte;
/* 386 opcode byte to code indirect addressing. */
typedef struct
{
unsigned base;
unsigned index;
unsigned scale;
}
sib_byte;
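/* Packed as (scale << 6) | (index << 3) | base, with SCALE holding the
   log2 of the scale factor.  E.g. the memory operand "(%ebx,%ecx,4)"
   uses scale 2, index 1 (%ecx) and base 3 (%ebx), i.e. SIB byte 0x8b.  */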
/* x86 arch names, types and features */
typedef struct
{
const char *name; /* arch name */
unsigned int len; /* arch string length */
enum processor_type type; /* arch type */
i386_cpu_flags flags; /* cpu feature flags */
unsigned int skip; /* show_arch should skip this. */
}
arch_entry;
/* Used to turn off indicated flags. */
typedef struct
{
const char *name; /* arch name */
unsigned int len; /* arch string length */
i386_cpu_flags flags; /* cpu feature flags */
}
noarch_entry;
static void update_code_flag (int, int);
static void set_code_flag (int);
static void set_16bit_gcc_code_flag (int);
static void set_intel_syntax (int);
static void set_intel_mnemonic (int);
static void set_allow_index_reg (int);
static void set_check (int);
static void set_cpu_arch (int);
#ifdef TE_PE
static void pe_directive_secrel (int);
#endif
static void signed_cons (int);
static char *output_invalid (int c);
static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
const char *);
static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
const char *);
static int i386_att_operand (char *);
static int i386_intel_operand (char *, int);
static int i386_intel_simplify (expressionS *);
static int i386_intel_parse_name (const char *, expressionS *);
static const reg_entry *parse_register (char *, char **);
static char *parse_insn (char *, char *);
static char *parse_operands (char *, const char *);
static void swap_operands (void);
static void swap_2_operands (int, int);
static void optimize_imm (void);
static void optimize_disp (void);
static const insn_template *match_template (char);
static int check_string (void);
static int process_suffix (void);
static int check_byte_reg (void);
static int check_long_reg (void);
static int check_qword_reg (void);
static int check_word_reg (void);
static int finalize_imm (void);
static int process_operands (void);
static const seg_entry *build_modrm_byte (void);
static void output_insn (void);
static void output_imm (fragS *, offsetT);
static void output_disp (fragS *, offsetT);
#ifndef I386COFF
static void s_bss (int);
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
static void handle_large_common (int small ATTRIBUTE_UNUSED);
#endif
static const char *default_arch = DEFAULT_ARCH;
/* This struct describes rounding control and SAE in the instruction. */
struct RC_Operation
{
enum rc_type
{
rne = 0,
rd,
ru,
rz,
saeonly
} type;
int operand;
};
static struct RC_Operation rc_op;
/* The struct describes masking, applied to OPERAND in the instruction.
MASK is a pointer to the corresponding mask register. ZEROING tells
whether merging or zeroing mask is used. */
struct Mask_Operation
{
const reg_entry *mask;
unsigned int zeroing;
/* The operand this operation is associated with.  */
int operand;
};
static struct Mask_Operation mask_op;
/* The struct describes broadcasting, applied to OPERAND.  TYPE is the
   broadcast factor.  */
struct Broadcast_Operation
{
/* Type of broadcast: no broadcast, {1to8}, or {1to16}. */
int type;
/* Index of broadcasted operand. */
int operand;
};
static struct Broadcast_Operation broadcast_op;
/* VEX prefix. */
typedef struct
{
/* The VEX prefix is either 2 or 3 bytes; the EVEX prefix is 4 bytes.  */
unsigned char bytes[4];
unsigned int length;
/* Destination or source register specifier. */
const reg_entry *register_specifier;
} vex_prefix;
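/* For orientation: a 2-byte VEX prefix starts with 0xc5, a 3-byte VEX
   prefix with 0xc4 and a 4-byte EVEX prefix with 0x62; the prefix
   building code later in this file fills BYTES and LENGTH accordingly.  */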
/* 'md_assemble ()' gathers together information and puts it into a
i386_insn. */
union i386_op
{
expressionS *disps;
expressionS *imms;
const reg_entry *regs;
};
enum i386_error
{
operand_size_mismatch,
operand_type_mismatch,
register_type_mismatch,
number_of_operands_mismatch,
invalid_instruction_suffix,
bad_imm4,
old_gcc_only,
unsupported_with_intel_mnemonic,
unsupported_syntax,
unsupported,
invalid_vsib_address,
invalid_vector_register_set,
unsupported_vector_index_register,
unsupported_broadcast,
broadcast_not_on_src_operand,
broadcast_needed,
unsupported_masking,
mask_not_on_destination,
no_default_mask,
unsupported_rc_sae,
rc_sae_operand_not_last_imm,
invalid_register_operand,
try_vector_disp8
};
struct _i386_insn
{
/* TM holds the template for the insn we're currently assembling.  */
insn_template tm;
/* SUFFIX holds the instruction size suffix for byte, word, dword
or qword, if given. */
char suffix;
/* OPERANDS gives the number of given operands. */
unsigned int operands;
/* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
of given register, displacement, memory operands and immediate
operands. */
unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
/* TYPES [i] is the type (see above #defines) which tells us how to
use OP[i] for the corresponding operand. */
i386_operand_type types[MAX_OPERANDS];
/* Displacement expression, immediate expression, or register for each
operand. */
union i386_op op[MAX_OPERANDS];
/* Flags for operands. */
unsigned int flags[MAX_OPERANDS];
#define Operand_PCrel 1
/* Relocation type for operand */
enum bfd_reloc_code_real reloc[MAX_OPERANDS];
/* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
the base index byte below. */
const reg_entry *base_reg;
const reg_entry *index_reg;
unsigned int log2_scale_factor;
/* SEG gives the seg_entries of this insn. They are zero unless
explicit segment overrides are given. */
const seg_entry *seg[2];
/* Copied first memory operand string, for re-checking. */
char *memop1_string;
/* PREFIX holds all the given prefix opcodes (usually null).
PREFIXES is the number of prefix opcodes. */
unsigned int prefixes;
unsigned char prefix[MAX_PREFIXES];
/* RM and SIB are the modrm byte and the sib byte where the
addressing modes of this insn are encoded. */
modrm_byte rm;
rex_byte rex;
rex_byte vrex;
sib_byte sib;
vex_prefix vex;
/* Masking attributes. */
struct Mask_Operation *mask;
/* Rounding control and SAE attributes. */
struct RC_Operation *rounding;
/* Broadcasting attributes. */
struct Broadcast_Operation *broadcast;
/* Compressed disp8*N attribute. */
unsigned int memshift;
/* Swap operand in encoding. */
unsigned int swap_operand;
/* Prefer 8bit or 32bit displacement in encoding. */
enum
{
disp_encoding_default = 0,
disp_encoding_8bit,
disp_encoding_32bit
} disp_encoding;
/* REP prefix. */
const char *rep_prefix;
/* HLE prefix. */
const char *hle_prefix;
/* Have BND prefix. */
const char *bnd_prefix;
/* Need VREX to support upper 16 registers. */
int need_vrex;
/* Error message. */
enum i386_error error;
};
typedef struct _i386_insn i386_insn;
/* Link an RC type with its corresponding string, which is looked for
   in the assembly source.  */
struct RC_name
{
enum rc_type type;
const char *name;
unsigned int len;
};
static const struct RC_name RC_NamesTable[] =
{
{ rne, STRING_COMMA_LEN ("rn-sae") },
{ rd, STRING_COMMA_LEN ("rd-sae") },
{ ru, STRING_COMMA_LEN ("ru-sae") },
{ rz, STRING_COMMA_LEN ("rz-sae") },
{ saeonly, STRING_COMMA_LEN ("sae") },
};
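/* These strings are what gets looked for when parsing the "{...}"
   rounding / SAE pseudo-operands in the source, e.g. (illustrative)
   "vaddps {rz-sae}, %zmm0, %zmm1, %zmm2" selects the rz entry above.  */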
/* List of chars besides those in app.c:symbol_chars that can start an
operand. Used to prevent the scrubber eating vital white-space. */
const char extra_symbol_chars[] = "*%-([{"
#ifdef LEX_AT
"@"
#endif
#ifdef LEX_QM
"?"
#endif
;
#if (defined (TE_I386AIX) \
|| ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
&& !defined (TE_GNU) \
&& !defined (TE_LINUX) \
&& !defined (TE_NACL) \
&& !defined (TE_NETWARE) \
&& !defined (TE_FreeBSD) \
&& !defined (TE_DragonFly) \
&& !defined (TE_NetBSD)))
/* This array holds the chars that always start a comment. If the
pre-processor is disabled, these aren't very useful. The option
--divide will remove '/' from this list. */
const char *i386_comment_chars = "#/";
#define SVR4_COMMENT_CHARS 1
#define PREFIX_SEPARATOR '\\'
#else
const char *i386_comment_chars = "#";
#define PREFIX_SEPARATOR '/'
#endif
/* This array holds the chars that only start a comment at the beginning of
a line. If the line seems to have the form '# 123 filename'
.line and .file directives will appear in the pre-processed output.
Note that input_file.c hand checks for '#' at the beginning of the
first line of the input file. This is because the compiler outputs
#NO_APP at the beginning of its output.
Also note that comments started like this one will always work if
'/' isn't otherwise defined. */
const char line_comment_chars[] = "#/";
const char line_separator_chars[] = ";";
/* Chars that can be used to separate mant from exp in floating point
nums. */
const char EXP_CHARS[] = "eE";
/* Chars that mean this number is a floating point constant
As in 0f12.456
or 0d1.2345e12. */
const char FLT_CHARS[] = "fFdDxX";
/* Tables for lexical analysis. */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];
static char digit_chars[256];
/* Lexical macros. */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
#define is_operand_char(x) (operand_chars[(unsigned char) x])
#define is_register_char(x) (register_chars[(unsigned char) x])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) x])
#define is_digit_char(x) (digit_chars[(unsigned char) x])
/* All non-digit non-letter characters that may occur in an operand. */
static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
/* md_assemble() always leaves the strings it's passed unaltered. To
effect this we maintain a stack of saved characters that we've smashed
with '\0's (indicating end of strings for various sub-fields of the
assembler instruction). */
static char save_stack[32];
static char *save_stack_p;
#define END_STRING_AND_SAVE(s) \
do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
#define RESTORE_END_STRING(s) \
do { *(s) = *--save_stack_p; } while (0)
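/* Illustrative usage pattern (the real callers are further down in this
   file): temporarily NUL-terminate a sub-string in place, parse it, and
   then put the smashed character back:

     END_STRING_AND_SAVE (token_end);
     ... parse the now NUL-terminated token ...
     RESTORE_END_STRING (token_end);

   where TOKEN_END is a hypothetical pointer into the insn string.  */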
/* The instruction we're assembling. */
static i386_insn i;
/* Possible templates for current insn. */
static const templates *current_templates;
/* Per instruction expressionS buffers: max displacements & immediates. */
static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
/* Current operand we are working on. */
static int this_operand = -1;
/* We support three different modes. FLAG_CODE variable is used to distinguish
these. */
enum flag_code {
CODE_32BIT,
CODE_16BIT,
CODE_64BIT };
static enum flag_code flag_code;
static unsigned int object_64bit;
static unsigned int disallow_64bit_reloc;
static int use_rela_relocations = 0;
#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
|| defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
|| defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
/* The ELF ABI to use. */
enum x86_elf_abi
{
I386_ABI,
X86_64_ABI,
X86_64_X32_ABI
};
static enum x86_elf_abi x86_elf_abi = I386_ABI;
#endif
#if defined (TE_PE) || defined (TE_PEP)
/* Use big object file format. */
static int use_big_obj = 0;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* 1 if generating code for a shared library. */
static int shared = 0;
#endif
/* 1 for intel syntax,
0 if att syntax. */
static int intel_syntax = 0;
/* 1 for Intel64 ISA,
0 if AMD64 ISA. */
static int intel64;
/* 1 for intel mnemonic,
0 if att mnemonic. */
static int intel_mnemonic = !SYSV386_COMPAT;
/* 1 if we support old (<= 2.8.1) versions of gcc. */
static int old_gcc = OLDGCC_COMPAT;
/* 1 if pseudo registers are permitted. */
static int allow_pseudo_reg = 0;
/* 1 if register prefix % not required. */
static int allow_naked_reg = 0;
/* 1 if the assembler should add BND prefix for all control-transferring
instructions supporting it, even if this prefix wasn't specified
explicitly. */
static int add_bnd_prefix = 0;
/* 1 if pseudo index register, eiz/riz, is allowed. */
static int allow_index_reg = 0;
/* 1 if the assembler should ignore LOCK prefix, even if it was
specified explicitly. */
static int omit_lock_prefix = 0;
/* 1 if the assembler should encode lfence, mfence, and sfence as
"lock addl $0, (%{re}sp)". */
static int avoid_fence = 0;
/* 1 if the assembler should generate relax relocations. */
static int generate_relax_relocations
= DEFAULT_GENERATE_X86_RELAX_RELOCATIONS;
static enum check_kind
{
check_none = 0,
check_warning,
check_error
}
sse_check, operand_check = check_warning;
/* Register prefix used for error message. */
static const char *register_prefix = "%";
/* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
leave, push, and pop instructions so that gcc has the same stack
frame as in 32 bit mode. */
static char stackop_size = '\0';
/* Non-zero to optimize code alignment. */
int optimize_align_code = 1;
/* Non-zero to quieten some warnings. */
static int quiet_warnings = 0;
/* CPU name. */
static const char *cpu_arch_name = NULL;
static char *cpu_sub_arch_name = NULL;
/* CPU feature flags. */
static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
/* If we have selected a cpu we are generating instructions for. */
static int cpu_arch_tune_set = 0;
/* Cpu we are generating instructions for. */
enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
/* CPU feature flags of cpu we are generating instructions for. */
static i386_cpu_flags cpu_arch_tune_flags;
/* CPU instruction set architecture used. */
enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
/* CPU feature flags of instruction set architecture used. */
i386_cpu_flags cpu_arch_isa_flags;
/* If set, conditional jumps are not automatically promoted to handle
   offsets larger than a byte. */
static unsigned int no_cond_jump_promotion = 0;
/* Encode SSE instructions with VEX prefix. */
static unsigned int sse2avx;
/* Encode scalar AVX instructions with specific vector length. */
static enum
{
vex128 = 0,
vex256
} avxscalar;
/* Encode scalar EVEX LIG instructions with specific vector length. */
static enum
{
evexl128 = 0,
evexl256,
evexl512
} evexlig;
/* Encode EVEX WIG instructions with specific evex.w. */
static enum
{
evexw0 = 0,
evexw1
} evexwig;
/* Value to encode in EVEX RC bits, for SAE-only instructions. */
static enum rc_type evexrcig = rne;
/* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
static symbolS *GOT_symbol;
/* The dwarf2 return column, adjusted for 32 or 64 bit. */
unsigned int x86_dwarf2_return_column;
/* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
int x86_cie_data_alignment;
/* Interface to relax_segment.
There are 3 major relax states for 386 jump insns because the
different types of jumps add different sizes to frags when we're
figuring out what sort of jump to choose to reach a given label. */
/* Types. */
#define UNCOND_JUMP 0
#define COND_JUMP 1
#define COND_JUMP86 2
/* Sizes. */
#define CODE16 1
#define SMALL 0
#define SMALL16 (SMALL | CODE16)
#define BIG 2
#define BIG16 (BIG | CODE16)
#ifndef INLINE
#ifdef __GNUC__
#define INLINE __inline__
#else
#define INLINE
#endif
#endif
#define ENCODE_RELAX_STATE(type, size) \
((relax_substateT) (((type) << 2) | (size)))
#define TYPE_FROM_RELAX_STATE(s) \
((s) >> 2)
#define DISP_SIZE_FROM_RELAX_STATE(s) \
((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
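/* Worked example: ENCODE_RELAX_STATE (COND_JUMP, BIG) is (1 << 2) | 2,
   i.e. 6; TYPE_FROM_RELAX_STATE (6) recovers COND_JUMP, and
   DISP_SIZE_FROM_RELAX_STATE (6) yields a 4-byte displacement, while
   the SMALL and SMALL16 states yield a single byte.  */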
/* This table is used by relax_frag to promote short jumps to long
ones where necessary. SMALL (short) jumps may be promoted to BIG
(32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
don't allow a short jump in a 32 bit code segment to be promoted to
a 16 bit offset jump because it's slower (requires data size
prefix), and doesn't work, unless the destination is in the bottom
64k of the code segment (The top 16 bits of eip are zeroed). */
const relax_typeS md_relax_table[] =
{
/* The fields are:
1) most positive reach of this state,
2) most negative reach of this state,
3) how many bytes this mode will have in the variable part of the frag
4) which index into the table to try if we can't fit into this one. */
/* UNCOND_JUMP states. */
{127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
{127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
/* dword jmp adds 4 bytes to frag:
0 extra opcode bytes, 4 displacement bytes. */
{0, 0, 4, 0},
/* word jmp adds 2 bytes to frag:
0 extra opcode bytes, 2 displacement bytes. */
{0, 0, 2, 0},
/* COND_JUMP states. */
{127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
{127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
/* dword conditionals add 5 bytes to frag:
1 extra opcode byte, 4 displacement bytes. */
{0, 0, 5, 0},
/* word conditionals add 3 bytes to frag:
1 extra opcode byte, 2 displacement bytes. */
{0, 0, 3, 0},
/* COND_JUMP86 states. */
{127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
{127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
/* dword conditionals add 5 bytes to frag:
1 extra opcode byte, 4 displacement bytes. */
{0, 0, 5, 0},
/* word conditionals add 4 bytes to frag:
1 displacement byte and a 3 byte long branch insn. */
{0, 0, 4, 0}
};
static const arch_entry cpu_arch[] =
{
/* Do not replace the first two entries - i386_target_format()
relies on them being there in this order. */
{ STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
CPU_GENERIC32_FLAGS, 0 },
{ STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
CPU_GENERIC64_FLAGS, 0 },
{ STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
CPU_NONE_FLAGS, 0 },
{ STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
CPU_I186_FLAGS, 0 },
{ STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
CPU_I286_FLAGS, 0 },
{ STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
CPU_I386_FLAGS, 0 },
{ STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
CPU_I486_FLAGS, 0 },
{ STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
CPU_I586_FLAGS, 0 },
{ STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
CPU_I686_FLAGS, 0 },
{ STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
CPU_I586_FLAGS, 0 },
{ STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
CPU_PENTIUMPRO_FLAGS, 0 },
{ STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
CPU_P2_FLAGS, 0 },
{ STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
CPU_P3_FLAGS, 0 },
{ STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
CPU_P4_FLAGS, 0 },
{ STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
CPU_CORE_FLAGS, 0 },
{ STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
CPU_NOCONA_FLAGS, 0 },
{ STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
CPU_CORE_FLAGS, 1 },
{ STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
CPU_CORE_FLAGS, 0 },
{ STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
CPU_CORE2_FLAGS, 1 },
{ STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
CPU_CORE2_FLAGS, 0 },
{ STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
CPU_COREI7_FLAGS, 0 },
{ STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
CPU_L1OM_FLAGS, 0 },
{ STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
CPU_K1OM_FLAGS, 0 },
{ STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU,
CPU_IAMCU_FLAGS, 0 },
{ STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
CPU_K6_FLAGS, 0 },
{ STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
CPU_K6_2_FLAGS, 0 },
{ STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
CPU_ATHLON_FLAGS, 0 },
{ STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
CPU_K8_FLAGS, 1 },
{ STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
CPU_K8_FLAGS, 0 },
{ STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
CPU_K8_FLAGS, 0 },
{ STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
CPU_AMDFAM10_FLAGS, 0 },
{ STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
CPU_BDVER1_FLAGS, 0 },
{ STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
CPU_BDVER2_FLAGS, 0 },
{ STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
CPU_BDVER3_FLAGS, 0 },
{ STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD,
CPU_BDVER4_FLAGS, 0 },
{ STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER,
CPU_ZNVER1_FLAGS, 0 },
{ STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
CPU_BTVER1_FLAGS, 0 },
{ STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
CPU_BTVER2_FLAGS, 0 },
{ STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
CPU_8087_FLAGS, 0 },
{ STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
CPU_287_FLAGS, 0 },
{ STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
CPU_387_FLAGS, 0 },
{ STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN,
CPU_687_FLAGS, 0 },
{ STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
CPU_MMX_FLAGS, 0 },
{ STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
CPU_SSE_FLAGS, 0 },
{ STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
CPU_SSE2_FLAGS, 0 },
{ STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
CPU_SSE3_FLAGS, 0 },
{ STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
CPU_SSSE3_FLAGS, 0 },
{ STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
CPU_SSE4_1_FLAGS, 0 },
{ STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
CPU_SSE4_2_FLAGS, 0 },
{ STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
CPU_SSE4_2_FLAGS, 0 },
{ STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
CPU_AVX_FLAGS, 0 },
{ STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
CPU_AVX2_FLAGS, 0 },
{ STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
CPU_AVX512F_FLAGS, 0 },
{ STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
CPU_AVX512CD_FLAGS, 0 },
{ STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
CPU_AVX512ER_FLAGS, 0 },
{ STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
CPU_AVX512PF_FLAGS, 0 },
{ STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN,
CPU_AVX512DQ_FLAGS, 0 },
{ STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN,
CPU_AVX512BW_FLAGS, 0 },
{ STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN,
CPU_AVX512VL_FLAGS, 0 },
{ STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
CPU_VMX_FLAGS, 0 },
{ STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
CPU_VMFUNC_FLAGS, 0 },
{ STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
CPU_SMX_FLAGS, 0 },
{ STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
CPU_XSAVE_FLAGS, 0 },
{ STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
CPU_XSAVEOPT_FLAGS, 0 },
{ STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN,
CPU_XSAVEC_FLAGS, 0 },
{ STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN,
CPU_XSAVES_FLAGS, 0 },
{ STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
CPU_AES_FLAGS, 0 },
{ STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
CPU_PCLMUL_FLAGS, 0 },
{ STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
CPU_PCLMUL_FLAGS, 1 },
{ STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
CPU_FSGSBASE_FLAGS, 0 },
{ STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
CPU_RDRND_FLAGS, 0 },
{ STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
CPU_F16C_FLAGS, 0 },
{ STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
CPU_BMI2_FLAGS, 0 },
{ STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
CPU_FMA_FLAGS, 0 },
{ STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
CPU_FMA4_FLAGS, 0 },
{ STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
CPU_XOP_FLAGS, 0 },
{ STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
CPU_LWP_FLAGS, 0 },
{ STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
CPU_MOVBE_FLAGS, 0 },
{ STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
CPU_CX16_FLAGS, 0 },
{ STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
CPU_EPT_FLAGS, 0 },
{ STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
CPU_LZCNT_FLAGS, 0 },
{ STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
CPU_HLE_FLAGS, 0 },
{ STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
CPU_RTM_FLAGS, 0 },
{ STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
CPU_INVPCID_FLAGS, 0 },
{ STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
CPU_CLFLUSH_FLAGS, 0 },
{ STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
CPU_NOP_FLAGS, 0 },
{ STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
CPU_SYSCALL_FLAGS, 0 },
{ STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
CPU_RDTSCP_FLAGS, 0 },
{ STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
CPU_3DNOW_FLAGS, 0 },
{ STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
CPU_3DNOWA_FLAGS, 0 },
{ STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
CPU_PADLOCK_FLAGS, 0 },
{ STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
CPU_SVME_FLAGS, 1 },
{ STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
CPU_SVME_FLAGS, 0 },
{ STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
CPU_SSE4A_FLAGS, 0 },
{ STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
CPU_ABM_FLAGS, 0 },
{ STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
CPU_BMI_FLAGS, 0 },
{ STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
CPU_TBM_FLAGS, 0 },
{ STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
CPU_ADX_FLAGS, 0 },
{ STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
CPU_RDSEED_FLAGS, 0 },
{ STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
CPU_PRFCHW_FLAGS, 0 },
{ STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
CPU_SMAP_FLAGS, 0 },
{ STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
CPU_MPX_FLAGS, 0 },
{ STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
CPU_SHA_FLAGS, 0 },
{ STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN,
CPU_CLFLUSHOPT_FLAGS, 0 },
{ STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN,
CPU_PREFETCHWT1_FLAGS, 0 },
{ STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN,
CPU_SE1_FLAGS, 0 },
{ STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN,
CPU_CLWB_FLAGS, 0 },
{ STRING_COMMA_LEN (".pcommit"), PROCESSOR_UNKNOWN,
CPU_PCOMMIT_FLAGS, 0 },
{ STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN,
CPU_AVX512IFMA_FLAGS, 0 },
{ STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN,
CPU_AVX512VBMI_FLAGS, 0 },
{ STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN,
CPU_CLZERO_FLAGS, 0 },
{ STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN,
CPU_MWAITX_FLAGS, 0 },
{ STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN,
CPU_OSPKE_FLAGS, 0 },
{ STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN,
CPU_RDPID_FLAGS, 0 },
};
static const noarch_entry cpu_noarch[] =
{
{ STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS },
{ STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS },
{ STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS },
{ STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS },
{ STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS },
{ STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS },
{ STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS },
{ STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS },
{ STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS },
{ STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS },
{ STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS },
{ STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS },
{ STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS },
{ STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS },
{ STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS },
{ STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS },
{ STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS },
{ STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS },
{ STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS },
{ STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS },
{ STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS },
{ STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS },
{ STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS },
};
#ifdef I386COFF
/* Like s_lcomm_internal in gas/read.c but the alignment string
is allowed to be optional. */
static symbolS *
pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
{
addressT align = 0;
SKIP_WHITESPACE ();
if (needs_align
&& *input_line_pointer == ',')
{
align = parse_align (needs_align - 1);
if (align == (addressT) -1)
return NULL;
}
else
{
if (size >= 8)
align = 3;
else if (size >= 4)
align = 2;
else if (size >= 2)
align = 1;
else
align = 0;
}
bss_alloc (symbolP, size, align);
return symbolP;
}
static void
pe_lcomm (int needs_align)
{
s_comm_internal (needs_align * 2, pe_lcomm_internal);
}
#endif
const pseudo_typeS md_pseudo_table[] =
{
#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
{"align", s_align_bytes, 0},
#else
{"align", s_align_ptwo, 0},
#endif
{"arch", set_cpu_arch, 0},
#ifndef I386COFF
{"bss", s_bss, 0},
#else
{"lcomm", pe_lcomm, 1},
#endif
{"ffloat", float_cons, 'f'},
{"dfloat", float_cons, 'd'},
{"tfloat", float_cons, 'x'},
{"value", cons, 2},
{"slong", signed_cons, 4},
{"noopt", s_ignore, 0},
{"optim", s_ignore, 0},
{"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
{"code16", set_code_flag, CODE_16BIT},
{"code32", set_code_flag, CODE_32BIT},
{"code64", set_code_flag, CODE_64BIT},
{"intel_syntax", set_intel_syntax, 1},
{"att_syntax", set_intel_syntax, 0},
{"intel_mnemonic", set_intel_mnemonic, 1},
{"att_mnemonic", set_intel_mnemonic, 0},
{"allow_index_reg", set_allow_index_reg, 1},
{"disallow_index_reg", set_allow_index_reg, 0},
{"sse_check", set_check, 0},
{"operand_check", set_check, 1},
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
{"largecomm", handle_large_common, 0},
#else
{"file", (void (*) (int)) dwarf2_directive_file, 0},
{"loc", dwarf2_directive_loc, 0},
{"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
#endif
#ifdef TE_PE
{"secrel32", pe_directive_secrel, 0},
#endif
{0, 0, 0}
};
/* For interface with expression (). */
extern char *input_line_pointer;
/* Hash table for instruction mnemonic lookup. */
static struct hash_control *op_hash;
/* Hash table for register lookup. */
static struct hash_control *reg_hash;
void
i386_align_code (fragS *fragP, int count)
{
/* Various efficient no-op patterns for aligning code labels.
Note: Don't try to assemble the instructions in the comments.
0L and 0w are not legal. */
static const unsigned char f32_1[] =
{0x90}; /* nop */
static const unsigned char f32_2[] =
{0x66,0x90}; /* xchg %ax,%ax */
static const unsigned char f32_3[] =
{0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
static const unsigned char f32_4[] =
{0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
static const unsigned char f32_5[] =
{0x90, /* nop */
0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
static const unsigned char f32_6[] =
{0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
static const unsigned char f32_7[] =
{0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
static const unsigned char f32_8[] =
{0x90, /* nop */
0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
static const unsigned char f32_9[] =
{0x89,0xf6, /* movl %esi,%esi */
0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
static const unsigned char f32_10[] =
{0x8d,0x76,0x00, /* leal 0(%esi),%esi */
0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
static const unsigned char f32_11[] =
{0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
static const unsigned char f32_12[] =
{0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
static const unsigned char f32_13[] =
{0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
static const unsigned char f32_14[] =
{0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
static const unsigned char f16_3[] =
{0x8d,0x74,0x00}; /* lea 0(%si),%si */
static const unsigned char f16_4[] =
{0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
static const unsigned char f16_5[] =
{0x90, /* nop */
0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
static const unsigned char f16_6[] =
{0x89,0xf6, /* mov %si,%si */
0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
static const unsigned char f16_7[] =
{0x8d,0x74,0x00, /* lea 0(%si),%si */
0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
static const unsigned char f16_8[] =
{0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
static const unsigned char jump_31[] =
{0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
static const unsigned char *const f32_patt[] = {
f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
};
static const unsigned char *const f16_patt[] = {
f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
};
/* nopl (%[re]ax) */
static const unsigned char alt_3[] =
{0x0f,0x1f,0x00};
/* nopl 0(%[re]ax) */
static const unsigned char alt_4[] =
{0x0f,0x1f,0x40,0x00};
/* nopl 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_5[] =
{0x0f,0x1f,0x44,0x00,0x00};
/* nopw 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_6[] =
{0x66,0x0f,0x1f,0x44,0x00,0x00};
/* nopl 0L(%[re]ax) */
static const unsigned char alt_7[] =
{0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
/* nopl 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_8[] =
{0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* nopw 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_9[] =
{0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* nopw %cs:0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_10[] =
{0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
static const unsigned char *const alt_patt[] = {
f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
alt_9, alt_10
};
/* Only align for a positive, non-zero count that fits within the maximum. */
if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
return;
/* We need to decide which NOP sequence to use for 32bit and
64bit. When -mtune= is used:
1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
PROCESSOR_GENERIC32, f32_patt will be used.
2. For the rest, alt_patt will be used.
When -mtune= isn't used, alt_patt will be used if
cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
be used.
When -march= or .arch is used, we can't use anything beyond
cpu_arch_isa_flags. */
if (flag_code == CODE_16BIT)
{
if (count > 8)
{
memcpy (fragP->fr_literal + fragP->fr_fix,
jump_31, count);
/* Adjust jump offset. */
fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
}
else
memcpy (fragP->fr_literal + fragP->fr_fix,
f16_patt[count - 1], count);
}
else
{
const unsigned char *const *patt = NULL;
if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
{
/* PROCESSOR_UNKNOWN means that all ISAs may be used. */
switch (cpu_arch_tune)
{
case PROCESSOR_UNKNOWN:
/* We use cpu_arch_isa_flags to check if we SHOULD
optimize with nops. */
if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
patt = alt_patt;
else
patt = f32_patt;
break;
case PROCESSOR_PENTIUM4:
case PROCESSOR_NOCONA:
case PROCESSOR_CORE:
case PROCESSOR_CORE2:
case PROCESSOR_COREI7:
case PROCESSOR_L1OM:
case PROCESSOR_K1OM:
case PROCESSOR_GENERIC64:
case PROCESSOR_K6:
case PROCESSOR_ATHLON:
case PROCESSOR_K8:
case PROCESSOR_AMDFAM10:
case PROCESSOR_BD:
case PROCESSOR_ZNVER:
case PROCESSOR_BT:
patt = alt_patt;
break;
case PROCESSOR_I386:
case PROCESSOR_I486:
case PROCESSOR_PENTIUM:
case PROCESSOR_PENTIUMPRO:
case PROCESSOR_IAMCU:
case PROCESSOR_GENERIC32:
patt = f32_patt;
break;
}
}
else
{
switch (fragP->tc_frag_data.tune)
{
case PROCESSOR_UNKNOWN:
/* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
PROCESSOR_UNKNOWN. */
abort ();
break;
case PROCESSOR_I386:
case PROCESSOR_I486:
case PROCESSOR_PENTIUM:
case PROCESSOR_IAMCU:
case PROCESSOR_K6:
case PROCESSOR_ATHLON:
case PROCESSOR_K8:
case PROCESSOR_AMDFAM10:
case PROCESSOR_BD:
case PROCESSOR_ZNVER:
case PROCESSOR_BT:
case PROCESSOR_GENERIC32:
/* We use cpu_arch_isa_flags to check if we CAN optimize
with nops. */
if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
patt = alt_patt;
else
patt = f32_patt;
break;
case PROCESSOR_PENTIUMPRO:
case PROCESSOR_PENTIUM4:
case PROCESSOR_NOCONA:
case PROCESSOR_CORE:
case PROCESSOR_CORE2:
case PROCESSOR_COREI7:
case PROCESSOR_L1OM:
case PROCESSOR_K1OM:
if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
patt = alt_patt;
else
patt = f32_patt;
break;
case PROCESSOR_GENERIC64:
patt = alt_patt;
break;
}
}
if (patt == f32_patt)
{
/* If the padding is less than the limit computed below, use the
   plain patterns. Otherwise, use a jump instruction and adjust
   its offset. */
int limit;
/* For 64bit, the limit is 3 bytes. */
if (flag_code == CODE_64BIT
&& fragP->tc_frag_data.isa_flags.bitfield.cpulm)
limit = 3;
else
limit = 15;
if (count < limit)
memcpy (fragP->fr_literal + fragP->fr_fix,
patt[count - 1], count);
else
{
memcpy (fragP->fr_literal + fragP->fr_fix,
jump_31, count);
/* Adjust jump offset. */
fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
}
}
else
{
/* Maximum length of an instruction is 10 bytes. If the
padding is greater than 10 bytes and we don't use jump,
we have to break it into smaller pieces. */
int padding = count;
while (padding > 10)
{
padding -= 10;
memcpy (fragP->fr_literal + fragP->fr_fix + padding,
patt [9], 10);
}
if (padding)
memcpy (fragP->fr_literal + fragP->fr_fix,
patt [padding - 1], padding);
}
}
fragP->fr_var = count;
}
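/* Note: the ARRAY_SIZE switches in the operand-type and cpu-flags
   helpers below intentionally fall through from case 3 to case 2 to
   case 1, so that every element of the underlying array is handled
   whatever its compile-time size.  */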
static INLINE int
operand_type_all_zero (const union i386_operand_type *x)
{
switch (ARRAY_SIZE(x->array))
{
case 3:
if (x->array[2])
return 0;
case 2:
if (x->array[1])
return 0;
case 1:
return !x->array[0];
default:
abort ();
}
}
static INLINE void
operand_type_set (union i386_operand_type *x, unsigned int v)
{
switch (ARRAY_SIZE(x->array))
{
case 3:
x->array[2] = v;
case 2:
x->array[1] = v;
case 1:
x->array[0] = v;
break;
default:
abort ();
}
}
static INLINE int
operand_type_equal (const union i386_operand_type *x,
const union i386_operand_type *y)
{
switch (ARRAY_SIZE(x->array))
{
case 3:
if (x->array[2] != y->array[2])
return 0;
case 2:
if (x->array[1] != y->array[1])
return 0;
case 1:
return x->array[0] == y->array[0];
break;
default:
abort ();
}
}
static INLINE int
cpu_flags_all_zero (const union i386_cpu_flags *x)
{
switch (ARRAY_SIZE(x->array))
{
case 3:
if (x->array[2])
return 0;
case 2:
if (x->array[1])
return 0;
case 1:
return !x->array[0];
default:
abort ();
}
}
static INLINE int
cpu_flags_equal (const union i386_cpu_flags *x,
const union i386_cpu_flags *y)
{
switch (ARRAY_SIZE(x->array))
{
case 3:
if (x->array[2] != y->array[2])
return 0;
case 2:
if (x->array[1] != y->array[1])
return 0;
case 1:
return x->array[0] == y->array[0];
break;
default:
abort ();
}
}
static INLINE int
cpu_flags_check_cpu64 (i386_cpu_flags f)
{
return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
|| (flag_code != CODE_64BIT && f.bitfield.cpu64));
}
static INLINE i386_cpu_flags
cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
{
switch (ARRAY_SIZE (x.array))
{
case 3:
x.array [2] &= y.array [2];
case 2:
x.array [1] &= y.array [1];
case 1:
x.array [0] &= y.array [0];
break;
default:
abort ();
}
return x;
}
static INLINE i386_cpu_flags
cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
{
switch (ARRAY_SIZE (x.array))
{
case 3:
x.array [2] |= y.array [2];
case 2:
x.array [1] |= y.array [1];
case 1:
x.array [0] |= y.array [0];
break;
default:
abort ();
}
return x;
}
static INLINE i386_cpu_flags
cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
{
switch (ARRAY_SIZE (x.array))
{
case 3:
x.array [2] &= ~y.array [2];
case 2:
x.array [1] &= ~y.array [1];
case 1:
x.array [0] &= ~y.array [0];
break;
default:
abort ();
}
return x;
}
static int
valid_iamcu_cpu_flags (const i386_cpu_flags *flags)
{
if (cpu_arch_isa == PROCESSOR_IAMCU)
{
static const i386_cpu_flags iamcu_flags = CPU_IAMCU_COMPAT_FLAGS;
i386_cpu_flags compat_flags;
compat_flags = cpu_flags_and_not (*flags, iamcu_flags);
return cpu_flags_all_zero (&compat_flags);
}
else
return 1;
}
#define CPU_FLAGS_ARCH_MATCH 0x1
#define CPU_FLAGS_64BIT_MATCH 0x2
#define CPU_FLAGS_AES_MATCH 0x4
#define CPU_FLAGS_PCLMUL_MATCH 0x8
#define CPU_FLAGS_AVX_MATCH 0x10
#define CPU_FLAGS_32BIT_MATCH \
(CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
| CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
#define CPU_FLAGS_PERFECT_MATCH \
(CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
/* Return CPU flags match bits. */
static int
cpu_flags_match (const insn_template *t)
{
i386_cpu_flags x = t->cpu_flags;
int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
x.bitfield.cpu64 = 0;
x.bitfield.cpuno64 = 0;
if (cpu_flags_all_zero (&x))
{
/* This instruction is available on all archs. */
match |= CPU_FLAGS_32BIT_MATCH;
}
else
{
/* This instruction is available only on some archs. */
i386_cpu_flags cpu = cpu_arch_flags;
cpu = cpu_flags_and (x, cpu);
if (!cpu_flags_all_zero (&cpu))
{
if (x.bitfield.cpuavx)
{
/* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
if (cpu.bitfield.cpuavx)
{
/* Check SSE2AVX. */
if (!t->opcode_modifier.sse2avx || sse2avx)
{
match |= (CPU_FLAGS_ARCH_MATCH
| CPU_FLAGS_AVX_MATCH);
/* Check AES. */
if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
match |= CPU_FLAGS_AES_MATCH;
/* Check PCLMUL. */
if (!x.bitfield.cpupclmul
|| cpu.bitfield.cpupclmul)
match |= CPU_FLAGS_PCLMUL_MATCH;
}
}
else
match |= CPU_FLAGS_ARCH_MATCH;
}
else if (x.bitfield.cpuavx512vl)
{
/* Match AVX512VL. */
if (cpu.bitfield.cpuavx512vl)
{
/* Need another match. */
cpu.bitfield.cpuavx512vl = 0;
if (!cpu_flags_all_zero (&cpu))
match |= CPU_FLAGS_32BIT_MATCH;
else
match |= CPU_FLAGS_ARCH_MATCH;
}
else
match |= CPU_FLAGS_ARCH_MATCH;
}
else
match |= CPU_FLAGS_32BIT_MATCH;
}
}
return match;
}
static INLINE i386_operand_type
operand_type_and (i386_operand_type x, i386_operand_type y)
{
switch (ARRAY_SIZE (x.array))
{
case 3:
x.array [2] &= y.array [2];
case 2:
x.array [1] &= y.array [1];
case 1:
x.array [0] &= y.array [0];
break;
default:
abort ();
}
return x;
}
static INLINE i386_operand_type
operand_type_or (i386_operand_type x, i386_operand_type y)
{
switch (ARRAY_SIZE (x.array))
{
case 3:
x.array [2] |= y.array [2];
case 2:
x.array [1] |= y.array [1];
case 1:
x.array [0] |= y.array [0];
break;
default:
abort ();
}
return x;
}
static INLINE i386_operand_type
operand_type_xor (i386_operand_type x, i386_operand_type y)
{
switch (ARRAY_SIZE (x.array))
{
case 3:
x.array [2] ^= y.array [2];
case 2:
x.array [1] ^= y.array [1];
case 1:
x.array [0] ^= y.array [0];
break;
default:
abort ();
}
return x;
}
static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
static const i386_operand_type control = OPERAND_TYPE_CONTROL;
static const i386_operand_type inoutportreg
= OPERAND_TYPE_INOUTPORTREG;
static const i386_operand_type reg16_inoutportreg
= OPERAND_TYPE_REG16_INOUTPORTREG;
static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
static const i386_operand_type anydisp
= OPERAND_TYPE_ANYDISP;
static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
static const i386_operand_type regzmm = OPERAND_TYPE_REGZMM;
static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
enum operand_type
{
reg,
imm,
disp,
anymem
};
static INLINE int
operand_type_check (i386_operand_type t, enum operand_type c)
{
switch (c)
{
case reg:
return (t.bitfield.reg8
|| t.bitfield.reg16
|| t.bitfield.reg32
|| t.bitfield.reg64);
case imm:
return (t.bitfield.imm8
|| t.bitfield.imm8s
|| t.bitfield.imm16
|| t.bitfield.imm32
|| t.bitfield.imm32s
|| t.bitfield.imm64);
case disp:
return (t.bitfield.disp8
|| t.bitfield.disp16
|| t.bitfield.disp32
|| t.bitfield.disp32s
|| t.bitfield.disp64);
case anymem:
return (t.bitfield.disp8
|| t.bitfield.disp16
|| t.bitfield.disp32
|| t.bitfield.disp32s
|| t.bitfield.disp64
|| t.bitfield.baseindex);
default:
abort ();
}
return 0;
}
/* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
operand J for instruction template T. */
static INLINE int
match_reg_size (const insn_template *t, unsigned int j)
{
return !((i.types[j].bitfield.byte
&& !t->operand_types[j].bitfield.byte)
|| (i.types[j].bitfield.word
&& !t->operand_types[j].bitfield.word)
|| (i.types[j].bitfield.dword
&& !t->operand_types[j].bitfield.dword)
|| (i.types[j].bitfield.qword
&& !t->operand_types[j].bitfield.qword));
}
/* Return 1 if there is no conflict in any size on operand J for
instruction template T. */
static INLINE int
match_mem_size (const insn_template *t, unsigned int j)
{
return (match_reg_size (t, j)
&& !((i.types[j].bitfield.unspecified
&& !i.broadcast
&& !t->operand_types[j].bitfield.unspecified)
|| (i.types[j].bitfield.fword
&& !t->operand_types[j].bitfield.fword)
|| (i.types[j].bitfield.tbyte
&& !t->operand_types[j].bitfield.tbyte)
|| (i.types[j].bitfield.xmmword
&& !t->operand_types[j].bitfield.xmmword)
|| (i.types[j].bitfield.ymmword
&& !t->operand_types[j].bitfield.ymmword)
|| (i.types[j].bitfield.zmmword
&& !t->operand_types[j].bitfield.zmmword)));
}
/* Return 1 if there is no size conflict on any operands for
instruction template T. */
static INLINE int
operand_size_match (const insn_template *t)
{
unsigned int j;
int match = 1;
/* Don't check jump instructions. */
if (t->opcode_modifier.jump
|| t->opcode_modifier.jumpbyte
|| t->opcode_modifier.jumpdword
|| t->opcode_modifier.jumpintersegment)
return match;
/* Check memory and accumulator operand size. */
for (j = 0; j < i.operands; j++)
{
if (t->operand_types[j].bitfield.anysize)
continue;
if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
{
match = 0;
break;
}
if (i.types[j].bitfield.mem && !match_mem_size (t, j))
{
match = 0;
break;
}
}
if (match)
return match;
else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
{
mismatch:
i.error = operand_size_mismatch;
return 0;
}
/* Check reverse. */
gas_assert (i.operands == 2);
match = 1;
for (j = 0; j < 2; j++)
{
if (t->operand_types[j].bitfield.acc
&& !match_reg_size (t, j ? 0 : 1))
goto mismatch;
if (i.types[j].bitfield.mem
&& !match_mem_size (t, j ? 0 : 1))
goto mismatch;
}
return match;
}
static INLINE int
operand_type_match (i386_operand_type overlap,
i386_operand_type given)
{
i386_operand_type temp = overlap;
temp.bitfield.jumpabsolute = 0;
temp.bitfield.unspecified = 0;
temp.bitfield.byte = 0;
temp.bitfield.word = 0;
temp.bitfield.dword = 0;
temp.bitfield.fword = 0;
temp.bitfield.qword = 0;
temp.bitfield.tbyte = 0;
temp.bitfield.xmmword = 0;
temp.bitfield.ymmword = 0;
temp.bitfield.zmmword = 0;
if (operand_type_all_zero (&temp))
goto mismatch;
if (given.bitfield.baseindex == overlap.bitfield.baseindex
&& given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
return 1;
mismatch:
i.error = operand_type_mismatch;
return 0;
}
/* If given types g0 and g1 are registers they must be of the same type
unless the expected operand type register overlap is null.
Note that Acc in a template matches every size of reg. */
static INLINE int
operand_type_register_match (i386_operand_type m0,
i386_operand_type g0,
i386_operand_type t0,
i386_operand_type m1,
i386_operand_type g1,
i386_operand_type t1)
{
if (!operand_type_check (g0, reg))
return 1;
if (!operand_type_check (g1, reg))
return 1;
if (g0.bitfield.reg8 == g1.bitfield.reg8
&& g0.bitfield.reg16 == g1.bitfield.reg16
&& g0.bitfield.reg32 == g1.bitfield.reg32
&& g0.bitfield.reg64 == g1.bitfield.reg64)
return 1;
if (m0.bitfield.acc)
{
t0.bitfield.reg8 = 1;
t0.bitfield.reg16 = 1;
t0.bitfield.reg32 = 1;
t0.bitfield.reg64 = 1;
}
if (m1.bitfield.acc)
{
t1.bitfield.reg8 = 1;
t1.bitfield.reg16 = 1;
t1.bitfield.reg32 = 1;
t1.bitfield.reg64 = 1;
}
if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
&& !(t0.bitfield.reg16 & t1.bitfield.reg16)
&& !(t0.bitfield.reg32 & t1.bitfield.reg32)
&& !(t0.bitfield.reg64 & t1.bitfield.reg64))
return 1;
i.error = register_type_mismatch;
return 0;
}
static INLINE unsigned int
register_number (const reg_entry *r)
{
unsigned int nr = r->reg_num;
if (r->reg_flags & RegRex)
nr += 8;
if (r->reg_flags & RegVRex)
nr += 16;
return nr;
}
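/* Example: %r9 is reg_num 1 with RegRex set, so this yields 9; RegVRex
   likewise lifts the EVEX-only registers into the 16..31 range.  */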
static INLINE unsigned int
mode_from_disp_size (i386_operand_type t)
{
if (t.bitfield.disp8 || t.bitfield.vec_disp8)
return 1;
else if (t.bitfield.disp16
|| t.bitfield.disp32
|| t.bitfield.disp32s)
return 2;
else
return 0;
}
static INLINE int
fits_in_signed_byte (addressT num)
{
return num + 0x80 <= 0xff;
}
static INLINE int
fits_in_unsigned_byte (addressT num)
{
return num <= 0xff;
}
static INLINE int
fits_in_unsigned_word (addressT num)
{
return num <= 0xffff;
}
static INLINE int
fits_in_signed_word (addressT num)
{
return num + 0x8000 <= 0xffff;
}
static INLINE int
fits_in_signed_long (addressT num ATTRIBUTE_UNUSED)
{
#ifndef BFD64
return 1;
#else
return num + 0x80000000 <= 0xffffffff;
#endif
} /* fits_in_signed_long() */
static INLINE int
fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED)
{
#ifndef BFD64
return 1;
#else
return num <= 0xffffffff;
#endif
} /* fits_in_unsigned_long() */
static INLINE int
fits_in_vec_disp8 (offsetT num)
{
int shift = i.memshift;
unsigned int mask;
if (shift == -1)
abort ();
mask = (1 << shift) - 1;
/* Return 0 if NUM isn't properly aligned. */
if ((num & mask))
return 0;
/* Check if NUM will fit in 8bit after shift. */
return fits_in_signed_byte (num >> shift);
}
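/* Example, assuming i.memshift is 4 (disp8*16 compression): 0x40 is a
   multiple of 16 and 0x40 >> 4 == 4 fits in a signed byte, so it
   qualifies; 0x48 does not, since it isn't 16-byte aligned.  */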
static INLINE int
fits_in_imm4 (offsetT num)
{
return (num & 0xf) == num;
}
static i386_operand_type
smallest_imm_type (offsetT num)
{
i386_operand_type t;
operand_type_set (&t, 0);
t.bitfield.imm64 = 1;
if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
{
/* This code is disabled on the 486 because all the Imm1 forms
in the opcode table are slower on the i486. They're the
versions with the implicitly specified single-position
displacement, which has another syntax if you really want to
use that form. */
t.bitfield.imm1 = 1;
t.bitfield.imm8 = 1;
t.bitfield.imm8s = 1;
t.bitfield.imm16 = 1;
t.bitfield.imm32 = 1;
t.bitfield.imm32s = 1;
}
else if (fits_in_signed_byte (num))
{
t.bitfield.imm8 = 1;
t.bitfield.imm8s = 1;
t.bitfield.imm16 = 1;
t.bitfield.imm32 = 1;
t.bitfield.imm32s = 1;
}
else if (fits_in_unsigned_byte (num))
{
t.bitfield.imm8 = 1;
t.bitfield.imm16 = 1;
t.bitfield.imm32 = 1;
t.bitfield.imm32s = 1;
}
else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
{
t.bitfield.imm16 = 1;
t.bitfield.imm32 = 1;
t.bitfield.imm32s = 1;
}
else if (fits_in_signed_long (num))
{
t.bitfield.imm32 = 1;
t.bitfield.imm32s = 1;
}
else if (fits_in_unsigned_long (num))
t.bitfield.imm32 = 1;
return t;
}
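/* Example: smallest_imm_type (200) leaves imm1 and imm8s clear (200
   doesn't fit in a signed byte) but sets imm8, imm16, imm32, imm32s
   and the always-set imm64 bit.  */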
static offsetT
offset_in_range (offsetT val, int size)
{
addressT mask;
switch (size)
{
case 1: mask = ((addressT) 1 << 8) - 1; break;
case 2: mask = ((addressT) 1 << 16) - 1; break;
case 4: mask = ((addressT) 2 << 31) - 1; break;
#ifdef BFD64
case 8: mask = ((addressT) 2 << 63) - 1; break;
#endif
default: abort ();
}
#ifdef BFD64
/* If BFD64, sign extend val for 32bit address mode. */
if (flag_code != CODE_64BIT
|| i.prefix[ADDR_PREFIX])
if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
#endif
if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
{
char buf1[40], buf2[40];
sprint_value (buf1, val);
sprint_value (buf2, val & mask);
as_warn (_("%s shortened to %s"), buf1, buf2);
}
return val & mask;
}
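/* Example: offset_in_range (0x1234, 1) warns that the value was
   shortened and returns 0x34, while offset_in_range (-1, 1) returns
   0xff silently because the discarded bits are all ones.  */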
enum PREFIX_GROUP
{
PREFIX_EXIST = 0,
PREFIX_LOCK,
PREFIX_REP,
PREFIX_OTHER
};
/* Returns
a. PREFIX_EXIST if attempting to add a prefix where one from the
same class already exists.
b. PREFIX_LOCK if lock prefix is added.
c. PREFIX_REP if rep/repne prefix is added.
d. PREFIX_OTHER if other prefix is added.
*/
static enum PREFIX_GROUP
add_prefix (unsigned int prefix)
{
enum PREFIX_GROUP ret = PREFIX_OTHER;
unsigned int q;
if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
&& flag_code == CODE_64BIT)
{
if ((i.prefix[REX_PREFIX] & prefix & REX_W)
|| ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
&& (prefix & (REX_R | REX_X | REX_B))))
ret = PREFIX_EXIST;
q = REX_PREFIX;
}
else
{
switch (prefix)
{
default:
abort ();
case CS_PREFIX_OPCODE:
case DS_PREFIX_OPCODE:
case ES_PREFIX_OPCODE:
case FS_PREFIX_OPCODE:
case GS_PREFIX_OPCODE:
case SS_PREFIX_OPCODE:
q = SEG_PREFIX;
break;
case REPNE_PREFIX_OPCODE:
case REPE_PREFIX_OPCODE:
q = REP_PREFIX;
ret = PREFIX_REP;
break;
case LOCK_PREFIX_OPCODE:
q = LOCK_PREFIX;
ret = PREFIX_LOCK;
break;
case FWAIT_OPCODE:
q = WAIT_PREFIX;
break;
case ADDR_PREFIX_OPCODE:
q = ADDR_PREFIX;
break;
case DATA_PREFIX_OPCODE:
q = DATA_PREFIX;
break;
}
if (i.prefix[q] != 0)
ret = PREFIX_EXIST;
}
if (ret)
{
if (!i.prefix[q])
++i.prefixes;
i.prefix[q] |= prefix;
}
else
as_bad (_("same type of prefix used twice"));
return ret;
}
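/* Example: add_prefix (LOCK_PREFIX_OPCODE) records 0xf0 in
   i.prefix[LOCK_PREFIX] and returns PREFIX_LOCK; a second LOCK prefix
   on the same insn returns PREFIX_EXIST and triggers the "same type of
   prefix used twice" diagnostic.  */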
static void
update_code_flag (int value, int check)
{
PRINTF_LIKE ((*as_error));
flag_code = (enum flag_code) value;
if (flag_code == CODE_64BIT)
{
cpu_arch_flags.bitfield.cpu64 = 1;
cpu_arch_flags.bitfield.cpuno64 = 0;
}
else
{
cpu_arch_flags.bitfield.cpu64 = 0;
cpu_arch_flags.bitfield.cpuno64 = 1;
}
if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
{
if (check)
as_error = as_fatal;
else
as_error = as_bad;
(*as_error) (_("64bit mode not supported on `%s'."),
cpu_arch_name ? cpu_arch_name : default_arch);
}
if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
{
if (check)
as_error = as_fatal;
else
as_error = as_bad;
(*as_error) (_("32bit mode not supported on `%s'."),
cpu_arch_name ? cpu_arch_name : default_arch);
}
stackop_size = '\0';
}
static void
set_code_flag (int value)
{
update_code_flag (value, 0);
}
static void
set_16bit_gcc_code_flag (int new_code_flag)
{
flag_code = (enum flag_code) new_code_flag;
if (flag_code != CODE_16BIT)
abort ();
cpu_arch_flags.bitfield.cpu64 = 0;
cpu_arch_flags.bitfield.cpuno64 = 1;
stackop_size = LONG_MNEM_SUFFIX;
}
static void
set_intel_syntax (int syntax_flag)
{
/* Find out if register prefixing is specified. */
int ask_naked_reg = 0;
SKIP_WHITESPACE ();
if (!is_end_of_line[(unsigned char) *input_line_pointer])
{
char *string;
int e = get_symbol_name (&string);
if (strcmp (string, "prefix") == 0)
ask_naked_reg = 1;
else if (strcmp (string, "noprefix") == 0)
ask_naked_reg = -1;
else
as_bad (_("bad argument to syntax directive."));
(void) restore_line_pointer (e);
}
demand_empty_rest_of_line ();
intel_syntax = syntax_flag;
if (ask_naked_reg == 0)
allow_naked_reg = (intel_syntax
&& (bfd_get_symbol_leading_char (stdoutput) != '\0'));
else
allow_naked_reg = (ask_naked_reg < 0);
expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
identifier_chars['$'] = intel_syntax ? '$' : 0;
register_prefix = allow_naked_reg ? "" : "%";
}
static void
set_intel_mnemonic (int mnemonic_flag)
{
intel_mnemonic = mnemonic_flag;
}
static void
set_allow_index_reg (int flag)
{
allow_index_reg = flag;
}
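/* Handler for the .sse_check (WHAT 0) and .operand_check (WHAT 1)
   directives.  The single argument, one of "none", "warning" or
   "error", selects how the corresponding check is reported later
   during assembly, e.g.

	.operand_check error
*/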
static void
set_check (int what)
{
enum check_kind *kind;
const char *str;
if (what)
{
kind = &operand_check;
str = "operand";
}
else
{
kind = &sse_check;
str = "sse";
}
SKIP_WHITESPACE ();
if (!is_end_of_line[(unsigned char) *input_line_pointer])
{
char *string;
int e = get_symbol_name (&string);
if (strcmp (string, "none") == 0)
*kind = check_none;
else if (strcmp (string, "warning") == 0)
*kind = check_warning;
else if (strcmp (string, "error") == 0)
*kind = check_error;
else
as_bad (_("bad argument to %s_check directive."), str);
(void) restore_line_pointer (e);
}
else
as_bad (_("missing argument for %s_check directive"), str);
demand_empty_rest_of_line ();
}
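/* Used by set_cpu_arch below: reject an arch/extension that the
   object being produced cannot use.  When the ELF output machine is
   Intel MCU, L1OM or K1OM, NEW_FLAG must carry the matching cpu bit;
   non-ELF output is never restricted here.  */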
static void
check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
static const char *arch;
/* Intel MCU, L1OM and K1OM are only supported on ELF.  */
if (!IS_ELF)
return;
if (!arch)
{
/* Use cpu_arch_name if it is set in md_parse_option. Otherwise
use default_arch. */
arch = cpu_arch_name;
if (!arch)
arch = default_arch;
}
/* If we are targeting Intel MCU, the requested arch/extension must
   support it.  */
if (get_elf_backend_data (stdoutput)->elf_machine_code == EM_IAMCU
    && !new_flag.bitfield.cpuiamcu)
  as_bad (_("`%s' is not supported on `%s'"), name, arch);
/* Likewise when targeting Intel L1OM.  */
else if (get_elf_backend_data (stdoutput)->elf_machine_code == EM_L1OM
	 && !new_flag.bitfield.cpul1om)
  as_bad (_("`%s' is not supported on `%s'"), name, arch);
/* Likewise when targeting Intel K1OM.  */
else if (get_elf_backend_data (stdoutput)->elf_machine_code == EM_K1OM
	 && !new_flag.bitfield.cpuk1om)
  as_bad (_("`%s' is not supported on `%s'"), name, arch);
#endif
}
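/* Handler for the .arch directive.  The argument is either a base
   architecture name, a '.'-prefixed extension to enable, or a
   '.no'-prefixed extension to disable, optionally followed by
   ", jumps" or ", nojumps" to control promotion of out-of-range
   conditional jumps.  The accepted names come from the cpu_arch and
   cpu_noarch tables; for instance

	.arch i686
	.arch .sse4.1
	.arch .nosse
	.arch pentium, nojumps

   are all accepted forms.  */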
static void
set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
{
SKIP_WHITESPACE ();
if (!is_end_of_line[(unsigned char) *input_line_pointer])
{
char *string;
int e = get_symbol_name (&string);
unsigned int j;
i386_cpu_flags flags;
for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
{
if (strcmp (string, cpu_arch[j].name) == 0)
{
check_cpu_arch_compatible (string, cpu_arch[j].flags);
if (*string != '.')
{
cpu_arch_name = cpu_arch[j].name;
cpu_sub_arch_name = NULL;
cpu_arch_flags = cpu_arch[j].flags;
if (flag_code == CODE_64BIT)
{
cpu_arch_flags.bitfield.cpu64 = 1;
cpu_arch_flags.bitfield.cpuno64 = 0;
}
else
{
cpu_arch_flags.bitfield.cpu64 = 0;
cpu_arch_flags.bitfield.cpuno64 = 1;
}
cpu_arch_isa = cpu_arch[j].type;
cpu_arch_isa_flags = cpu_arch[j].flags;
if (!cpu_arch_tune_set)
{
cpu_arch_tune = cpu_arch_isa;
cpu_arch_tune_flags = cpu_arch_isa_flags;
}
break;
}
flags = cpu_flags_or (cpu_arch_flags,
cpu_arch[j].flags);
if (!valid_iamcu_cpu_flags (&flags))
as_fatal (_("`%s' isn't valid for Intel MCU"),
cpu_arch[j].name);
else if (!cpu_flags_equal (&flags, &cpu_arch_flags))
{
if (cpu_sub_arch_name)
{
char *name = cpu_sub_arch_name;
cpu_sub_arch_name = concat (name,
cpu_arch[j].name,
(const char *) NULL);
free (name);
}
else
cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
cpu_arch_flags = flags;
cpu_arch_isa_flags = flags;
}
(void) restore_line_pointer (e);
demand_empty_rest_of_line ();
return;
}
}
if (*string == '.' && j >= ARRAY_SIZE (cpu_arch))
{
/* Disable an ISA extension.  */
for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
if (strcmp (string + 1, cpu_noarch [j].name) == 0)
{
flags = cpu_flags_and_not (cpu_arch_flags,
cpu_noarch[j].flags);
if (!cpu_flags_equal (&flags, &cpu_arch_flags))
{
if (cpu_sub_arch_name)
{
char *name = cpu_sub_arch_name;
cpu_sub_arch_name = concat (name, string,
(const char *) NULL);
free (name);
}
else
cpu_sub_arch_name = xstrdup (string);
cpu_arch_flags = flags;
cpu_arch_isa_flags = flags;
}
(void) restore_line_pointer (e);
demand_empty_rest_of_line ();
return;
}
j = ARRAY_SIZE (cpu_arch);
}
if (j >= ARRAY_SIZE (cpu_arch))
as_bad (_("no such architecture: `%s'"), string);
*input_line_pointer = e;
}
else
as_bad (_("missing cpu architecture"));
no_cond_jump_promotion = 0;
if (*input_line_pointer == ','
&& !is_end_of_line[(unsigned char) input_line_pointer[1]])
{
char *string;
char e;
++input_line_pointer;
e = get_symbol_name (&string);
if (strcmp (string, "nojumps") == 0)
no_cond_jump_promotion = 1;
else if (strcmp (string, "jumps") == 0)
;
else
as_bad (_("no such architecture modifier: `%s'"), string);
(void) restore_line_pointer (e);
}
demand_empty_rest_of_line ();
}
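/* Return the BFD architecture for the object being produced,
   verifying that L1OM and K1OM are only used for 64-bit ELF and that
   Intel MCU is only used for 32-bit ELF.  */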
enum bfd_architecture
i386_arch (void)
{
if (cpu_arch_isa == PROCESSOR_L1OM)
{
if (OUTPUT_FLAVOR != bfd_target_elf_flavour
|| flag_code != CODE_64BIT)
as_fatal (_("Intel L1OM is 64bit ELF only"));
return bfd_arch_l1om;
}
else if (cpu_arch_isa == PROCESSOR_K1OM)
{
if (OUTPUT_FLAVOR != bfd_target_elf_flavour
|| flag_code != CODE_64BIT)
as_fatal (_("Intel K1OM is 64bit ELF only"));
return bfd_arch_k1om;
}
else if (cpu_arch_isa == PROCESSOR_IAMCU)
{
if (OUTPUT_FLAVOR != bfd_target_elf_flavour
|| flag_code == CODE_64BIT)
as_fatal (_("Intel MCU is 32bit ELF only"));
return bfd_arch_iamcu;
}
else
return bfd_arch_i386;
}
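/* Return the BFD machine number corresponding to default_arch: the
   plain 6-character "x86_64" selects bfd_mach_x86_64 (or the
   L1OM/K1OM machines when that ISA was chosen), any longer spelling
   beginning with "x86_64" (the x32 ILP32 case) selects
   bfd_mach_x64_32, and "i386"/"iamcu" select the 32-bit machines.  */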
unsigned long
i386_mach (void)
{
if (!strncmp (default_arch, "x86_64", 6))
{
if (cpu_arch_isa == PROCESSOR_L1OM)
{
if (OUTPUT_FLAVOR != bfd_target_elf_flavour
|| default_arch[6] != '\0')
as_fatal (_("Intel L1OM is 64bit ELF only"));
return bfd_mach_l1om;
}
else if (cpu_arch_isa == PROCESSOR_K1OM)
{
if (OUTPUT_FLAVOR != bfd_target_elf_flavour
|| default_arch[6] != '\0')
as_fatal (_("Intel K1OM is 64bit ELF only"));
return bfd_mach_k1om;
}
else if (default_arch[6] == '\0')
return bfd_mach_x86_64;
else
return bfd_mach_x64_32;
}
else if (!strcmp (default_arch, "i386")
|| !strcmp (default_arch, "iamcu"))
{
if (cpu_arch_isa == PROCESSOR_IAMCU)
{
if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
as_fatal (_("Intel MCU is 32bit ELF only"));
return bfd_mach_i386_iamcu;
}
else
return bfd_mach_i386_i386;
}
else
as_fatal (_("unknown architecture"));
}
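/* md_begin is the target hook GAS calls once, before any input is
   assembled.  It builds op_hash, mapping each mnemonic to the run of
   templates sharing that name in i386_optab, and reg_hash for
   register names; fills the lexical classification tables
   (mnemonic_chars, register_chars, operand_chars, identifier_chars,
   digit_chars); and sets the DWARF return column and CIE data
   alignment for the selected code size.  */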
void
md_begin (void)
{
const char *hash_err;
/* Initialize op_hash hash table. */
op_hash = hash_new ();
{
const insn_template *optab;
templates *core_optab;
/* Setup for loop. */
optab = i386_optab;
core_optab = XNEW (templates);
core_optab->start = optab;
while (1)
{
++optab;
if (optab->name == NULL
|| strcmp (optab->name, (optab - 1)->name) != 0)
{
/* Different name: finish off the current template list, enter it
   into the hash table, and start a new list.  */
core_optab->end = optab;
hash_err = hash_insert (op_hash,
(optab - 1)->name,
(void *) core_optab);
if (hash_err)
{
as_fatal (_("can't hash %s: %s"),
(optab - 1)->name,
hash_err);
}
if (optab->name == NULL)
break;
core_optab = XNEW (templates);
core_optab->start = optab;
}
}
}
/* Initialize reg_hash hash table. */
reg_hash = hash_new ();
{
const reg_entry *regtab;
unsigned int regtab_size = i386_regtab_size;
for (regtab = i386_regtab; regtab_size--; regtab++)
{
hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
if (hash_err)
as_fatal (_("can't hash %s: %s"),
regtab->reg_name,
hash_err);
}
}
/* Fill in lexical tables: mnemonic_chars, operand_chars. */
{
int c;
char *p;
for (c = 0; c < 256; c++)
{
if (ISDIGIT (c))
{
digit_chars[c] = c;
mnemonic_chars[c] = c;
register_chars[c] = c;
operand_chars[c] = c;
}
else if (ISLOWER (c))
{
mnemonic_chars[c] = c;
register_chars[c] = c;
operand_chars[c] = c;
}
else if (ISUPPER (c))
{
mnemonic_chars[c] = TOLOWER (c);
register_chars[c] = mnemonic_chars[c];
operand_chars[c] = c;
}
else if (c == '{' || c == '}')
operand_chars[c] = c;
if (ISALPHA (c) || ISDIGIT (c))
identifier_chars[c] = c;
else if (c >= 128)
{
identifier_chars[c] = c;
operand_chars[c] = c;
}
}
#ifdef LEX_AT
identifier_chars['@'] = '@';
#endif
#ifdef LEX_QM
identifier_chars['?'] = '?';
operand_chars['?'] = '?';
#endif
digit_chars['-'] = '-';
mnemonic_chars['_'] = '_';
mnemonic_chars['-'] = '-';
mnemonic_chars['.'] = '.';
identifier_chars['_'] = '_';
identifier_chars['.'] = '.';
for (p = operand_special_chars; *p != '\0'; p++)
operand_chars[(unsigned char) *p] = *p;
}
if (flag_code == CODE_64BIT)
{
#if defined (OBJ_COFF) && defined (TE_PE)
x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
? 32 : 16);
#else
x86_dwarf2_return_column = 16;
#endif
x86_cie_data_alignment = -8;
}
else
{
x86_dwarf2_return_column = 8;
x86_cie_data_alignment = -4;
}
}
void
i386_print_statistics (FILE *file)
{
hash_print_statistics (file, "i386 opcode", op_hash);
hash_print_statistics (file, "i386 register", reg_hash);
}
#ifdef DEBUG386
/* Debugging routines for md_assemble. */
static void pte (insn_template *);
static void pt (i386_operand_type);
static void pe (expressionS *);
static void ps (symbolS *);
static void
pi (char *line, i386_insn *x)
{
unsigned int j;
fprintf (stdout, "%s: template ", line);
pte (&x->tm);
fprintf (stdout, " address: base %s index %s scale %x\n",
x->base_reg ? x->base_reg->reg_name : "none",
x->index_reg ? x->index_reg->reg_name : "none",
x->log2_scale_factor);
fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
x->rm.mode, x->rm.reg, x->rm.regmem);
fprintf (stdout, " sib: base %x index %x scale %x\n",
x->sib.base, x->sib.index, x->sib.scale);
fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
(x->rex & REX_W) != 0,
(x->rex & REX_R) != 0,
(x->rex & REX_X) != 0,
(x->rex & REX_B) != 0);
for (j = 0; j < x->operands; j++)
{
fprintf (stdout, " #%d: ", j + 1);
pt (x->types[j]);
fprintf (stdout, "\n");
if (x->types[j].bitfield.reg8
|| x->types[j].bitfield.reg16
|| x->types[j].bitfield.reg32
|| x->types[j].bitfield.reg64
|| x->types[j].bitfield.regmmx
|| x->types[j].bitfield.regxmm
|| x->types[j].bitfield.regymm
|| x->types[j].bitfield.regzmm
|| x->types[j].bitfield.sreg2
|| x->types[j].bitfield.sreg3
|| x->types[j].bitfield.control
|| x->types[j].bitfield.debug
|| x->types[j].bitfield.test)
fprintf (stdout, "%s\n"