1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GAS, the GNU Assembler.
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
23 /* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
31 #include "safe-ctype.h"
33 #include "dwarf2dbg.h"
34 #include "dw2gencfi.h"
35 #include "elf/x86-64.h"
36 #include "opcodes/i386-init.h"
38 #ifndef REGISTER_WARNINGS
39 #define REGISTER_WARNINGS 1
42 #ifndef INFER_ADDR_PREFIX
43 #define INFER_ADDR_PREFIX 1
47 #define DEFAULT_ARCH "i386"
52 #define INLINE __inline__
58 /* Prefixes will be emitted in the order defined below.
59 WAIT_PREFIX must be the first prefix since FWAIT really is an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 REP_PREFIX, LOCK_PREFIX. */
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 /* Intel Syntax. Use a non-ascii letter since it never appears
88 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
90 #define END_OF_INSN '\0'
93 'templates' is for grouping together 'template' structures for opcodes
94 of the same name. This is only used for storing the insns in the grand
95 ole hash table of insns.
96 The templates themselves start at START and range up to (but not including)
101 const insn_template *start;
102 const insn_template *end;
106 /* 386 operand encoding bytes: see 386 book for details of this. */
109 unsigned int regmem; /* codes register or memory operand */
110 unsigned int reg; /* codes register operand (or extended opcode) */
111 unsigned int mode; /* how to interpret regmem & reg */
115 /* x86-64 extension prefix. */
116 typedef int rex_byte;
118 /* 386 opcode byte to code indirect addressing. */
127 /* x86 arch names, types and features */
130 const char *name; /* arch name */
131 unsigned int len; /* arch string length */
132 enum processor_type type; /* arch type */
133 i386_cpu_flags flags; /* cpu feature flags */
134 unsigned int skip; /* show_arch should skip this. */
135 unsigned int negated; /* turn off indicated flags. */
139 static void update_code_flag (int, int);
140 static void set_code_flag (int);
141 static void set_16bit_gcc_code_flag (int);
142 static void set_intel_syntax (int);
143 static void set_intel_mnemonic (int);
144 static void set_allow_index_reg (int);
145 static void set_sse_check (int);
146 static void set_cpu_arch (int);
148 static void pe_directive_secrel (int);
150 static void signed_cons (int);
151 static char *output_invalid (int c);
152 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
154 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
156 static int i386_att_operand (char *);
157 static int i386_intel_operand (char *, int);
158 static int i386_intel_simplify (expressionS *);
159 static int i386_intel_parse_name (const char *, expressionS *);
160 static const reg_entry *parse_register (char *, char **);
161 static char *parse_insn (char *, char *);
162 static char *parse_operands (char *, const char *);
163 static void swap_operands (void);
164 static void swap_2_operands (int, int);
165 static void optimize_imm (void);
166 static void optimize_disp (void);
167 static const insn_template *match_template (void);
168 static int check_string (void);
169 static int process_suffix (void);
170 static int check_byte_reg (void);
171 static int check_long_reg (void);
172 static int check_qword_reg (void);
173 static int check_word_reg (void);
174 static int finalize_imm (void);
175 static int process_operands (void);
176 static const seg_entry *build_modrm_byte (void);
177 static void output_insn (void);
178 static void output_imm (fragS *, offsetT);
179 static void output_disp (fragS *, offsetT);
181 static void s_bss (int);
183 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
184 static void handle_large_common (int small ATTRIBUTE_UNUSED);
187 static const char *default_arch = DEFAULT_ARCH;
192 /* VEX prefix is either 2 byte or 3 byte. */
193 unsigned char bytes[3];
195 /* Destination or source register specifier. */
196 const reg_entry *register_specifier;
199 /* 'md_assemble ()' gathers together information and puts it into a
206 const reg_entry *regs;
211 operand_size_mismatch,
212 operand_type_mismatch,
213 register_type_mismatch,
214 number_of_operands_mismatch,
215 invalid_instruction_suffix,
218 unsupported_with_intel_mnemonic,
225 /* TM holds the template for the insn were currently assembling. */
228 /* SUFFIX holds the instruction size suffix for byte, word, dword
229 or qword, if given. */
232 /* OPERANDS gives the number of given operands. */
233 unsigned int operands;
235 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
236 of given register, displacement, memory operands and immediate
238 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
240 /* TYPES [i] is the type (see above #defines) which tells us how to
241 use OP[i] for the corresponding operand. */
242 i386_operand_type types[MAX_OPERANDS];
244 /* Displacement expression, immediate expression, or register for each
246 union i386_op op[MAX_OPERANDS];
248 /* Flags for operands. */
249 unsigned int flags[MAX_OPERANDS];
250 #define Operand_PCrel 1
252 /* Relocation type for operand */
253 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
255 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
256 the base index byte below. */
257 const reg_entry *base_reg;
258 const reg_entry *index_reg;
259 unsigned int log2_scale_factor;
261 /* SEG gives the seg_entries of this insn. They are zero unless
262 explicit segment overrides are given. */
263 const seg_entry *seg[2];
265 /* PREFIX holds all the given prefix opcodes (usually null).
266 PREFIXES is the number of prefix opcodes. */
267 unsigned int prefixes;
268 unsigned char prefix[MAX_PREFIXES];
270 /* RM and SIB are the modrm byte and the sib byte where the
271 addressing modes of this insn are encoded. */
277 /* Swap operand in encoding. */
278 unsigned int swap_operand;
280 /* Force 32bit displacement in encoding. */
281 unsigned int disp32_encoding;
284 enum i386_error error;
287 typedef struct _i386_insn i386_insn;
289 /* List of chars besides those in app.c:symbol_chars that can start an
290 operand. Used to prevent the scrubber eating vital white-space. */
291 const char extra_symbol_chars[] = "*%-(["
300 #if (defined (TE_I386AIX) \
301 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
302 && !defined (TE_GNU) \
303 && !defined (TE_LINUX) \
304 && !defined (TE_NETWARE) \
305 && !defined (TE_FreeBSD) \
306 && !defined (TE_DragonFly) \
307 && !defined (TE_NetBSD)))
308 /* This array holds the chars that always start a comment. If the
309 pre-processor is disabled, these aren't very useful. The option
310 --divide will remove '/' from this list. */
311 const char *i386_comment_chars = "#/";
312 #define SVR4_COMMENT_CHARS 1
313 #define PREFIX_SEPARATOR '\\'
316 const char *i386_comment_chars = "#";
317 #define PREFIX_SEPARATOR '/'
320 /* This array holds the chars that only start a comment at the beginning of
321 a line. If the line seems to have the form '# 123 filename'
322 .line and .file directives will appear in the pre-processed output.
323 Note that input_file.c hand checks for '#' at the beginning of the
324 first line of the input file. This is because the compiler outputs
325 #NO_APP at the beginning of its output.
326 Also note that comments started like this one will always work if
327 '/' isn't otherwise defined. */
328 const char line_comment_chars[] = "#/";
330 const char line_separator_chars[] = ";";
332 /* Chars that can be used to separate mant from exp in floating point
334 const char EXP_CHARS[] = "eE";
336 /* Chars that mean this number is a floating point constant
339 const char FLT_CHARS[] = "fFdDxX";
341 /* Tables for lexical analysis. */
342 static char mnemonic_chars[256];
343 static char register_chars[256];
344 static char operand_chars[256];
345 static char identifier_chars[256];
346 static char digit_chars[256];
348 /* Lexical macros. */
349 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
350 #define is_operand_char(x) (operand_chars[(unsigned char) x])
351 #define is_register_char(x) (register_chars[(unsigned char) x])
352 #define is_space_char(x) ((x) == ' ')
353 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
354 #define is_digit_char(x) (digit_chars[(unsigned char) x])
356 /* All non-digit non-letter characters that may occur in an operand. */
357 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
359 /* md_assemble() always leaves the strings it's passed unaltered. To
360 effect this we maintain a stack of saved characters that we've smashed
361 with '\0's (indicating end of strings for various sub-fields of the
362 assembler instruction). */
363 static char save_stack[32];
364 static char *save_stack_p;
365 #define END_STRING_AND_SAVE(s) \
366 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
367 #define RESTORE_END_STRING(s) \
368 do { *(s) = *--save_stack_p; } while (0)
370 /* The instruction we're assembling. */
373 /* Possible templates for current insn. */
374 static const templates *current_templates;
376 /* Per instruction expressionS buffers: max displacements & immediates. */
377 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
378 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
380 /* Current operand we are working on. */
381 static int this_operand = -1;
383 /* We support four different modes. FLAG_CODE variable is used to distinguish
391 static enum flag_code flag_code;
392 static unsigned int object_64bit;
393 static int use_rela_relocations = 0;
395 /* The names used to print error messages. */
396 static const char *flag_code_names[] =
403 /* 1 for intel syntax,
405 static int intel_syntax = 0;
407 /* 1 for intel mnemonic,
408 0 if att mnemonic. */
409 static int intel_mnemonic = !SYSV386_COMPAT;
411 /* 1 if support old (<= 2.8.1) versions of gcc. */
412 static int old_gcc = OLDGCC_COMPAT;
414 /* 1 if pseudo registers are permitted. */
415 static int allow_pseudo_reg = 0;
417 /* 1 if register prefix % not required. */
418 static int allow_naked_reg = 0;
420 /* 1 if pseudo index register, eiz/riz, is allowed. */
421 static int allow_index_reg = 0;
431 /* Register prefix used for error message. */
432 static const char *register_prefix = "%";
434 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
435 leave, push, and pop instructions so that gcc has the same stack
436 frame as in 32 bit mode. */
437 static char stackop_size = '\0';
439 /* Non-zero to optimize code alignment. */
440 int optimize_align_code = 1;
442 /* Non-zero to quieten some warnings. */
443 static int quiet_warnings = 0;
446 static const char *cpu_arch_name = NULL;
447 static char *cpu_sub_arch_name = NULL;
449 /* CPU feature flags. */
450 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
452 /* If we have selected a cpu we are generating instructions for. */
453 static int cpu_arch_tune_set = 0;
455 /* Cpu we are generating instructions for. */
456 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
458 /* CPU feature flags of cpu we are generating instructions for. */
459 static i386_cpu_flags cpu_arch_tune_flags;
461 /* CPU instruction set architecture used. */
462 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
464 /* CPU feature flags of instruction set architecture used. */
465 i386_cpu_flags cpu_arch_isa_flags;
467 /* If set, conditional jumps are not automatically promoted to handle
468 larger than a byte offset. */
469 static unsigned int no_cond_jump_promotion = 0;
471 /* Encode SSE instructions with VEX prefix. */
472 static unsigned int sse2avx;
474 /* Encode scalar AVX instructions with specific vector length. */
481 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
482 static symbolS *GOT_symbol;
484 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
485 unsigned int x86_dwarf2_return_column;
487 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
488 int x86_cie_data_alignment;
490 /* Interface to relax_segment.
491 There are 3 major relax states for 386 jump insns because the
492 different types of jumps add different sizes to frags when we're
493 figuring out what sort of jump to choose to reach a given label. */
496 #define UNCOND_JUMP 0
498 #define COND_JUMP86 2
503 #define SMALL16 (SMALL | CODE16)
505 #define BIG16 (BIG | CODE16)
509 #define INLINE __inline__
515 #define ENCODE_RELAX_STATE(type, size) \
516 ((relax_substateT) (((type) << 2) | (size)))
517 #define TYPE_FROM_RELAX_STATE(s) \
519 #define DISP_SIZE_FROM_RELAX_STATE(s) \
520 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
522 /* This table is used by relax_frag to promote short jumps to long
523 ones where necessary. SMALL (short) jumps may be promoted to BIG
524 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
525 don't allow a short jump in a 32 bit code segment to be promoted to
526 a 16 bit offset jump because it's slower (requires data size
527 prefix), and doesn't work, unless the destination is in the bottom
528 64k of the code segment (The top 16 bits of eip are zeroed). */
530 const relax_typeS md_relax_table[] =
533 1) most positive reach of this state,
534 2) most negative reach of this state,
535 3) how many bytes this mode will have in the variable part of the frag
536 4) which index into the table to try if we can't fit into this one. */
538 /* UNCOND_JUMP states. */
539 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
540 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
541 /* dword jmp adds 4 bytes to frag:
542 0 extra opcode bytes, 4 displacement bytes. */
544 /* word jmp adds 2 bytes to frag:
545 0 extra opcode bytes, 2 displacement bytes. */
548 /* COND_JUMP states. */
549 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
550 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
551 /* dword conditionals add 5 bytes to frag:
552 1 extra opcode byte, 4 displacement bytes. */
554 /* word conditionals add 3 bytes to frag:
555 1 extra opcode byte, 2 displacement bytes. */
558 /* COND_JUMP86 states. */
559 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
560 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
561 /* dword conditionals add 5 bytes to frag:
562 1 extra opcode byte, 4 displacement bytes. */
564 /* word conditionals add 4 bytes to frag:
565 1 displacement byte and a 3 byte long branch insn. */
569 static const arch_entry cpu_arch[] =
571 /* Do not replace the first two entries - i386_target_format()
572 relies on them being there in this order. */
573 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
574 CPU_GENERIC32_FLAGS, 0, 0 },
575 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
576 CPU_GENERIC64_FLAGS, 0, 0 },
577 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
578 CPU_NONE_FLAGS, 0, 0 },
579 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
580 CPU_I186_FLAGS, 0, 0 },
581 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
582 CPU_I286_FLAGS, 0, 0 },
583 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
584 CPU_I386_FLAGS, 0, 0 },
585 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
586 CPU_I486_FLAGS, 0, 0 },
587 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
588 CPU_I586_FLAGS, 0, 0 },
589 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
590 CPU_I686_FLAGS, 0, 0 },
591 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
592 CPU_I586_FLAGS, 0, 0 },
593 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
594 CPU_PENTIUMPRO_FLAGS, 0, 0 },
595 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
596 CPU_P2_FLAGS, 0, 0 },
597 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
598 CPU_P3_FLAGS, 0, 0 },
599 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
600 CPU_P4_FLAGS, 0, 0 },
601 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
602 CPU_CORE_FLAGS, 0, 0 },
603 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
604 CPU_NOCONA_FLAGS, 0, 0 },
605 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
606 CPU_CORE_FLAGS, 1, 0 },
607 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
608 CPU_CORE_FLAGS, 0, 0 },
609 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
610 CPU_CORE2_FLAGS, 1, 0 },
611 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
612 CPU_CORE2_FLAGS, 0, 0 },
613 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
614 CPU_COREI7_FLAGS, 0, 0 },
615 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
616 CPU_L1OM_FLAGS, 0, 0 },
617 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
618 CPU_K6_FLAGS, 0, 0 },
619 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
620 CPU_K6_2_FLAGS, 0, 0 },
621 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
622 CPU_ATHLON_FLAGS, 0, 0 },
623 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
624 CPU_K8_FLAGS, 1, 0 },
625 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
626 CPU_K8_FLAGS, 0, 0 },
627 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
628 CPU_K8_FLAGS, 0, 0 },
629 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
630 CPU_AMDFAM10_FLAGS, 0, 0 },
631 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BDVER1,
632 CPU_BDVER1_FLAGS, 0, 0 },
633 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
634 CPU_8087_FLAGS, 0, 0 },
635 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
636 CPU_287_FLAGS, 0, 0 },
637 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
638 CPU_387_FLAGS, 0, 0 },
639 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
640 CPU_ANY87_FLAGS, 0, 1 },
641 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
642 CPU_MMX_FLAGS, 0, 0 },
643 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
644 CPU_3DNOWA_FLAGS, 0, 1 },
645 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
646 CPU_SSE_FLAGS, 0, 0 },
647 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
648 CPU_SSE2_FLAGS, 0, 0 },
649 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
650 CPU_SSE3_FLAGS, 0, 0 },
651 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
652 CPU_SSSE3_FLAGS, 0, 0 },
653 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
654 CPU_SSE4_1_FLAGS, 0, 0 },
655 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
656 CPU_SSE4_2_FLAGS, 0, 0 },
657 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
658 CPU_SSE4_2_FLAGS, 0, 0 },
659 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
660 CPU_ANY_SSE_FLAGS, 0, 1 },
661 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
662 CPU_AVX_FLAGS, 0, 0 },
663 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
664 CPU_ANY_AVX_FLAGS, 0, 1 },
665 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
666 CPU_VMX_FLAGS, 0, 0 },
667 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
668 CPU_SMX_FLAGS, 0, 0 },
669 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
670 CPU_XSAVE_FLAGS, 0, 0 },
671 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
672 CPU_XSAVEOPT_FLAGS, 0, 0 },
673 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
674 CPU_AES_FLAGS, 0, 0 },
675 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
676 CPU_PCLMUL_FLAGS, 0, 0 },
677 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
678 CPU_PCLMUL_FLAGS, 1, 0 },
679 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
680 CPU_FSGSBASE_FLAGS, 0, 0 },
681 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
682 CPU_RDRND_FLAGS, 0, 0 },
683 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
684 CPU_F16C_FLAGS, 0, 0 },
685 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
686 CPU_FMA_FLAGS, 0, 0 },
687 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
688 CPU_FMA4_FLAGS, 0, 0 },
689 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
690 CPU_XOP_FLAGS, 0, 0 },
691 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
692 CPU_LWP_FLAGS, 0, 0 },
693 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
694 CPU_MOVBE_FLAGS, 0, 0 },
695 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
696 CPU_EPT_FLAGS, 0, 0 },
697 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
698 CPU_CLFLUSH_FLAGS, 0, 0 },
699 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
700 CPU_NOP_FLAGS, 0, 0 },
701 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
702 CPU_SYSCALL_FLAGS, 0, 0 },
703 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
704 CPU_RDTSCP_FLAGS, 0, 0 },
705 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
706 CPU_3DNOW_FLAGS, 0, 0 },
707 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
708 CPU_3DNOWA_FLAGS, 0, 0 },
709 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
710 CPU_PADLOCK_FLAGS, 0, 0 },
711 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
712 CPU_SVME_FLAGS, 1, 0 },
713 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
714 CPU_SVME_FLAGS, 0, 0 },
715 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
716 CPU_SSE4A_FLAGS, 0, 0 },
717 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
718 CPU_ABM_FLAGS, 0, 0 },
722 /* Like s_lcomm_internal in gas/read.c but the alignment string
723 is allowed to be optional. */
726 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
/* Like s_lcomm_internal in gas/read.c, but the alignment string is
   optional (see the comment preceding this function in the file).
   NOTE(review): interior lines of this definition are missing from this
   extraction — verify against upstream tc-i386.c before relying on it.  */
733 && *input_line_pointer == ','
/* Consume the optional ",align" operand when present.  */
735 align = parse_align (needs_align - 1);
/* parse_align reports failure as (addressT) -1.  */
737 if (align == (addressT) -1)
/* Allocate SIZE bytes for SYMBOLP in .bss with the parsed alignment.  */
752 bss_alloc (symbolP, size, align);
/* Handler for the PE ".lcomm" pseudo-op; delegates to s_comm_internal,
   with pe_lcomm_internal doing the actual allocation.  */
757 pe_lcomm (int needs_align)
759 s_comm_internal (needs_align * 2, pe_lcomm_internal);
763 const pseudo_typeS md_pseudo_table[] =
765 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
766 {"align", s_align_bytes, 0},
768 {"align", s_align_ptwo, 0},
770 {"arch", set_cpu_arch, 0},
774 {"lcomm", pe_lcomm, 1},
776 {"ffloat", float_cons, 'f'},
777 {"dfloat", float_cons, 'd'},
778 {"tfloat", float_cons, 'x'},
780 {"slong", signed_cons, 4},
781 {"noopt", s_ignore, 0},
782 {"optim", s_ignore, 0},
783 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
784 {"code16", set_code_flag, CODE_16BIT},
785 {"code32", set_code_flag, CODE_32BIT},
786 {"code64", set_code_flag, CODE_64BIT},
787 {"intel_syntax", set_intel_syntax, 1},
788 {"att_syntax", set_intel_syntax, 0},
789 {"intel_mnemonic", set_intel_mnemonic, 1},
790 {"att_mnemonic", set_intel_mnemonic, 0},
791 {"allow_index_reg", set_allow_index_reg, 1},
792 {"disallow_index_reg", set_allow_index_reg, 0},
793 {"sse_check", set_sse_check, 0},
794 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
795 {"largecomm", handle_large_common, 0},
797 {"file", (void (*) (int)) dwarf2_directive_file, 0},
798 {"loc", dwarf2_directive_loc, 0},
799 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
802 {"secrel32", pe_directive_secrel, 0},
807 /* For interface with expression (). */
808 extern char *input_line_pointer;
810 /* Hash table for instruction mnemonic lookup. */
811 static struct hash_control *op_hash;
813 /* Hash table for register lookup. */
814 static struct hash_control *reg_hash;
817 i386_align_code (fragS *fragP, int count)
819 /* Various efficient no-op patterns for aligning code labels.
820 Note: Don't try to assemble the instructions in the comments.
821 0L and 0w are not legal. */
822 static const char f32_1[] =
824 static const char f32_2[] =
825 {0x66,0x90}; /* xchg %ax,%ax */
826 static const char f32_3[] =
827 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
828 static const char f32_4[] =
829 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
830 static const char f32_5[] =
832 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
833 static const char f32_6[] =
834 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
835 static const char f32_7[] =
836 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
837 static const char f32_8[] =
839 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
840 static const char f32_9[] =
841 {0x89,0xf6, /* movl %esi,%esi */
842 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
843 static const char f32_10[] =
844 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
845 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
846 static const char f32_11[] =
847 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
848 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
849 static const char f32_12[] =
850 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
851 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
852 static const char f32_13[] =
853 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
854 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
855 static const char f32_14[] =
856 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
857 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
858 static const char f16_3[] =
859 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
860 static const char f16_4[] =
861 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
862 static const char f16_5[] =
864 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
865 static const char f16_6[] =
866 {0x89,0xf6, /* mov %si,%si */
867 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
868 static const char f16_7[] =
869 {0x8d,0x74,0x00, /* lea 0(%si),%si */
870 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
871 static const char f16_8[] =
872 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
873 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
874 static const char jump_31[] =
875 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
876 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
877 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
878 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
879 static const char *const f32_patt[] = {
880 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
881 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
883 static const char *const f16_patt[] = {
884 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
887 static const char alt_3[] =
889 /* nopl 0(%[re]ax) */
890 static const char alt_4[] =
891 {0x0f,0x1f,0x40,0x00};
892 /* nopl 0(%[re]ax,%[re]ax,1) */
893 static const char alt_5[] =
894 {0x0f,0x1f,0x44,0x00,0x00};
895 /* nopw 0(%[re]ax,%[re]ax,1) */
896 static const char alt_6[] =
897 {0x66,0x0f,0x1f,0x44,0x00,0x00};
898 /* nopl 0L(%[re]ax) */
899 static const char alt_7[] =
900 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
901 /* nopl 0L(%[re]ax,%[re]ax,1) */
902 static const char alt_8[] =
903 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
904 /* nopw 0L(%[re]ax,%[re]ax,1) */
905 static const char alt_9[] =
906 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
907 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
908 static const char alt_10[] =
909 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
911 nopw %cs:0L(%[re]ax,%[re]ax,1) */
912 static const char alt_long_11[] =
914 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
917 nopw %cs:0L(%[re]ax,%[re]ax,1) */
918 static const char alt_long_12[] =
921 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
925 nopw %cs:0L(%[re]ax,%[re]ax,1) */
926 static const char alt_long_13[] =
930 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
935 nopw %cs:0L(%[re]ax,%[re]ax,1) */
936 static const char alt_long_14[] =
941 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
947 nopw %cs:0L(%[re]ax,%[re]ax,1) */
948 static const char alt_long_15[] =
954 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
955 /* nopl 0(%[re]ax,%[re]ax,1)
956 nopw 0(%[re]ax,%[re]ax,1) */
957 static const char alt_short_11[] =
958 {0x0f,0x1f,0x44,0x00,0x00,
959 0x66,0x0f,0x1f,0x44,0x00,0x00};
960 /* nopw 0(%[re]ax,%[re]ax,1)
961 nopw 0(%[re]ax,%[re]ax,1) */
962 static const char alt_short_12[] =
963 {0x66,0x0f,0x1f,0x44,0x00,0x00,
964 0x66,0x0f,0x1f,0x44,0x00,0x00};
965 /* nopw 0(%[re]ax,%[re]ax,1)
967 static const char alt_short_13[] =
968 {0x66,0x0f,0x1f,0x44,0x00,0x00,
969 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
972 static const char alt_short_14[] =
973 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
974 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
976 nopl 0L(%[re]ax,%[re]ax,1) */
977 static const char alt_short_15[] =
978 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
979 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
980 static const char *const alt_short_patt[] = {
981 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
982 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
983 alt_short_14, alt_short_15
985 static const char *const alt_long_patt[] = {
986 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
987 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
988 alt_long_14, alt_long_15
991 /* Only align for at least a positive non-zero boundary. */
992 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
995 /* We need to decide which NOP sequence to use for 32bit and
996 64bit. When -mtune= is used:
998 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
999 PROCESSOR_GENERIC32, f32_patt will be used.
1000 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1001 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1002 PROCESSOR_GENERIC64, alt_long_patt will be used.
1003 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1004 PROCESSOR_AMDFAM10, and PROCESSOR_BDVER1, alt_short_patt
1007 When -mtune= isn't used, alt_long_patt will be used if
1008 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1011 When -march= or .arch is used, we can't use anything beyond
1012 cpu_arch_isa_flags. */
1014 if (flag_code == CODE_16BIT)
1018 memcpy (fragP->fr_literal + fragP->fr_fix,
1020 /* Adjust jump offset. */
1021 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1024 memcpy (fragP->fr_literal + fragP->fr_fix,
1025 f16_patt[count - 1], count);
1029 const char *const *patt = NULL;
1031 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1033 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1034 switch (cpu_arch_tune)
1036 case PROCESSOR_UNKNOWN:
1037 /* We use cpu_arch_isa_flags to check if we SHOULD
1038 optimize with nops. */
1039 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1040 patt = alt_long_patt;
1044 case PROCESSOR_PENTIUM4:
1045 case PROCESSOR_NOCONA:
1046 case PROCESSOR_CORE:
1047 case PROCESSOR_CORE2:
1048 case PROCESSOR_COREI7:
1049 case PROCESSOR_L1OM:
1050 case PROCESSOR_GENERIC64:
1051 patt = alt_long_patt;
1054 case PROCESSOR_ATHLON:
1056 case PROCESSOR_AMDFAM10:
1057 case PROCESSOR_BDVER1:
1058 patt = alt_short_patt;
1060 case PROCESSOR_I386:
1061 case PROCESSOR_I486:
1062 case PROCESSOR_PENTIUM:
1063 case PROCESSOR_PENTIUMPRO:
1064 case PROCESSOR_GENERIC32:
1071 switch (fragP->tc_frag_data.tune)
1073 case PROCESSOR_UNKNOWN:
1074 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1075 PROCESSOR_UNKNOWN. */
1079 case PROCESSOR_I386:
1080 case PROCESSOR_I486:
1081 case PROCESSOR_PENTIUM:
1083 case PROCESSOR_ATHLON:
1085 case PROCESSOR_AMDFAM10:
1086 case PROCESSOR_BDVER1:
1087 case PROCESSOR_GENERIC32:
1088 /* We use cpu_arch_isa_flags to check if we CAN optimize
1090 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1091 patt = alt_short_patt;
1095 case PROCESSOR_PENTIUMPRO:
1096 case PROCESSOR_PENTIUM4:
1097 case PROCESSOR_NOCONA:
1098 case PROCESSOR_CORE:
1099 case PROCESSOR_CORE2:
1100 case PROCESSOR_COREI7:
1101 case PROCESSOR_L1OM:
1102 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1103 patt = alt_long_patt;
1107 case PROCESSOR_GENERIC64:
1108 patt = alt_long_patt;
1113 if (patt == f32_patt)
1115 /* If the padding is less than 15 bytes, we use the normal
1116 ones. Otherwise, we use a jump instruction and adjust
1120 /* For 64bit, the limit is 3 bytes. */
1121 if (flag_code == CODE_64BIT
1122 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1127 memcpy (fragP->fr_literal + fragP->fr_fix,
1128 patt[count - 1], count);
1131 memcpy (fragP->fr_literal + fragP->fr_fix,
1133 /* Adjust jump offset. */
1134 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1139 /* Maximum length of an instruction is 15 byte. If the
1140 padding is greater than 15 bytes and we don't use jump,
1141 we have to break it into smaller pieces. */
1142 int padding = count;
1143 while (padding > 15)
1146 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1151 memcpy (fragP->fr_literal + fragP->fr_fix,
1152 patt [padding - 1], padding);
1155 fragP->fr_var = count;
1159 operand_type_all_zero (const union i386_operand_type *x)
1161 switch (ARRAY_SIZE(x->array))
1170 return !x->array[0];
1177 operand_type_set (union i386_operand_type *x, unsigned int v)
1179 switch (ARRAY_SIZE(x->array))
1194 operand_type_equal (const union i386_operand_type *x,
1195 const union i386_operand_type *y)
1197 switch (ARRAY_SIZE(x->array))
1200 if (x->array[2] != y->array[2])
1203 if (x->array[1] != y->array[1])
1206 return x->array[0] == y->array[0];
1214 cpu_flags_all_zero (const union i386_cpu_flags *x)
1216 switch (ARRAY_SIZE(x->array))
1225 return !x->array[0];
1232 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1234 switch (ARRAY_SIZE(x->array))
1249 cpu_flags_equal (const union i386_cpu_flags *x,
1250 const union i386_cpu_flags *y)
1252 switch (ARRAY_SIZE(x->array))
1255 if (x->array[2] != y->array[2])
1258 if (x->array[1] != y->array[1])
1261 return x->array[0] == y->array[0];
1269 cpu_flags_check_cpu64 (i386_cpu_flags f)
1271 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1272 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1275 static INLINE i386_cpu_flags
1276 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1278 switch (ARRAY_SIZE (x.array))
1281 x.array [2] &= y.array [2];
1283 x.array [1] &= y.array [1];
1285 x.array [0] &= y.array [0];
1293 static INLINE i386_cpu_flags
1294 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1296 switch (ARRAY_SIZE (x.array))
1299 x.array [2] |= y.array [2];
1301 x.array [1] |= y.array [1];
1303 x.array [0] |= y.array [0];
1311 static INLINE i386_cpu_flags
1312 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1314 switch (ARRAY_SIZE (x.array))
1317 x.array [2] &= ~y.array [2];
1319 x.array [1] &= ~y.array [1];
1321 x.array [0] &= ~y.array [0];
1329 #define CPU_FLAGS_ARCH_MATCH 0x1
1330 #define CPU_FLAGS_64BIT_MATCH 0x2
1331 #define CPU_FLAGS_AES_MATCH 0x4
1332 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1333 #define CPU_FLAGS_AVX_MATCH 0x10
1335 #define CPU_FLAGS_32BIT_MATCH \
1336 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1337 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1338 #define CPU_FLAGS_PERFECT_MATCH \
1339 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1341 /* Return CPU flags match bits. */
1344 cpu_flags_match (const insn_template *t)
1346 i386_cpu_flags x = t->cpu_flags;
1347 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1349 x.bitfield.cpu64 = 0;
1350 x.bitfield.cpuno64 = 0;
1352 if (cpu_flags_all_zero (&x))
1354 /* This instruction is available on all archs. */
1355 match |= CPU_FLAGS_32BIT_MATCH;
1359 /* This instruction is available only on some archs. */
1360 i386_cpu_flags cpu = cpu_arch_flags;
1362 cpu.bitfield.cpu64 = 0;
1363 cpu.bitfield.cpuno64 = 0;
1364 cpu = cpu_flags_and (x, cpu);
1365 if (!cpu_flags_all_zero (&cpu))
1367 if (x.bitfield.cpuavx)
1369 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1370 if (cpu.bitfield.cpuavx)
1372 /* Check SSE2AVX. */
1373 if (!t->opcode_modifier.sse2avx|| sse2avx)
1375 match |= (CPU_FLAGS_ARCH_MATCH
1376 | CPU_FLAGS_AVX_MATCH);
1378 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1379 match |= CPU_FLAGS_AES_MATCH;
1381 if (!x.bitfield.cpupclmul
1382 || cpu.bitfield.cpupclmul)
1383 match |= CPU_FLAGS_PCLMUL_MATCH;
1387 match |= CPU_FLAGS_ARCH_MATCH;
1390 match |= CPU_FLAGS_32BIT_MATCH;
1396 static INLINE i386_operand_type
1397 operand_type_and (i386_operand_type x, i386_operand_type y)
1399 switch (ARRAY_SIZE (x.array))
1402 x.array [2] &= y.array [2];
1404 x.array [1] &= y.array [1];
1406 x.array [0] &= y.array [0];
1414 static INLINE i386_operand_type
1415 operand_type_or (i386_operand_type x, i386_operand_type y)
1417 switch (ARRAY_SIZE (x.array))
1420 x.array [2] |= y.array [2];
1422 x.array [1] |= y.array [1];
1424 x.array [0] |= y.array [0];
1432 static INLINE i386_operand_type
1433 operand_type_xor (i386_operand_type x, i386_operand_type y)
1435 switch (ARRAY_SIZE (x.array))
1438 x.array [2] ^= y.array [2];
1440 x.array [1] ^= y.array [1];
1442 x.array [0] ^= y.array [0];
1450 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1451 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1452 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1453 static const i386_operand_type inoutportreg
1454 = OPERAND_TYPE_INOUTPORTREG;
1455 static const i386_operand_type reg16_inoutportreg
1456 = OPERAND_TYPE_REG16_INOUTPORTREG;
1457 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1458 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1459 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1460 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1461 static const i386_operand_type anydisp
1462 = OPERAND_TYPE_ANYDISP;
1463 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1464 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1465 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1466 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1467 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1468 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1469 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1470 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1471 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1472 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1473 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1474 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1485 operand_type_check (i386_operand_type t, enum operand_type c)
1490 return (t.bitfield.reg8
1493 || t.bitfield.reg64);
1496 return (t.bitfield.imm8
1500 || t.bitfield.imm32s
1501 || t.bitfield.imm64);
1504 return (t.bitfield.disp8
1505 || t.bitfield.disp16
1506 || t.bitfield.disp32
1507 || t.bitfield.disp32s
1508 || t.bitfield.disp64);
1511 return (t.bitfield.disp8
1512 || t.bitfield.disp16
1513 || t.bitfield.disp32
1514 || t.bitfield.disp32s
1515 || t.bitfield.disp64
1516 || t.bitfield.baseindex);
1525 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1526 operand J for instruction template T. */
1529 match_reg_size (const insn_template *t, unsigned int j)
1531 return !((i.types[j].bitfield.byte
1532 && !t->operand_types[j].bitfield.byte)
1533 || (i.types[j].bitfield.word
1534 && !t->operand_types[j].bitfield.word)
1535 || (i.types[j].bitfield.dword
1536 && !t->operand_types[j].bitfield.dword)
1537 || (i.types[j].bitfield.qword
1538 && !t->operand_types[j].bitfield.qword));
1541 /* Return 1 if there is no conflict in any size on operand J for
1542 instruction template T. */
1545 match_mem_size (const insn_template *t, unsigned int j)
1547 return (match_reg_size (t, j)
1548 && !((i.types[j].bitfield.unspecified
1549 && !t->operand_types[j].bitfield.unspecified)
1550 || (i.types[j].bitfield.fword
1551 && !t->operand_types[j].bitfield.fword)
1552 || (i.types[j].bitfield.tbyte
1553 && !t->operand_types[j].bitfield.tbyte)
1554 || (i.types[j].bitfield.xmmword
1555 && !t->operand_types[j].bitfield.xmmword)
1556 || (i.types[j].bitfield.ymmword
1557 && !t->operand_types[j].bitfield.ymmword)));
1560 /* Return 1 if there is no size conflict on any operands for
1561 instruction template T. */
1564 operand_size_match (const insn_template *t)
1569 /* Don't check jump instructions. */
1570 if (t->opcode_modifier.jump
1571 || t->opcode_modifier.jumpbyte
1572 || t->opcode_modifier.jumpdword
1573 || t->opcode_modifier.jumpintersegment)
1576 /* Check memory and accumulator operand size. */
1577 for (j = 0; j < i.operands; j++)
1579 if (t->operand_types[j].bitfield.anysize)
1582 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1588 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1597 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1600 i.error = operand_size_mismatch;
1604 /* Check reverse. */
1605 gas_assert (i.operands == 2);
1608 for (j = 0; j < 2; j++)
1610 if (t->operand_types[j].bitfield.acc
1611 && !match_reg_size (t, j ? 0 : 1))
1614 if (i.types[j].bitfield.mem
1615 && !match_mem_size (t, j ? 0 : 1))
1623 operand_type_match (i386_operand_type overlap,
1624 i386_operand_type given)
1626 i386_operand_type temp = overlap;
1628 temp.bitfield.jumpabsolute = 0;
1629 temp.bitfield.unspecified = 0;
1630 temp.bitfield.byte = 0;
1631 temp.bitfield.word = 0;
1632 temp.bitfield.dword = 0;
1633 temp.bitfield.fword = 0;
1634 temp.bitfield.qword = 0;
1635 temp.bitfield.tbyte = 0;
1636 temp.bitfield.xmmword = 0;
1637 temp.bitfield.ymmword = 0;
1638 if (operand_type_all_zero (&temp))
1641 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1642 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1646 i.error = operand_type_mismatch;
1650 /* If given types g0 and g1 are registers they must be of the same type
1651 unless the expected operand type register overlap is null.
1652 Note that Acc in a template matches every size of reg. */
1655 operand_type_register_match (i386_operand_type m0,
1656 i386_operand_type g0,
1657 i386_operand_type t0,
1658 i386_operand_type m1,
1659 i386_operand_type g1,
1660 i386_operand_type t1)
1662 if (!operand_type_check (g0, reg))
1665 if (!operand_type_check (g1, reg))
1668 if (g0.bitfield.reg8 == g1.bitfield.reg8
1669 && g0.bitfield.reg16 == g1.bitfield.reg16
1670 && g0.bitfield.reg32 == g1.bitfield.reg32
1671 && g0.bitfield.reg64 == g1.bitfield.reg64)
1674 if (m0.bitfield.acc)
1676 t0.bitfield.reg8 = 1;
1677 t0.bitfield.reg16 = 1;
1678 t0.bitfield.reg32 = 1;
1679 t0.bitfield.reg64 = 1;
1682 if (m1.bitfield.acc)
1684 t1.bitfield.reg8 = 1;
1685 t1.bitfield.reg16 = 1;
1686 t1.bitfield.reg32 = 1;
1687 t1.bitfield.reg64 = 1;
1690 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1691 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1692 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1693 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1696 i.error = register_type_mismatch;
1701 static INLINE unsigned int
1702 mode_from_disp_size (i386_operand_type t)
1704 if (t.bitfield.disp8)
1706 else if (t.bitfield.disp16
1707 || t.bitfield.disp32
1708 || t.bitfield.disp32s)
1715 fits_in_signed_byte (offsetT num)
1717 return (num >= -128) && (num <= 127);
1721 fits_in_unsigned_byte (offsetT num)
1723 return (num & 0xff) == num;
1727 fits_in_unsigned_word (offsetT num)
1729 return (num & 0xffff) == num;
1733 fits_in_signed_word (offsetT num)
1735 return (-32768 <= num) && (num <= 32767);
1739 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1744 return (!(((offsetT) -1 << 31) & num)
1745 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1747 } /* fits_in_signed_long() */
1750 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1755 return (num & (((offsetT) 2 << 31) - 1)) == num;
1757 } /* fits_in_unsigned_long() */
1760 fits_in_imm4 (offsetT num)
1762 return (num & 0xf) == num;
1765 static i386_operand_type
1766 smallest_imm_type (offsetT num)
1768 i386_operand_type t;
1770 operand_type_set (&t, 0);
1771 t.bitfield.imm64 = 1;
1773 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1775 /* This code is disabled on the 486 because all the Imm1 forms
1776 in the opcode table are slower on the i486. They're the
1777 versions with the implicitly specified single-position
1778 displacement, which has another syntax if you really want to
1780 t.bitfield.imm1 = 1;
1781 t.bitfield.imm8 = 1;
1782 t.bitfield.imm8s = 1;
1783 t.bitfield.imm16 = 1;
1784 t.bitfield.imm32 = 1;
1785 t.bitfield.imm32s = 1;
1787 else if (fits_in_signed_byte (num))
1789 t.bitfield.imm8 = 1;
1790 t.bitfield.imm8s = 1;
1791 t.bitfield.imm16 = 1;
1792 t.bitfield.imm32 = 1;
1793 t.bitfield.imm32s = 1;
1795 else if (fits_in_unsigned_byte (num))
1797 t.bitfield.imm8 = 1;
1798 t.bitfield.imm16 = 1;
1799 t.bitfield.imm32 = 1;
1800 t.bitfield.imm32s = 1;
1802 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1804 t.bitfield.imm16 = 1;
1805 t.bitfield.imm32 = 1;
1806 t.bitfield.imm32s = 1;
1808 else if (fits_in_signed_long (num))
1810 t.bitfield.imm32 = 1;
1811 t.bitfield.imm32s = 1;
1813 else if (fits_in_unsigned_long (num))
1814 t.bitfield.imm32 = 1;
1820 offset_in_range (offsetT val, int size)
1826 case 1: mask = ((addressT) 1 << 8) - 1; break;
1827 case 2: mask = ((addressT) 1 << 16) - 1; break;
1828 case 4: mask = ((addressT) 2 << 31) - 1; break;
1830 case 8: mask = ((addressT) 2 << 63) - 1; break;
1836 /* If BFD64, sign extend val for 32bit address mode. */
1837 if (flag_code != CODE_64BIT
1838 || i.prefix[ADDR_PREFIX])
1839 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1840 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1843 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1845 char buf1[40], buf2[40];
1847 sprint_value (buf1, val);
1848 sprint_value (buf2, val & mask);
1849 as_warn (_("%s shortened to %s"), buf1, buf2);
1863 a. PREFIX_EXIST if attempting to add a prefix where one from the
1864 same class already exists.
1865 b. PREFIX_LOCK if lock prefix is added.
1866 c. PREFIX_REP if rep/repne prefix is added.
1867 d. PREFIX_OTHER if other prefix is added.
1870 static enum PREFIX_GROUP
1871 add_prefix (unsigned int prefix)
1873 enum PREFIX_GROUP ret = PREFIX_OTHER;
1876 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1877 && flag_code == CODE_64BIT)
1879 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1880 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1881 && (prefix & (REX_R | REX_X | REX_B))))
1892 case CS_PREFIX_OPCODE:
1893 case DS_PREFIX_OPCODE:
1894 case ES_PREFIX_OPCODE:
1895 case FS_PREFIX_OPCODE:
1896 case GS_PREFIX_OPCODE:
1897 case SS_PREFIX_OPCODE:
1901 case REPNE_PREFIX_OPCODE:
1902 case REPE_PREFIX_OPCODE:
1907 case LOCK_PREFIX_OPCODE:
1916 case ADDR_PREFIX_OPCODE:
1920 case DATA_PREFIX_OPCODE:
1924 if (i.prefix[q] != 0)
1932 i.prefix[q] |= prefix;
1935 as_bad (_("same type of prefix used twice"));
1941 update_code_flag (int value, int check)
1943 PRINTF_LIKE ((*as_error));
1945 flag_code = (enum flag_code) value;
1946 if (flag_code == CODE_64BIT)
1948 cpu_arch_flags.bitfield.cpu64 = 1;
1949 cpu_arch_flags.bitfield.cpuno64 = 0;
1953 cpu_arch_flags.bitfield.cpu64 = 0;
1954 cpu_arch_flags.bitfield.cpuno64 = 1;
1956 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
1959 as_error = as_fatal;
1962 (*as_error) (_("64bit mode not supported on `%s'."),
1963 cpu_arch_name ? cpu_arch_name : default_arch);
1965 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
1968 as_error = as_fatal;
1971 (*as_error) (_("32bit mode not supported on `%s'."),
1972 cpu_arch_name ? cpu_arch_name : default_arch);
1974 stackop_size = '\0';
1978 set_code_flag (int value)
1980 update_code_flag (value, 0);
1984 set_16bit_gcc_code_flag (int new_code_flag)
1986 flag_code = (enum flag_code) new_code_flag;
1987 if (flag_code != CODE_16BIT)
1989 cpu_arch_flags.bitfield.cpu64 = 0;
1990 cpu_arch_flags.bitfield.cpuno64 = 1;
1991 stackop_size = LONG_MNEM_SUFFIX;
1995 set_intel_syntax (int syntax_flag)
1997 /* Find out if register prefixing is specified. */
1998 int ask_naked_reg = 0;
2001 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2003 char *string = input_line_pointer;
2004 int e = get_symbol_end ();
2006 if (strcmp (string, "prefix") == 0)
2008 else if (strcmp (string, "noprefix") == 0)
2011 as_bad (_("bad argument to syntax directive."));
2012 *input_line_pointer = e;
2014 demand_empty_rest_of_line ();
2016 intel_syntax = syntax_flag;
2018 if (ask_naked_reg == 0)
2019 allow_naked_reg = (intel_syntax
2020 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2022 allow_naked_reg = (ask_naked_reg < 0);
2024 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2026 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2027 identifier_chars['$'] = intel_syntax ? '$' : 0;
2028 register_prefix = allow_naked_reg ? "" : "%";
2032 set_intel_mnemonic (int mnemonic_flag)
2034 intel_mnemonic = mnemonic_flag;
2038 set_allow_index_reg (int flag)
2040 allow_index_reg = flag;
2044 set_sse_check (int dummy ATTRIBUTE_UNUSED)
2048 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2050 char *string = input_line_pointer;
2051 int e = get_symbol_end ();
2053 if (strcmp (string, "none") == 0)
2054 sse_check = sse_check_none;
2055 else if (strcmp (string, "warning") == 0)
2056 sse_check = sse_check_warning;
2057 else if (strcmp (string, "error") == 0)
2058 sse_check = sse_check_error;
2060 as_bad (_("bad argument to sse_check directive."));
2061 *input_line_pointer = e;
2064 as_bad (_("missing argument for sse_check directive"));
2066 demand_empty_rest_of_line ();
2070 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2071 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2073 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2074 static const char *arch;
2076 /* Intel LIOM is only supported on ELF. */
2082 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2083 use default_arch. */
2084 arch = cpu_arch_name;
2086 arch = default_arch;
2089 /* If we are targeting Intel L1OM, we must enable it. */
2090 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2091 || new_flag.bitfield.cpul1om)
2094 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2099 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2103 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2105 char *string = input_line_pointer;
2106 int e = get_symbol_end ();
2108 i386_cpu_flags flags;
2110 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2112 if (strcmp (string, cpu_arch[j].name) == 0)
2114 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2118 cpu_arch_name = cpu_arch[j].name;
2119 cpu_sub_arch_name = NULL;
2120 cpu_arch_flags = cpu_arch[j].flags;
2121 if (flag_code == CODE_64BIT)
2123 cpu_arch_flags.bitfield.cpu64 = 1;
2124 cpu_arch_flags.bitfield.cpuno64 = 0;
2128 cpu_arch_flags.bitfield.cpu64 = 0;
2129 cpu_arch_flags.bitfield.cpuno64 = 1;
2131 cpu_arch_isa = cpu_arch[j].type;
2132 cpu_arch_isa_flags = cpu_arch[j].flags;
2133 if (!cpu_arch_tune_set)
2135 cpu_arch_tune = cpu_arch_isa;
2136 cpu_arch_tune_flags = cpu_arch_isa_flags;
2141 if (!cpu_arch[j].negated)
2142 flags = cpu_flags_or (cpu_arch_flags,
2145 flags = cpu_flags_and_not (cpu_arch_flags,
2147 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2149 if (cpu_sub_arch_name)
2151 char *name = cpu_sub_arch_name;
2152 cpu_sub_arch_name = concat (name,
2154 (const char *) NULL);
2158 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2159 cpu_arch_flags = flags;
2161 *input_line_pointer = e;
2162 demand_empty_rest_of_line ();
2166 if (j >= ARRAY_SIZE (cpu_arch))
2167 as_bad (_("no such architecture: `%s'"), string);
2169 *input_line_pointer = e;
2172 as_bad (_("missing cpu architecture"));
2174 no_cond_jump_promotion = 0;
2175 if (*input_line_pointer == ','
2176 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2178 char *string = ++input_line_pointer;
2179 int e = get_symbol_end ();
2181 if (strcmp (string, "nojumps") == 0)
2182 no_cond_jump_promotion = 1;
2183 else if (strcmp (string, "jumps") == 0)
2186 as_bad (_("no such architecture modifier: `%s'"), string);
2188 *input_line_pointer = e;
2191 demand_empty_rest_of_line ();
2194 enum bfd_architecture
2197 if (cpu_arch_isa == PROCESSOR_L1OM)
2199 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2200 || flag_code != CODE_64BIT)
2201 as_fatal (_("Intel L1OM is 64bit ELF only"));
2202 return bfd_arch_l1om;
2205 return bfd_arch_i386;
2211 if (!strcmp (default_arch, "x86_64"))
2213 if (cpu_arch_isa == PROCESSOR_L1OM)
2215 if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
2216 as_fatal (_("Intel L1OM is 64bit ELF only"));
2217 return bfd_mach_l1om;
2220 return bfd_mach_x86_64;
2222 else if (!strcmp (default_arch, "i386"))
2223 return bfd_mach_i386_i386;
2225 as_fatal (_("Unknown architecture"));
2231 const char *hash_err;
2233 /* Initialize op_hash hash table. */
2234 op_hash = hash_new ();
2237 const insn_template *optab;
2238 templates *core_optab;
2240 /* Setup for loop. */
2242 core_optab = (templates *) xmalloc (sizeof (templates));
2243 core_optab->start = optab;
2248 if (optab->name == NULL
2249 || strcmp (optab->name, (optab - 1)->name) != 0)
2251 /* different name --> ship out current template list;
2252 add to hash table; & begin anew. */
2253 core_optab->end = optab;
2254 hash_err = hash_insert (op_hash,
2256 (void *) core_optab);
2259 as_fatal (_("Internal Error: Can't hash %s: %s"),
2263 if (optab->name == NULL)
2265 core_optab = (templates *) xmalloc (sizeof (templates));
2266 core_optab->start = optab;
2271 /* Initialize reg_hash hash table. */
2272 reg_hash = hash_new ();
2274 const reg_entry *regtab;
2275 unsigned int regtab_size = i386_regtab_size;
2277 for (regtab = i386_regtab; regtab_size--; regtab++)
2279 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2281 as_fatal (_("Internal Error: Can't hash %s: %s"),
2287 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2292 for (c = 0; c < 256; c++)
2297 mnemonic_chars[c] = c;
2298 register_chars[c] = c;
2299 operand_chars[c] = c;
2301 else if (ISLOWER (c))
2303 mnemonic_chars[c] = c;
2304 register_chars[c] = c;
2305 operand_chars[c] = c;
2307 else if (ISUPPER (c))
2309 mnemonic_chars[c] = TOLOWER (c);
2310 register_chars[c] = mnemonic_chars[c];
2311 operand_chars[c] = c;
2314 if (ISALPHA (c) || ISDIGIT (c))
2315 identifier_chars[c] = c;
2318 identifier_chars[c] = c;
2319 operand_chars[c] = c;
2324 identifier_chars['@'] = '@';
2327 identifier_chars['?'] = '?';
2328 operand_chars['?'] = '?';
2330 digit_chars['-'] = '-';
2331 mnemonic_chars['_'] = '_';
2332 mnemonic_chars['-'] = '-';
2333 mnemonic_chars['.'] = '.';
2334 identifier_chars['_'] = '_';
2335 identifier_chars['.'] = '.';
2337 for (p = operand_special_chars; *p != '\0'; p++)
2338 operand_chars[(unsigned char) *p] = *p;
2341 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2344 record_alignment (text_section, 2);
2345 record_alignment (data_section, 2);
2346 record_alignment (bss_section, 2);
2350 if (flag_code == CODE_64BIT)
2352 x86_dwarf2_return_column = 16;
2353 x86_cie_data_alignment = -8;
2357 x86_dwarf2_return_column = 8;
2358 x86_cie_data_alignment = -4;
2363 i386_print_statistics (FILE *file)
2365 hash_print_statistics (file, "i386 opcode", op_hash);
2366 hash_print_statistics (file, "i386 register", reg_hash);
2371 /* Debugging routines for md_assemble. */
2372 static void pte (insn_template *);
2373 static void pt (i386_operand_type);
2374 static void pe (expressionS *);
2375 static void ps (symbolS *);
2378 pi (char *line, i386_insn *x)
2382 fprintf (stdout, "%s: template ", line);
2384 fprintf (stdout, " address: base %s index %s scale %x\n",
2385 x->base_reg ? x->base_reg->reg_name : "none",
2386 x->index_reg ? x->index_reg->reg_name : "none",
2387 x->log2_scale_factor);
2388 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2389 x->rm.mode, x->rm.reg, x->rm.regmem);
2390 fprintf (stdout, " sib: base %x index %x scale %x\n",
2391 x->sib.base, x->sib.index, x->sib.scale);
2392 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2393 (x->rex & REX_W) != 0,
2394 (x->rex & REX_R) != 0,
2395 (x->rex & REX_X) != 0,
2396 (x->rex & REX_B) != 0);
2397 for (j = 0; j < x->operands; j++)
2399 fprintf (stdout, " #%d: ", j + 1);
2401 fprintf (stdout, "\n");
2402 if (x->types[j].bitfield.reg8
2403 || x->types[j].bitfield.reg16
2404 || x->types[j].bitfield.reg32
2405 || x->types[j].bitfield.reg64
2406 || x->types[j].bitfield.regmmx
2407 || x->types[j].bitfield.regxmm
2408 || x->types[j].bitfield.regymm
2409 || x->types[j].bitfield.sreg2
2410 || x->types[j].bitfield.sreg3
2411 || x->types[j].bitfield.control
2412 || x->types[j].bitfield.debug
2413 || x->types[j].bitfield.test)
2414 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2415 if (operand_type_check (x->types[j], imm))
2417 if (operand_type_check (x->types[j], disp))
2418 pe (x->op[j].disps);
2423 pte (insn_template *t)
2426 fprintf (stdout, " %d operands ", t->operands);
2427 fprintf (stdout, "opcode %x ", t->base_opcode);
2428 if (t->extension_opcode != None)
2429 fprintf (stdout, "ext %x ", t->extension_opcode);
2430 if (t->opcode_modifier.d)
2431 fprintf (stdout, "D");
2432 if (t->opcode_modifier.w)
2433 fprintf (stdout, "W");
2434 fprintf (stdout, "\n");
2435 for (j = 0; j < t->operands; j++)
2437 fprintf (stdout, " #%d type ", j + 1);
2438 pt (t->operand_types[j]);
2439 fprintf (stdout, "\n");
2446 fprintf (stdout, " operation %d\n", e->X_op);
2447 fprintf (stdout, " add_number %ld (%lx)\n",
2448 (long) e->X_add_number, (long) e->X_add_number);
2449 if (e->X_add_symbol)
2451 fprintf (stdout, " add_symbol ");
2452 ps (e->X_add_symbol);
2453 fprintf (stdout, "\n");
2457 fprintf (stdout, " op_symbol ");
2458 ps (e->X_op_symbol);
2459 fprintf (stdout, "\n");
2466 fprintf (stdout, "%s type %s%s",
2468 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2469 segment_name (S_GET_SEGMENT (s)));
2472 static struct type_name
2474 i386_operand_type mask;
2477 const type_names[] =
2479 { OPERAND_TYPE_REG8, "r8" },
2480 { OPERAND_TYPE_REG16, "r16" },
2481 { OPERAND_TYPE_REG32, "r32" },
2482 { OPERAND_TYPE_REG64, "r64" },
2483 { OPERAND_TYPE_IMM8, "i8" },
2484 { OPERAND_TYPE_IMM8, "i8s" },
2485 { OPERAND_TYPE_IMM16, "i16" },
2486 { OPERAND_TYPE_IMM32, "i32" },
2487 { OPERAND_TYPE_IMM32S, "i32s" },
2488 { OPERAND_TYPE_IMM64, "i64" },
2489 { OPERAND_TYPE_IMM1, "i1" },
2490 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2491 { OPERAND_TYPE_DISP8, "d8" },
2492 { OPERAND_TYPE_DISP16, "d16" },
2493 { OPERAND_TYPE_DISP32, "d32" },
2494 { OPERAND_TYPE_DISP32S, "d32s" },
2495 { OPERAND_TYPE_DISP64, "d64" },
2496 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2497 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2498 { OPERAND_TYPE_CONTROL, "control reg" },
2499 { OPERAND_TYPE_TEST, "test reg" },
2500 { OPERAND_TYPE_DEBUG, "debug reg" },
2501 { OPERAND_TYPE_FLOATREG, "FReg" },
2502 { OPERAND_TYPE_FLOATACC, "FAcc" },
2503 { OPERAND_TYPE_SREG2, "SReg2" },
2504 { OPERAND_TYPE_SREG3, "SReg3" },
2505 { OPERAND_TYPE_ACC, "Acc" },
2506 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2507 { OPERAND_TYPE_REGMMX, "rMMX" },
2508 { OPERAND_TYPE_REGXMM, "rXMM" },
2509 { OPERAND_TYPE_REGYMM, "rYMM" },
2510 { OPERAND_TYPE_ESSEG, "es" },
2514 pt (i386_operand_type t)
2517 i386_operand_type a;
2519 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2521 a = operand_type_and (t, type_names[j].mask);
2522 if (!operand_type_all_zero (&a))
2523 fprintf (stdout, "%s, ", type_names[j].name);
2528 #endif /* DEBUG386 */
2530 static bfd_reloc_code_real_type
2531 reloc (unsigned int size,
2534 bfd_reloc_code_real_type other)
2536 if (other != NO_RELOC)
2538 reloc_howto_type *rel;
2543 case BFD_RELOC_X86_64_GOT32:
2544 return BFD_RELOC_X86_64_GOT64;
2546 case BFD_RELOC_X86_64_PLTOFF64:
2547 return BFD_RELOC_X86_64_PLTOFF64;
2549 case BFD_RELOC_X86_64_GOTPC32:
2550 other = BFD_RELOC_X86_64_GOTPC64;
2552 case BFD_RELOC_X86_64_GOTPCREL:
2553 other = BFD_RELOC_X86_64_GOTPCREL64;
2555 case BFD_RELOC_X86_64_TPOFF32:
2556 other = BFD_RELOC_X86_64_TPOFF64;
2558 case BFD_RELOC_X86_64_DTPOFF32:
2559 other = BFD_RELOC_X86_64_DTPOFF64;
2565 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2566 if (size == 4 && flag_code != CODE_64BIT)
2569 rel = bfd_reloc_type_lookup (stdoutput, other);
2571 as_bad (_("unknown relocation (%u)"), other);
2572 else if (size != bfd_get_reloc_size (rel))
2573 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2574 bfd_get_reloc_size (rel),
2576 else if (pcrel && !rel->pc_relative)
2577 as_bad (_("non-pc-relative relocation for pc-relative field"));
2578 else if ((rel->complain_on_overflow == complain_overflow_signed
2580 || (rel->complain_on_overflow == complain_overflow_unsigned
2582 as_bad (_("relocated field and relocation type differ in signedness"));
2591 as_bad (_("there are no unsigned pc-relative relocations"));
2594 case 1: return BFD_RELOC_8_PCREL;
2595 case 2: return BFD_RELOC_16_PCREL;
2596 case 4: return BFD_RELOC_32_PCREL;
2597 case 8: return BFD_RELOC_64_PCREL;
2599 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2606 case 4: return BFD_RELOC_X86_64_32S;
2611 case 1: return BFD_RELOC_8;
2612 case 2: return BFD_RELOC_16;
2613 case 4: return BFD_RELOC_32;
2614 case 8: return BFD_RELOC_64;
2616 as_bad (_("cannot do %s %u byte relocation"),
2617 sign > 0 ? "signed" : "unsigned", size);
2623 /* Here we decide which fixups can be adjusted to make them relative to
2624 the beginning of the section instead of the symbol. Basically we need
2625 to make sure that the dynamic relocations are done correctly, so in
2626 some cases we force the original symbol to be used. */
2629 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2631 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2635 /* Don't adjust pc-relative references to merge sections in 64-bit
2637 if (use_rela_relocations
2638 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2642 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2643 and changed later by validate_fix. */
2644 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2645 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2648 /* adjust_reloc_syms doesn't know about the GOT. */
2649 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2650 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2651 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2652 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2653 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2654 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2655 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2656 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2657 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2658 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2659 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2660 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2661 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2662 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2663 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2664 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2665 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2666 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2667 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2668 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2669 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2670 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2671 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2672 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2673 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2674 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2675 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2676 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
/* Classify FPU mnemonic MNEMONIC for operand-size decisions.
   Visible return values: 0 = not a math/float op (also fxsave/fxrstor),
   2 = integer op, 3 = control op (fldcw/fldenv, fsave, fstcw, ...).
   NOTE(review): this excerpt elides original lines (embedded numbering is
   discontinuous) -- the return type, switch braces, several case labels
   and the default return are not visible here; confirm against the full
   file before relying on the exact value set.  */
2683 intel_float_operand (const char *mnemonic)
2685 /* Note that the value returned is meaningful only for opcodes with (memory)
2686 operands, hence the code here is free to improperly handle opcodes that
2687 have no operands (for better performance and smaller code). */
2689 if (mnemonic[0] != 'f')
2690 return 0; /* non-math */
2692 switch (mnemonic[1])
2694 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2695 the fs segment override prefix not currently handled because no
2696 call path can make opcodes without operands get here */
2698 return 2 /* integer op */;
2700 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2701 return 3; /* fldcw/fldenv */
2704 if (mnemonic[2] != 'o' /* fnop */)
2705 return 3; /* non-waiting control op */
2708 if (mnemonic[2] == 's')
2709 return 3; /* frstor/frstpm */
2712 if (mnemonic[2] == 'a')
2713 return 3; /* fsave */
2714 if (mnemonic[2] == 't')
/* Distinguish the fst* store-control family by the fourth letter.  */
2716 switch (mnemonic[3])
2718 case 'c': /* fstcw */
2719 case 'd': /* fstdw */
2720 case 'e': /* fstenv */
2721 case 's': /* fsts[gw] */
2727 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2728 return 0; /* fxsave/fxrstor are not really math ops */
2735 /* Build the VEX prefix. */
/* Fills i.vex.bytes[] for the current instruction in the global `i`.
   Emits the 2-byte form (0xC5 ...) when the opcode map is VEX0F, VEX.W
   need not be 1, and none of REX.W/X/B are required; otherwise the
   3-byte form (0xC4/0x8F ...).  NOTE(review): several original lines are
   elided here (condition heads, braces, implied_prefix assignments in
   the switch) -- the embedded line numbers are discontinuous.  */
2738 build_vex_prefix (const insn_template *t)
2740 unsigned int register_specifier;
2741 unsigned int implied_prefix;
2742 unsigned int vector_length;
2744 /* Check register specifier. */
2745 if (i.vex.register_specifier)
2747 register_specifier = i.vex.register_specifier->reg_num;
2748 if ((i.vex.register_specifier->reg_flags & RegRex))
2749 register_specifier += 8;
/* VEX.vvvv holds the register number in one's complement.  */
2750 register_specifier = ~register_specifier & 0xf;
2753 register_specifier = 0xf;
2755 /* Use 2-byte VEX prefix by swappping destination and source
2758 && i.operands == i.reg_operands
2759 && i.tm.opcode_modifier.vexopcode == VEX0F
2760 && i.tm.opcode_modifier.s
/* Swap the first and last operands (types and values) in place.  */
2763 unsigned int xchg = i.operands - 1;
2764 union i386_op temp_op;
2765 i386_operand_type temp_type;
2767 temp_type = i.types[xchg];
2768 i.types[xchg] = i.types[0];
2769 i.types[0] = temp_type;
2770 temp_op = i.op[xchg];
2771 i.op[xchg] = i.op[0];
2774 gas_assert (i.rm.mode == 3);
2778 i.rm.regmem = i.rm.reg;
2781 /* Use the next insn. */
2785 if (i.tm.opcode_modifier.vex == VEXScalar)
2786 vector_length = avxscalar;
2788 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
/* Map a legacy prefix byte encoded in bits 8-15 of the base opcode to
   the VEX "pp" implied-prefix field (assignments elided in excerpt).  */
2790 switch ((i.tm.base_opcode >> 8) & 0xff)
2795 case DATA_PREFIX_OPCODE:
2798 case REPE_PREFIX_OPCODE:
2801 case REPNE_PREFIX_OPCODE:
2808 /* Use 2-byte VEX prefix if possible. */
2809 if (i.tm.opcode_modifier.vexopcode == VEX0F
2810 && i.tm.opcode_modifier.vexw != VEXW1
2811 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2813 /* 2-byte VEX prefix. */
2817 i.vex.bytes[0] = 0xc5;
2819 /* Check the REX.R bit. */
2820 r = (i.rex & REX_R) ? 0 : 1;
2821 i.vex.bytes[1] = (r << 7
2822 | register_specifier << 3
2823 | vector_length << 2
2828 /* 3-byte VEX prefix. */
/* Select the first prefix byte and mmmmm field by opcode map;
   the `m` assignments per case are elided in this excerpt.  */
2833 switch (i.tm.opcode_modifier.vexopcode)
2837 i.vex.bytes[0] = 0xc4;
2841 i.vex.bytes[0] = 0xc4;
2845 i.vex.bytes[0] = 0xc4;
2849 i.vex.bytes[0] = 0x8f;
2853 i.vex.bytes[0] = 0x8f;
2857 i.vex.bytes[0] = 0x8f;
2863 /* The high 3 bits of the second VEX byte are 1's compliment
2864 of RXB bits from REX. */
2865 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2867 /* Check the REX.W bit. */
2868 w = (i.rex & REX_W) ? 1 : 0;
2869 if (i.tm.opcode_modifier.vexw)
2874 if (i.tm.opcode_modifier.vexw == VEXW1)
2878 i.vex.bytes[2] = (w << 7
2879 | register_specifier << 3
2880 | vector_length << 2
/* Fake an 8-bit immediate operand out of tm.extension_opcode for
   3DNow!/SSE/AVX instructions whose opcode suffix lives where an imm8
   would be encoded.  Appends the synthetic operand to `i` and clears
   i.tm.extension_opcode.  NOTE(review): original lines are elided here
   (braces, i.operands increment, part of the gas_assert condition).  */
2886 process_immext (void)
2890 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2892 /* SSE3 Instructions have the fixed operands with an opcode
2893 suffix which is coded in the same place as an 8-bit immediate
2894 field would be. Here we check those operands and remove them
2898 for (x = 0; x < i.operands; x++)
2899 if (i.op[x].regs->reg_num != x)
2900 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2901 register_prefix, i.op[x].regs->reg_name, x + 1,
2907 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2908 which is coded in the same place as an 8-bit immediate field
2909 would be. Here we fake an 8-bit immediate operand from the
2910 opcode suffix stored in tm.extension_opcode.
2912 AVX instructions also use this encoding, for some of
2913 3 argument instructions. */
2915 gas_assert (i.imm_operands == 0
2917 || (i.tm.opcode_modifier.vex
2918 && i.operands <= 4)))
2920 exp = &im_expressions[i.imm_operands++];
2921 i.op[i.operands].imms = exp;
2922 i.types[i.operands] = imm8;
2924 exp->X_op = O_constant;
2925 exp->X_add_number = i.tm.extension_opcode;
2926 i.tm.extension_opcode = None;
2929 /* This is the guts of the machine-dependent assembler. LINE points to a
2930 machine dependent instruction. This function is supposed to emit
2931 the frags/bytes it assembles to. */
/* Pipeline visible in this excerpt: parse_insn -> parse_operands ->
   (optional Intel-order swap) -> immediate/displacement optimization ->
   match_template -> suffix/lock/string checks -> operand processing ->
   VEX/REX prefix construction -> output.  NOTE(review): many original
   lines are elided (error-return branches, braces, the final
   output_insn call); embedded line numbers are discontinuous.  */
2934 md_assemble (char *line)
2937 char mnemonic[MAX_MNEM_SIZE];
2938 const insn_template *t;
2940 /* Initialize globals. */
2941 memset (&i, '\0', sizeof (i));
2942 for (j = 0; j < MAX_OPERANDS; j++)
2943 i.reloc[j] = NO_RELOC;
2944 memset (disp_expressions, '\0', sizeof (disp_expressions));
2945 memset (im_expressions, '\0', sizeof (im_expressions));
2946 save_stack_p = save_stack;
2948 /* First parse an instruction mnemonic & call i386_operand for the operands.
2949 We assume that the scrubber has arranged it so that line[0] is the valid
2950 start of a (possibly prefixed) mnemonic. */
2952 line = parse_insn (line, mnemonic);
2956 line = parse_operands (line, mnemonic);
2961 /* Now we've parsed the mnemonic into a set of templates, and have the
2962 operands at hand. */
2964 /* All intel opcodes have reversed operands except for "bound" and
2965 "enter". We also don't reverse intersegment "jmp" and "call"
2966 instructions with 2 immediate operands so that the immediate segment
2967 precedes the offset, as it does when in AT&T mode. */
2970 && (strcmp (mnemonic, "bound") != 0)
2971 && (strcmp (mnemonic, "invlpga") != 0)
2972 && !(operand_type_check (i.types[0], imm)
2973 && operand_type_check (i.types[1], imm)))
2976 /* The order of the immediates should be reversed
2977 for 2 immediates extrq and insertq instructions */
2978 if (i.imm_operands == 2
2979 && (strcmp (mnemonic, "extrq") == 0
2980 || strcmp (mnemonic, "insertq") == 0))
2981 swap_2_operands (0, 1);
2986 /* Don't optimize displacement for movabs since it only takes 64bit
2989 && !i.disp32_encoding
2990 && (flag_code != CODE_64BIT
2991 || strcmp (mnemonic, "movabs") != 0))
2994 /* Next, we find a template that matches the given insn,
2995 making sure the overlap of the given operands types is consistent
2996 with the template operand types. */
2998 if (!(t = match_template ()))
/* Optionally diagnose use of legacy SSE insns (per --sse-check).  */
3001 if (sse_check != sse_check_none
3002 && !i.tm.opcode_modifier.noavx
3003 && (i.tm.cpu_flags.bitfield.cpusse
3004 || i.tm.cpu_flags.bitfield.cpusse2
3005 || i.tm.cpu_flags.bitfield.cpusse3
3006 || i.tm.cpu_flags.bitfield.cpussse3
3007 || i.tm.cpu_flags.bitfield.cpusse4_1
3008 || i.tm.cpu_flags.bitfield.cpusse4_2))
3010 (sse_check == sse_check_warning
3012 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3015 /* Zap movzx and movsx suffix. The suffix has been set from
3016 "word ptr" or "byte ptr" on the source operand in Intel syntax
3017 or extracted from mnemonic in AT&T syntax. But we'll use
3018 the destination register to choose the suffix for encoding. */
3019 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3021 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3022 there is no suffix, the default will be byte extension. */
3023 if (i.reg_operands != 2
3026 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3031 if (i.tm.opcode_modifier.fwait)
3032 if (!add_prefix (FWAIT_OPCODE))
3035 /* Check for lock without a lockable instruction. Destination operand
3036 must be memory unless it is xchg (0x86). */
3037 if (i.prefix[LOCK_PREFIX]
3038 && (!i.tm.opcode_modifier.islockable
3039 || i.mem_operands == 0
3040 || (i.tm.base_opcode != 0x86
3041 && !operand_type_check (i.types[i.operands - 1], anymem))))
3043 as_bad (_("expecting lockable instruction after `lock'"));
3047 /* Check string instruction segment overrides. */
3048 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3050 if (!check_string ())
3052 i.disp_operands = 0;
3055 if (!process_suffix ())
3058 /* Update operand types. */
3059 for (j = 0; j < i.operands; j++)
3060 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3062 /* Make still unresolved immediate matches conform to size of immediate
3063 given in i.suffix. */
3064 if (!finalize_imm ())
3067 if (i.types[0].bitfield.imm1)
3068 i.imm_operands = 0; /* kludge for shift insns. */
3070 /* We only need to check those implicit registers for instructions
3071 with 3 operands or less. */
3072 if (i.operands <= 3)
3073 for (j = 0; j < i.operands; j++)
3074 if (i.types[j].bitfield.inoutportreg
3075 || i.types[j].bitfield.shiftcount
3076 || i.types[j].bitfield.acc
3077 || i.types[j].bitfield.floatacc)
3080 /* ImmExt should be processed after SSE2AVX. */
3081 if (!i.tm.opcode_modifier.sse2avx
3082 && i.tm.opcode_modifier.immext)
3085 /* For insns with operands there are more diddles to do to the opcode. */
3088 if (!process_operands ())
3091 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3093 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3094 as_warn (_("translating to `%sp'"), i.tm.name);
3097 if (i.tm.opcode_modifier.vex)
3098 build_vex_prefix (t);
3100 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3101 instructions may define INT_OPCODE as well, so avoid this corner
3102 case for those instructions that use MODRM. */
3103 if (i.tm.base_opcode == INT_OPCODE
3104 && !i.tm.opcode_modifier.modrm
3105 && i.op[0].imms->X_add_number == 3)
3107 i.tm.base_opcode = INT3_OPCODE;
3111 if ((i.tm.opcode_modifier.jump
3112 || i.tm.opcode_modifier.jumpbyte
3113 || i.tm.opcode_modifier.jumpdword)
3114 && i.op[0].disps->X_op == O_constant)
3116 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3117 the absolute address given by the constant. Since ix86 jumps and
3118 calls are pc relative, we need to generate a reloc. */
3119 i.op[0].disps->X_add_symbol = &abs_symbol;
3120 i.op[0].disps->X_op = O_symbol;
3123 if (i.tm.opcode_modifier.rex64)
3126 /* For 8 bit registers we need an empty rex prefix. Also if the
3127 instruction already has a prefix, we need to convert old
3128 registers to new ones. */
3130 if ((i.types[0].bitfield.reg8
3131 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3132 || (i.types[1].bitfield.reg8
3133 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3134 || ((i.types[0].bitfield.reg8
3135 || i.types[1].bitfield.reg8)
3140 i.rex |= REX_OPCODE;
3141 for (x = 0; x < 2; x++)
3143 /* Look for 8 bit operand that uses old registers. */
3144 if (i.types[x].bitfield.reg8
3145 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3147 /* In case it is "hi" register, give up. */
3148 if (i.op[x].regs->reg_num > 3)
3149 as_bad (_("can't encode register '%s%s' in an "
3150 "instruction requiring REX prefix."),
3151 register_prefix, i.op[x].regs->reg_name);
3153 /* Otherwise it is equivalent to the extended register.
3154 Since the encoding doesn't change this is merely
3155 cosmetic cleanup for debug output. */
3157 i.op[x].regs = i.op[x].regs + 8;
3163 add_prefix (REX_OPCODE | i.rex);
3165 /* We are ready to output the insn. */
/* Scan LINE for prefixes and the instruction mnemonic, copying the
   mnemonic into MNEMONIC and setting current_templates from op_hash.
   Handles branch hints (",pt"/",pn"), ".s"/".d32" encoding suffixes,
   AT&T size-suffix trimming, CPU/arch support checks, and rep/repne
   string-insn filtering.  Returns updated line pointer (error returns
   elided in this excerpt).  NOTE(review): embedded line numbers are
   discontinuous -- braces, gotos and several declarations are missing
   from view.  */
3170 parse_insn (char *line, char *mnemonic)
3173 char *token_start = l;
3176 const insn_template *t;
3179 /* Non-zero if we found a prefix only acceptable with string insns. */
3180 const char *expecting_string_instruction = NULL;
/* Copy mnemonic characters, normalizing via mnemonic_chars[].  */
3185 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3190 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3192 as_bad (_("no such instruction: `%s'"), token_start);
3197 if (!is_space_char (*l)
3198 && *l != END_OF_INSN
3200 || (*l != PREFIX_SEPARATOR
3203 as_bad (_("invalid character %s in mnemonic"),
3204 output_invalid (*l));
3207 if (token_start == l)
3209 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3210 as_bad (_("expecting prefix; got nothing"));
3212 as_bad (_("expecting mnemonic; got nothing"));
3216 /* Look up instruction (or prefix) via hash table. */
3217 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3219 if (*l != END_OF_INSN
3220 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3221 && current_templates
3222 && current_templates->start->opcode_modifier.isprefix)
3224 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3226 as_bad ((flag_code != CODE_64BIT
3227 ? _("`%s' is only supported in 64-bit mode")
3228 : _("`%s' is not supported in 64-bit mode")),
3229 current_templates->start->name);
3232 /* If we are in 16-bit mode, do not allow addr16 or data16.
3233 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3234 if ((current_templates->start->opcode_modifier.size16
3235 || current_templates->start->opcode_modifier.size32)
3236 && flag_code != CODE_64BIT
3237 && (current_templates->start->opcode_modifier.size32
3238 ^ (flag_code == CODE_16BIT)))
3240 as_bad (_("redundant %s prefix"),
3241 current_templates->start->name);
3244 /* Add prefix, checking for repeated prefixes. */
3245 switch (add_prefix (current_templates->start->base_opcode))
3250 expecting_string_instruction = current_templates->start->name;
3255 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3262 if (!current_templates)
3264 /* Check if we should swap operand or force 32bit displacement in
3266 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3268 else if (mnem_p - 4 == dot_p
3272 i.disp32_encoding = 1;
/* Retry lookup with the ".s"/".d32" pseudo-suffix stripped.  */
3277 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3280 if (!current_templates)
3283 /* See if we can get a match by trimming off a suffix. */
3286 case WORD_MNEM_SUFFIX:
3287 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3288 i.suffix = SHORT_MNEM_SUFFIX;
3290 case BYTE_MNEM_SUFFIX:
3291 case QWORD_MNEM_SUFFIX:
3292 i.suffix = mnem_p[-1];
3294 current_templates = (const templates *) hash_find (op_hash,
3297 case SHORT_MNEM_SUFFIX:
3298 case LONG_MNEM_SUFFIX:
3301 i.suffix = mnem_p[-1];
3303 current_templates = (const templates *) hash_find (op_hash,
3312 if (intel_float_operand (mnemonic) == 1)
3313 i.suffix = SHORT_MNEM_SUFFIX;
3315 i.suffix = LONG_MNEM_SUFFIX;
3317 current_templates = (const templates *) hash_find (op_hash,
3322 if (!current_templates)
3324 as_bad (_("no such instruction: `%s'"), token_start);
3329 if (current_templates->start->opcode_modifier.jump
3330 || current_templates->start->opcode_modifier.jumpbyte)
3332 /* Check for a branch hint. We allow ",pt" and ",pn" for
3333 predict taken and predict not taken respectively.
3334 I'm not sure that branch hints actually do anything on loop
3335 and jcxz insns (JumpByte) for current Pentium4 chips. They
3336 may work in the future and it doesn't hurt to accept them
3338 if (l[0] == ',' && l[1] == 'p')
3342 if (!add_prefix (DS_PREFIX_OPCODE))
3346 else if (l[2] == 'n')
3348 if (!add_prefix (CS_PREFIX_OPCODE))
3354 /* Any other comma loses. */
3357 as_bad (_("invalid character %s in mnemonic"),
3358 output_invalid (*l));
3362 /* Check if instruction is supported on specified architecture. */
3364 for (t = current_templates->start; t < current_templates->end; ++t)
3366 supported |= cpu_flags_match (t);
3367 if (supported == CPU_FLAGS_PERFECT_MATCH)
3371 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3373 as_bad (flag_code == CODE_64BIT
3374 ? _("`%s' is not supported in 64-bit mode")
3375 : _("`%s' is only supported in 64-bit mode"),
3376 current_templates->start->name);
3379 if (supported != CPU_FLAGS_PERFECT_MATCH)
3381 as_bad (_("`%s' is not supported on `%s%s'"),
3382 current_templates->start->name,
3383 cpu_arch_name ? cpu_arch_name : default_arch,
3384 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3389 if (!cpu_arch_flags.bitfield.cpui386
3390 && (flag_code != CODE_16BIT))
3392 as_warn (_("use .code16 to ensure correct addressing mode"));
3395 /* Check for rep/repne without a string instruction. */
3396 if (expecting_string_instruction)
3398 static templates override;
3400 for (t = current_templates->start; t < current_templates->end; ++t)
3401 if (t->opcode_modifier.isstring)
3403 if (t >= current_templates->end)
3405 as_bad (_("expecting string instruction after `%s'"),
3406 expecting_string_instruction);
/* Narrow the template range to just the string-insn templates.  */
3409 for (override.start = t; t < current_templates->end; ++t)
3410 if (!t->opcode_modifier.isstring)
3413 current_templates = &override;
/* Parse the comma-separated operand list following the mnemonic.
   Each operand is isolated (tracking paren/bracket balance), then fed
   to i386_intel_operand or i386_att_operand, which populate the global
   `i`.  Returns updated line pointer (returns/braces elided in this
   excerpt; embedded line numbers are discontinuous).  */
3420 parse_operands (char *l, const char *mnemonic)
3424 /* 1 if operand is pending after ','. */
3425 unsigned int expecting_operand = 0;
3427 /* Non-zero if operand parens not balanced. */
3428 unsigned int paren_not_balanced;
3430 while (*l != END_OF_INSN)
3432 /* Skip optional white space before operand. */
3433 if (is_space_char (*l))
3435 if (!is_operand_char (*l) && *l != END_OF_INSN)
3437 as_bad (_("invalid character %s before operand %d"),
3438 output_invalid (*l),
3442 token_start = l; /* after white space */
3443 paren_not_balanced = 0;
3444 while (paren_not_balanced || *l != ',')
3446 if (*l == END_OF_INSN)
3448 if (paren_not_balanced)
3451 as_bad (_("unbalanced parenthesis in operand %d."),
3454 as_bad (_("unbalanced brackets in operand %d."),
3459 break; /* we are done */
3461 else if (!is_operand_char (*l) && !is_space_char (*l))
3463 as_bad (_("invalid character %s in operand %d"),
3464 output_invalid (*l),
/* '('/')' and '['/']' tracking -- the character tests themselves are
   on elided lines.  */
3471 ++paren_not_balanced;
3473 --paren_not_balanced;
3478 ++paren_not_balanced;
3480 --paren_not_balanced;
3484 if (l != token_start)
3485 { /* Yes, we've read in another operand. */
3486 unsigned int operand_ok;
3487 this_operand = i.operands++;
3488 i.types[this_operand].bitfield.unspecified = 1;
3489 if (i.operands > MAX_OPERANDS)
3491 as_bad (_("spurious operands; (%d operands/instruction max)"),
3495 /* Now parse operand adding info to 'i' as we go along. */
3496 END_STRING_AND_SAVE (l);
3500 i386_intel_operand (token_start,
3501 intel_float_operand (mnemonic));
3503 operand_ok = i386_att_operand (token_start);
3505 RESTORE_END_STRING (l);
3511 if (expecting_operand)
3513 expecting_operand_after_comma:
3514 as_bad (_("expecting operand after ','; got nothing"));
3519 as_bad (_("expecting operand before ','; got nothing"));
3524 /* Now *l must be either ',' or END_OF_INSN. */
3527 if (*++l == END_OF_INSN)
3529 /* Just skip it, if it's \n complain. */
3530 goto expecting_operand_after_comma;
3532 expecting_operand = 1;
/* Exchange operands XCHG1 and XCHG2 of the current instruction in the
   global `i`: their types, values (i.op) and relocation kinds all swap
   together.  (Return type and closing brace are on elided lines.)  */
3539 swap_2_operands (int xchg1, int xchg2)
3541 union i386_op temp_op;
3542 i386_operand_type temp_type;
3543 enum bfd_reloc_code_real temp_reloc;
3545 temp_type = i.types[xchg2];
3546 i.types[xchg2] = i.types[xchg1];
3547 i.types[xchg1] = temp_type;
3548 temp_op = i.op[xchg2];
3549 i.op[xchg2] = i.op[xchg1];
3550 i.op[xchg1] = temp_op;
3551 temp_reloc = i.reloc[xchg2];
3552 i.reloc[xchg2] = i.reloc[xchg1];
3553 i.reloc[xchg1] = temp_reloc;
/* Reverse the operand order of the current instruction (Intel->AT&T
   order fixup): swaps outer operand pairs via swap_2_operands, then
   swaps segment overrides when both memory operands are present.
   NOTE(review): the operand-count dispatch around the swap calls is on
   elided lines in this excerpt.  */
3557 swap_operands (void)
3563 swap_2_operands (1, i.operands - 2);
3566 swap_2_operands (0, i.operands - 1);
3572 if (i.mem_operands == 2)
3574 const seg_entry *temp_seg;
3575 temp_seg = i.seg[0];
3576 i.seg[0] = i.seg[1];
3577 i.seg[1] = temp_seg;
3581 /* Try to ensure constant immediates are represented in the smallest
/* (Function name/header elided in excerpt -- presumably optimize_imm;
   TODO confirm against the full file.)  For each immediate operand:
   constants get additional imm8/imm16/imm32/... type bits based on a
   suffix guessed from the register operands, so narrower templates can
   match; symbolic immediates are masked against the sizes the candidate
   templates actually accept.  */
3586 char guess_suffix = 0;
3590 guess_suffix = i.suffix;
3591 else if (i.reg_operands)
3593 /* Figure out a suffix from the last register operand specified.
3594 We can't do this properly yet, ie. excluding InOutPortReg,
3595 but the following works for instructions with immediates.
3596 In any case, we can't set i.suffix yet. */
3597 for (op = i.operands; --op >= 0;)
3598 if (i.types[op].bitfield.reg8)
3600 guess_suffix = BYTE_MNEM_SUFFIX;
3603 else if (i.types[op].bitfield.reg16)
3605 guess_suffix = WORD_MNEM_SUFFIX;
3608 else if (i.types[op].bitfield.reg32)
3610 guess_suffix = LONG_MNEM_SUFFIX;
3613 else if (i.types[op].bitfield.reg64)
3615 guess_suffix = QWORD_MNEM_SUFFIX;
3619 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3620 guess_suffix = WORD_MNEM_SUFFIX;
3622 for (op = i.operands; --op >= 0;)
3623 if (operand_type_check (i.types[op], imm))
3625 switch (i.op[op].imms->X_op)
3628 /* If a suffix is given, this operand may be shortened. */
/* Cases fall through: each suffix enables its own width plus every
   wider one (case labels for O_constant etc. are on elided lines).  */
3629 switch (guess_suffix)
3631 case LONG_MNEM_SUFFIX:
3632 i.types[op].bitfield.imm32 = 1;
3633 i.types[op].bitfield.imm64 = 1;
3635 case WORD_MNEM_SUFFIX:
3636 i.types[op].bitfield.imm16 = 1;
3637 i.types[op].bitfield.imm32 = 1;
3638 i.types[op].bitfield.imm32s = 1;
3639 i.types[op].bitfield.imm64 = 1;
3641 case BYTE_MNEM_SUFFIX:
3642 i.types[op].bitfield.imm8 = 1;
3643 i.types[op].bitfield.imm8s = 1;
3644 i.types[op].bitfield.imm16 = 1;
3645 i.types[op].bitfield.imm32 = 1;
3646 i.types[op].bitfield.imm32s = 1;
3647 i.types[op].bitfield.imm64 = 1;
3651 /* If this operand is at most 16 bits, convert it
3652 to a signed 16 bit number before trying to see
3653 whether it will fit in an even smaller size.
3654 This allows a 16-bit operand such as $0xffe0 to
3655 be recognised as within Imm8S range. */
3656 if ((i.types[op].bitfield.imm16)
3657 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3659 i.op[op].imms->X_add_number =
3660 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3662 if ((i.types[op].bitfield.imm32)
3663 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3666 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3667 ^ ((offsetT) 1 << 31))
3668 - ((offsetT) 1 << 31));
3671 = operand_type_or (i.types[op],
3672 smallest_imm_type (i.op[op].imms->X_add_number));
3674 /* We must avoid matching of Imm32 templates when 64bit
3675 only immediate is available. */
3676 if (guess_suffix == QWORD_MNEM_SUFFIX)
3677 i.types[op].bitfield.imm32 = 0;
3684 /* Symbols and expressions. */
3686 /* Convert symbolic operand to proper sizes for matching, but don't
3687 prevent matching a set of insns that only supports sizes other
3688 than those matching the insn suffix. */
3690 i386_operand_type mask, allowed;
3691 const insn_template *t;
3693 operand_type_set (&mask, 0);
3694 operand_type_set (&allowed, 0);
3696 for (t = current_templates->start;
3697 t < current_templates->end;
3699 allowed = operand_type_or (allowed,
3700 t->operand_types[op]);
3701 switch (guess_suffix)
3703 case QWORD_MNEM_SUFFIX:
3704 mask.bitfield.imm64 = 1;
3705 mask.bitfield.imm32s = 1;
3707 case LONG_MNEM_SUFFIX:
3708 mask.bitfield.imm32 = 1;
3710 case WORD_MNEM_SUFFIX:
3711 mask.bitfield.imm16 = 1;
3713 case BYTE_MNEM_SUFFIX:
3714 mask.bitfield.imm8 = 1;
/* Only narrow when at least one template accepts a masked size.  */
3719 allowed = operand_type_and (mask, allowed);
3720 if (!operand_type_all_zero (&allowed))
3721 i.types[op] = operand_type_and (i.types[op], mask);
3728 /* Try to use the smallest displacement type too. */
/* For each displacement operand: sign-reduce constant displacements to
   16/32 bits where the type bits allow, drop zero displacements on
   base/index forms, enable disp32s/disp8 in 64-bit mode when the value
   fits, and emit marker fixups for TLS descriptor call relocs.
   (Braces and some fall-through lines elided in this excerpt.)  */
3730 optimize_disp (void)
3734 for (op = i.operands; --op >= 0;)
3735 if (operand_type_check (i.types[op], disp))
3737 if (i.op[op].disps->X_op == O_constant)
3739 offsetT op_disp = i.op[op].disps->X_add_number;
3741 if (i.types[op].bitfield.disp16
3742 && (op_disp & ~(offsetT) 0xffff) == 0)
3744 /* If this operand is at most 16 bits, convert
3745 to a signed 16 bit number and don't use 64bit
3747 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3748 i.types[op].bitfield.disp64 = 0;
3750 if (i.types[op].bitfield.disp32
3751 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3753 /* If this operand is at most 32 bits, convert
3754 to a signed 32 bit number and don't use 64bit
3756 op_disp &= (((offsetT) 2 << 31) - 1);
3757 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3758 i.types[op].bitfield.disp64 = 0;
3760 if (!op_disp && i.types[op].bitfield.baseindex)
/* Zero displacement with a base/index: no displacement needed.  */
3762 i.types[op].bitfield.disp8 = 0;
3763 i.types[op].bitfield.disp16 = 0;
3764 i.types[op].bitfield.disp32 = 0;
3765 i.types[op].bitfield.disp32s = 0;
3766 i.types[op].bitfield.disp64 = 0;
3770 else if (flag_code == CODE_64BIT)
3772 if (fits_in_signed_long (op_disp))
3774 i.types[op].bitfield.disp64 = 0;
3775 i.types[op].bitfield.disp32s = 1;
3777 if (i.prefix[ADDR_PREFIX]
3778 && fits_in_unsigned_long (op_disp))
3779 i.types[op].bitfield.disp32 = 1;
3781 if ((i.types[op].bitfield.disp32
3782 || i.types[op].bitfield.disp32s
3783 || i.types[op].bitfield.disp16)
3784 && fits_in_signed_byte (op_disp))
3785 i.types[op].bitfield.disp8 = 1;
3787 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3788 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
/* TLS descriptor call: record a zero-size marker fixup and drop all
   displacement bits -- nothing is encoded in the insn itself.  */
3790 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3791 i.op[op].disps, 0, i.reloc[op]);
3792 i.types[op].bitfield.disp8 = 0;
3793 i.types[op].bitfield.disp16 = 0;
3794 i.types[op].bitfield.disp32 = 0;
3795 i.types[op].bitfield.disp32s = 0;
3796 i.types[op].bitfield.disp64 = 0;
3799 /* We only support 64bit displacement on constants. */
3800 i.types[op].bitfield.disp64 = 0;
3804 /* Check if operands are valid for the instruction. Update VEX
/* Validates VEX-specific operand constraints for template T: currently
   only Vec_Imm4 (a 4-bit constant immediate required as the first
   operand).  On success rewrites i.types[0] to vec_imm4.  (Return
   statements and the failure path are on elided lines.)  */
3808 VEX_check_operands (const insn_template *t)
3810 if (!t->opcode_modifier.vex)
3813 /* Only check VEX_Imm4, which must be the first operand. */
3814 if (t->operand_types[0].bitfield.vec_imm4)
3816 if (i.op[0].imms->X_op != O_constant
3817 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3823 /* Turn off Imm8 so that update_imm won't complain. */
3824 i.types[0] = vec_imm4;
3830 static const insn_template *
3831 match_template (void)
3833 /* Points to template once we've found it. */
3834 const insn_template *t;
3835 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3836 i386_operand_type overlap4;
3837 unsigned int found_reverse_match;
3838 i386_opcode_modifier suffix_check;
3839 i386_operand_type operand_types [MAX_OPERANDS];
3840 int addr_prefix_disp;
3842 unsigned int found_cpu_match;
3843 unsigned int check_register;
3845 #if MAX_OPERANDS != 5
3846 # error "MAX_OPERANDS must be 5."
3849 found_reverse_match = 0;
3850 addr_prefix_disp = -1;
3852 memset (&suffix_check, 0, sizeof (suffix_check));
3853 if (i.suffix == BYTE_MNEM_SUFFIX)
3854 suffix_check.no_bsuf = 1;
3855 else if (i.suffix == WORD_MNEM_SUFFIX)
3856 suffix_check.no_wsuf = 1;
3857 else if (i.suffix == SHORT_MNEM_SUFFIX)
3858 suffix_check.no_ssuf = 1;
3859 else if (i.suffix == LONG_MNEM_SUFFIX)
3860 suffix_check.no_lsuf = 1;
3861 else if (i.suffix == QWORD_MNEM_SUFFIX)
3862 suffix_check.no_qsuf = 1;
3863 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3864 suffix_check.no_ldsuf = 1;
3866 /* Must have right number of operands. */
3867 i.error = number_of_operands_mismatch;
3869 for (t = current_templates->start; t < current_templates->end; t++)
3871 addr_prefix_disp = -1;
3873 if (i.operands != t->operands)
3876 /* Check processor support. */
3877 i.error = unsupported;
3878 found_cpu_match = (cpu_flags_match (t)
3879 == CPU_FLAGS_PERFECT_MATCH);
3880 if (!found_cpu_match)
3883 /* Check old gcc support. */
3884 i.error = old_gcc_only;
3885 if (!old_gcc && t->opcode_modifier.oldgcc)
3888 /* Check AT&T mnemonic. */
3889 i.error = unsupported_with_intel_mnemonic;
3890 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3893 /* Check AT&T/Intel syntax. */
3894 i.error = unsupported_syntax;
3895 if ((intel_syntax && t->opcode_modifier.attsyntax)
3896 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3899 /* Check the suffix, except for some instructions in intel mode. */
3900 i.error = invalid_instruction_suffix;
3901 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
3902 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
3903 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
3904 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
3905 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
3906 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
3907 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
3910 if (!operand_size_match (t))
3913 for (j = 0; j < MAX_OPERANDS; j++)
3914 operand_types[j] = t->operand_types[j];
3916 /* In general, don't allow 64-bit operands in 32-bit mode. */
3917 if (i.suffix == QWORD_MNEM_SUFFIX
3918 && flag_code != CODE_64BIT
3920 ? (!t->opcode_modifier.ignoresize
3921 && !intel_float_operand (t->name))
3922 : intel_float_operand (t->name) != 2)
3923 && ((!operand_types[0].bitfield.regmmx
3924 && !operand_types[0].bitfield.regxmm
3925 && !operand_types[0].bitfield.regymm)
3926 || (!operand_types[t->operands > 1].bitfield.regmmx
3927 && !!operand_types[t->operands > 1].bitfield.regxmm
3928 && !!operand_types[t->operands > 1].bitfield.regymm))
3929 && (t->base_opcode != 0x0fc7
3930 || t->extension_opcode != 1 /* cmpxchg8b */))
3933 /* In general, don't allow 32-bit operands on pre-386. */
3934 else if (i.suffix == LONG_MNEM_SUFFIX
3935 && !cpu_arch_flags.bitfield.cpui386
3937 ? (!t->opcode_modifier.ignoresize
3938 && !intel_float_operand (t->name))
3939 : intel_float_operand (t->name) != 2)
3940 && ((!operand_types[0].bitfield.regmmx
3941 && !operand_types[0].bitfield.regxmm)
3942 || (!operand_types[t->operands > 1].bitfield.regmmx
3943 && !!operand_types[t->operands > 1].bitfield.regxmm)))
3946 /* Do not verify operands when there are none. */
3950 /* We've found a match; break out of loop. */
3954 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
3955 into Disp32/Disp16/Disp32 operand. */
3956 if (i.prefix[ADDR_PREFIX] != 0)
3958 /* There should be only one Disp operand. */
3962 for (j = 0; j < MAX_OPERANDS; j++)
3964 if (operand_types[j].bitfield.disp16)
3966 addr_prefix_disp = j;
3967 operand_types[j].bitfield.disp32 = 1;
3968 operand_types[j].bitfield.disp16 = 0;
3974 for (j = 0; j < MAX_OPERANDS; j++)
3976 if (operand_types[j].bitfield.disp32)
3978 addr_prefix_disp = j;
3979 operand_types[j].bitfield.disp32 = 0;
3980 operand_types[j].bitfield.disp16 = 1;
3986 for (j = 0; j < MAX_OPERANDS; j++)
3988 if (operand_types[j].bitfield.disp64)
3990 addr_prefix_disp = j;
3991 operand_types[j].bitfield.disp64 = 0;
3992 operand_types[j].bitfield.disp32 = 1;
4000 /* We check register size if needed. */
4001 check_register = t->opcode_modifier.checkregsize;
4002 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4003 switch (t->operands)
4006 if (!operand_type_match (overlap0, i.types[0]))
4010 /* xchg %eax, %eax is a special case. It is an aliase for nop
4011 only in 32bit mode and we can use opcode 0x90. In 64bit
4012 mode, we can't use 0x90 for xchg %eax, %eax since it should
4013 zero-extend %eax to %rax. */
4014 if (flag_code == CODE_64BIT
4015 && t->base_opcode == 0x90
4016 && operand_type_equal (&i.types [0], &acc32)
4017 && operand_type_equal (&i.types [1], &acc32))
4021 /* If we swap operand in encoding, we either match
4022 the next one or reverse direction of operands. */
4023 if (t->opcode_modifier.s)
4025 else if (t->opcode_modifier.d)
4030 /* If we swap operand in encoding, we match the next one. */
4031 if (i.swap_operand && t->opcode_modifier.s)
4035 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4036 if (!operand_type_match (overlap0, i.types[0])
4037 || !operand_type_match (overlap1, i.types[1])
4039 && !operand_type_register_match (overlap0, i.types[0],
4041 overlap1, i.types[1],
4044 /* Check if other direction is valid ... */
4045 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4049 /* Try reversing direction of operands. */
4050 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4051 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4052 if (!operand_type_match (overlap0, i.types[0])
4053 || !operand_type_match (overlap1, i.types[1])
4055 && !operand_type_register_match (overlap0,
4062 /* Does not match either direction. */
4065 /* found_reverse_match holds which of D or FloatDR
4067 if (t->opcode_modifier.d)
4068 found_reverse_match = Opcode_D;
4069 else if (t->opcode_modifier.floatd)
4070 found_reverse_match = Opcode_FloatD;
4072 found_reverse_match = 0;
4073 if (t->opcode_modifier.floatr)
4074 found_reverse_match |= Opcode_FloatR;
4078 /* Found a forward 2 operand match here. */
4079 switch (t->operands)
4082 overlap4 = operand_type_and (i.types[4],
4085 overlap3 = operand_type_and (i.types[3],
4088 overlap2 = operand_type_and (i.types[2],
4093 switch (t->operands)
4096 if (!operand_type_match (overlap4, i.types[4])
4097 || !operand_type_register_match (overlap3,
4105 if (!operand_type_match (overlap3, i.types[3])
4107 && !operand_type_register_match (overlap2,
4115 /* Here we make use of the fact that there are no
4116 reverse match 3 operand instructions, and all 3
4117 operand instructions only need to be checked for
4118 register consistency between operands 2 and 3. */
4119 if (!operand_type_match (overlap2, i.types[2])
4121 && !operand_type_register_match (overlap1,
4131 /* Found either forward/reverse 2, 3 or 4 operand match here:
4132 slip through to break. */
4134 if (!found_cpu_match)
4136 found_reverse_match = 0;
4140 /* Check if VEX operands are valid. */
4141 if (VEX_check_operands (t))
4144 /* We've found a match; break out of loop. */
4148 if (t == current_templates->end)
4150 /* We found no match. */
4151 const char *err_msg;
4156 case operand_size_mismatch:
4157 err_msg = _("operand size mismatch");
4159 case operand_type_mismatch:
4160 err_msg = _("operand type mismatch");
4162 case register_type_mismatch:
4163 err_msg = _("register type mismatch");
4165 case number_of_operands_mismatch:
4166 err_msg = _("number of operands mismatch");
4168 case invalid_instruction_suffix:
4169 err_msg = _("invalid instruction suffix");
4172 err_msg = _("Imm4 isn't the first operand");
4175 err_msg = _("only supported with old gcc");
4177 case unsupported_with_intel_mnemonic:
4178 err_msg = _("unsupported with Intel mnemonic");
4180 case unsupported_syntax:
4181 err_msg = _("unsupported syntax");
4184 err_msg = _("unsupported");
4187 as_bad (_("%s for `%s'"), err_msg,
4188 current_templates->start->name);
4192 if (!quiet_warnings)
4195 && (i.types[0].bitfield.jumpabsolute
4196 != operand_types[0].bitfield.jumpabsolute))
4198 as_warn (_("indirect %s without `*'"), t->name);
4201 if (t->opcode_modifier.isprefix
4202 && t->opcode_modifier.ignoresize)
4204 /* Warn them that a data or address size prefix doesn't
4205 affect assembly of the next line of code. */
4206 as_warn (_("stand-alone `%s' prefix"), t->name);
4210 /* Copy the template we found. */
4213 if (addr_prefix_disp != -1)
4214 i.tm.operand_types[addr_prefix_disp]
4215 = operand_types[addr_prefix_disp];
4217 if (found_reverse_match)
4219 /* If we found a reverse match we must alter the opcode
4220 direction bit. found_reverse_match holds bits to change
4221 (different for int & float insns). */
4223 i.tm.base_opcode ^= found_reverse_match;
4225 i.tm.operand_types[0] = operand_types[1];
4226 i.tm.operand_types[1] = operand_types[0];
4235 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4236 if (i.tm.operand_types[mem_op].bitfield.esseg)
4238 if (i.seg[0] != NULL && i.seg[0] != &es)
4240 as_bad (_("`%s' operand %d must use `%ses' segment"),
4246 /* There's only ever one segment override allowed per instruction.
4247 This instruction possibly has a legal segment override on the
4248 second operand, so copy the segment to where non-string
4249 instructions store it, allowing common code. */
4250 i.seg[0] = i.seg[1];
4252 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4254 if (i.seg[1] != NULL && i.seg[1] != &es)
4256 as_bad (_("`%s' operand %d must use `%ses' segment"),
/* process_suffix -- determine and validate the operand-size suffix
   (b/w/l/q) for the instruction in the global `i', then adjust the
   opcode and emit operand-/address-size prefixes accordingly.
   NOTE(review): this listing is fragmentary (source lines are elided,
   shown by gaps in the embedded numbering); braces, `return' statements
   and some conditions are missing from view.  Comments below describe
   only what the visible lines establish.  */
4267 process_suffix (void)
4269 /* If matched instruction specifies an explicit instruction mnemonic
/* Explicit Size16/Size32/Size64 template modifiers force the suffix
   outright, overriding anything derived from operands.  */
4271 if (i.tm.opcode_modifier.size16)
4272 i.suffix = WORD_MNEM_SUFFIX;
4273 else if (i.tm.opcode_modifier.size32)
4274 i.suffix = LONG_MNEM_SUFFIX;
4275 else if (i.tm.opcode_modifier.size64)
4276 i.suffix = QWORD_MNEM_SUFFIX;
4277 else if (i.reg_operands)
4279 /* If there's no instruction mnemonic suffix we try to invent one
4280 based on register operands. */
4283 /* We take i.suffix from the last register operand specified,
4284 Destination register type is more significant than source
4285 register type. crc32 in SSE4.2 prefers source register
/* 0xf20f38f1 is crc32 with a word/dword/qword source (operand 0);
   unlike the general rule, its suffix follows the *source* register.  */
4287 if (i.tm.base_opcode == 0xf20f38f1)
4289 if (i.types[0].bitfield.reg16)
4290 i.suffix = WORD_MNEM_SUFFIX;
4291 else if (i.types[0].bitfield.reg32)
4292 i.suffix = LONG_MNEM_SUFFIX;
4293 else if (i.types[0].bitfield.reg64)
4294 i.suffix = QWORD_MNEM_SUFFIX;
/* 0xf20f38f0 is the crc32 variant with a byte source.  */
4296 else if (i.tm.base_opcode == 0xf20f38f0)
4298 if (i.types[0].bitfield.reg8)
4299 i.suffix = BYTE_MNEM_SUFFIX;
/* Reaching here with crc32 means a memory source of unknown width:
   the operand size cannot be inferred, so reject the instruction.  */
4306 if (i.tm.base_opcode == 0xf20f38f1
4307 || i.tm.base_opcode == 0xf20f38f0)
4309 /* We have to know the operand size for crc32. */
4310 as_bad (_("ambiguous memory operand size for `%s`"),
/* General case: walk operands from last to first and take the suffix
   from the first (i.e. last-specified) general register that is not an
   in/out port register (in/out's %dx carries no size information).  */
4315 for (op = i.operands; --op >= 0;)
4316 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4318 if (i.types[op].bitfield.reg8)
4320 i.suffix = BYTE_MNEM_SUFFIX;
4323 else if (i.types[op].bitfield.reg16)
4325 i.suffix = WORD_MNEM_SUFFIX;
4328 else if (i.types[op].bitfield.reg32)
4330 i.suffix = LONG_MNEM_SUFFIX;
4333 else if (i.types[op].bitfield.reg64)
4335 i.suffix = QWORD_MNEM_SUFFIX;
/* A user-supplied suffix: cross-check it against the registers actually
   used, unless the template ignores size and forbids that suffix
   (presumably the Intel-syntax escape hatch -- elided lines hide the
   full condition; TODO confirm against complete source).  */
4341 else if (i.suffix == BYTE_MNEM_SUFFIX)
4344 && i.tm.opcode_modifier.ignoresize
4345 && i.tm.opcode_modifier.no_bsuf)
4347 else if (!check_byte_reg ())
4350 else if (i.suffix == LONG_MNEM_SUFFIX)
4353 && i.tm.opcode_modifier.ignoresize
4354 && i.tm.opcode_modifier.no_lsuf)
4356 else if (!check_long_reg ())
4359 else if (i.suffix == QWORD_MNEM_SUFFIX)
4362 && i.tm.opcode_modifier.ignoresize
4363 && i.tm.opcode_modifier.no_qsuf)
4365 else if (!check_qword_reg ())
4368 else if (i.suffix == WORD_MNEM_SUFFIX)
4371 && i.tm.opcode_modifier.ignoresize
4372 && i.tm.opcode_modifier.no_wsuf)
4374 else if (!check_word_reg ())
4377 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4378 || i.suffix == YMMWORD_MNEM_SUFFIX)
4380 /* Skip if the instruction has x/y suffix. match_template
4381 should check if it is a valid suffix. */
4383 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4384 /* Do nothing if the instruction is going to ignore the prefix. */
/* No suffix could be derived: stack-op instructions (push/pop etc.)
   default to the current stack operand size.  */
4389 else if (i.tm.opcode_modifier.defaultsize
4391 /* exclude fldenv/frstor/fsave/fstenv */
4392 && i.tm.opcode_modifier.no_ssuf)
4394 i.suffix = stackop_size;
/* Intel syntax: branches and the lgdt/sgdt/lidt/sidt group (0x0f01,
   extension opcodes 0..3) get a default suffix by mode -- q in 64-bit,
   l in 32-bit, w in 16-bit -- when the template allows it.  */
4396 else if (intel_syntax
4398 && (i.tm.operand_types[0].bitfield.jumpabsolute
4399 || i.tm.opcode_modifier.jumpbyte
4400 || i.tm.opcode_modifier.jumpintersegment
4401 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4402 && i.tm.extension_opcode <= 3)))
4407 if (!i.tm.opcode_modifier.no_qsuf)
4409 i.suffix = QWORD_MNEM_SUFFIX;
4413 if (!i.tm.opcode_modifier.no_lsuf)
4414 i.suffix = LONG_MNEM_SUFFIX;
4417 if (!i.tm.opcode_modifier.no_wsuf)
4418 i.suffix = WORD_MNEM_SUFFIX;
/* Still no suffix and no register operands: in AT&T syntax a W-bit
   instruction cannot be sized, so this is an error.  */
4427 if (i.tm.opcode_modifier.w)
4429 as_bad (_("no instruction mnemonic suffix given and "
4430 "no register operands; can't size instruction"));
/* Intel-syntax ambiguity check: count how many suffixes the template
   would accept; `suffixes' is used as a bitmask below (the OR-in lines
   are elided here).  */
4436 unsigned int suffixes;
4438 suffixes = !i.tm.opcode_modifier.no_bsuf;
4439 if (!i.tm.opcode_modifier.no_wsuf)
4441 if (!i.tm.opcode_modifier.no_lsuf)
4443 if (!i.tm.opcode_modifier.no_ldsuf)
4445 if (!i.tm.opcode_modifier.no_ssuf)
4447 if (!i.tm.opcode_modifier.no_qsuf)
4450 /* There are more than suffix matches. */
/* (suffixes & (suffixes - 1)) != 0 <=> more than one bit set, i.e.
   more than one legal size and no way to pick one.  */
4451 if (i.tm.opcode_modifier.w
4452 || ((suffixes & (suffixes - 1))
4453 && !i.tm.opcode_modifier.defaultsize
4454 && !i.tm.opcode_modifier.ignoresize))
4456 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4462 /* Change the opcode based on the operand size given by i.suffix;
4463 We don't need to change things for byte insns. */
4466 && i.suffix != BYTE_MNEM_SUFFIX
4467 && i.suffix != XMMWORD_MNEM_SUFFIX
4468 && i.suffix != YMMWORD_MNEM_SUFFIX)
4470 /* It's not a byte, select word/dword operation. */
/* Short-form encodings keep the register in the low opcode bits, so
   the size bit lives at bit 3 instead of bit 0.  */
4471 if (i.tm.opcode_modifier.w)
4473 if (i.tm.opcode_modifier.shortform)
4474 i.tm.base_opcode |= 8;
4476 i.tm.base_opcode |= 1;
4479 /* Now select between word & dword operations via the operand
4480 size prefix, except for instructions that will ignore this
4482 if (i.tm.opcode_modifier.addrprefixop0)
4484 /* The address size override prefix changes the size of the
4486 if ((flag_code == CODE_32BIT
4487 && i.op->regs[0].reg_type.bitfield.reg16)
4488 || (flag_code != CODE_32BIT
4489 && i.op->regs[0].reg_type.bitfield.reg32))
4490 if (!add_prefix (ADDR_PREFIX_OPCODE))
/* Emit 0x66 (data size) when the suffix disagrees with the current
   code size; for jcxz/loop the relevant prefix is 0x67 (address size)
   because the register they implicitly use is an address register.  */
4493 else if (i.suffix != QWORD_MNEM_SUFFIX
4494 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4495 && !i.tm.opcode_modifier.ignoresize
4496 && !i.tm.opcode_modifier.floatmf
4497 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4498 || (flag_code == CODE_64BIT
4499 && i.tm.opcode_modifier.jumpbyte)))
4501 unsigned int prefix = DATA_PREFIX_OPCODE;
4503 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4504 prefix = ADDR_PREFIX_OPCODE;
4506 if (!add_prefix (prefix))
4510 /* Set mode64 for an operand. */
/* REX.W is needed for a q-suffixed insn in 64-bit mode, except for
   `xchg %rax,%rax' (opcode 0x90, would change the NOP encoding) and
   cmpxchg8b (0x0fc7 /1 on memory), which are fixed-size.  */
4511 if (i.suffix == QWORD_MNEM_SUFFIX
4512 && flag_code == CODE_64BIT
4513 && !i.tm.opcode_modifier.norex64)
4515 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4516 need rex64. cmpxchg8b is also a special case. */
4517 if (! (i.operands == 2
4518 && i.tm.base_opcode == 0x90
4519 && i.tm.extension_opcode == None
4520 && operand_type_equal (&i.types [0], &acc64)
4521 && operand_type_equal (&i.types [1], &acc64))
4522 && ! (i.operands == 1
4523 && i.tm.base_opcode == 0xfc7
4524 && i.tm.extension_opcode == 1
4525 && !operand_type_check (i.types [0], reg)
4526 && operand_type_check (i.types [0], anymem)))
4530 /* Size floating point instruction. */
/* For memory-format x87 insns, flipping bit 2 selects the 32-bit
   operand form when an `l' suffix was given.  */
4531 if (i.suffix == LONG_MNEM_SUFFIX)
4532 if (i.tm.opcode_modifier.floatmf)
4533 i.tm.base_opcode ^= 4;
/* check_byte_reg -- validate all register operands against an explicit
   `b' (byte) suffix.  8-bit registers pass; the low byte of a wider
   general register is accepted with a warning (error in 64-bit mode);
   anything else (MMX/XMM/YMM, segment, control, debug, test, FP regs)
   is rejected.  NOTE(review): listing is fragmentary -- `return'
   statements and braces are elided, so success/failure values are
   inferred from the callers' `!check_byte_reg ()' use; confirm against
   the complete source.  */
4540 check_byte_reg (void)
/* Scan operands from last to first.  */
4544 for (op = i.operands; --op >= 0;)
4546 /* If this is an eight bit register, it's OK. If it's the 16 or
4547 32 bit version of an eight bit register, we will just use the
4548 low portion, and that's OK too. */
4549 if (i.types[op].bitfield.reg8)
4552 /* crc32 doesn't generate this warning. */
/* 0xf20f38f0 is crc32 with a byte source; its mixed-size operands are
   legitimate, so skip the diagnostics below.  */
4553 if (i.tm.base_opcode == 0xf20f38f0)
/* reg_num < 4 restricts the relaxation to al/cl/dl/bl-style registers
   that have a directly addressable low byte in 32-bit mode.  */
4556 if ((i.types[op].bitfield.reg16
4557 || i.types[op].bitfield.reg32
4558 || i.types[op].bitfield.reg64)
4559 && i.op[op].regs->reg_num < 4)
4561 /* Prohibit these changes in the 64bit mode, since the
4562 lowering is more complicated. */
4563 if (flag_code == CODE_64BIT
4564 && !i.tm.operand_types[op].bitfield.inoutportreg)
4566 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4567 register_prefix, i.op[op].regs->reg_name,
4571 #if REGISTER_WARNINGS
4573 && !i.tm.operand_types[op].bitfield.inoutportreg)
/* Warn, naming the byte register we will actually encode; the pointer
   arithmetic maps e.g. %ax/%eax to %al via fixed table offsets.  */
4574 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4576 (i.op[op].regs + (i.types[op].bitfield.reg16
4577 ? REGNAM_AL - REGNAM_AX
4578 : REGNAM_AL - REGNAM_EAX))->reg_name,
4580 i.op[op].regs->reg_name,
4585 /* Any other register is bad. */
4586 if (i.types[op].bitfield.reg16
4587 || i.types[op].bitfield.reg32
4588 || i.types[op].bitfield.reg64
4589 || i.types[op].bitfield.regmmx
4590 || i.types[op].bitfield.regxmm
4591 || i.types[op].bitfield.regymm
4592 || i.types[op].bitfield.sreg2
4593 || i.types[op].bitfield.sreg3
4594 || i.types[op].bitfield.control
4595 || i.types[op].bitfield.debug
4596 || i.types[op].bitfield.test
4597 || i.types[op].bitfield.floatreg
4598 || i.types[op].bitfield.floatacc)
4600 as_bad (_("`%s%s' not allowed with `%s%c'"),
4602 i.op[op].regs->reg_name,
/* check_long_reg -- validate register operands against an explicit
   `l' (32-bit) suffix: reject byte registers where the template wants
   a word/dword register, warn (error in 64-bit mode) when a 16-bit
   register is used, and handle 64-bit registers (error unless the
   template converts to a q-sized form).  NOTE(review): fragmentary
   listing; braces/returns elided.  */
4612 check_long_reg (void)
4616 for (op = i.operands; --op >= 0;)
4617 /* Reject eight bit registers, except where the template requires
4618 them. (eg. movzb) */
4619 if (i.types[op].bitfield.reg8
4620 && (i.tm.operand_types[op].bitfield.reg16
4621 || i.tm.operand_types[op].bitfield.reg32
4622 || i.tm.operand_types[op].bitfield.acc))
4624 as_bad (_("`%s%s' not allowed with `%s%c'"),
4626 i.op[op].regs->reg_name,
4631 /* Warn if the e prefix on a general reg is missing. */
4632 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4633 && i.types[op].bitfield.reg16
4634 && (i.tm.operand_types[op].bitfield.reg32
4635 || i.tm.operand_types[op].bitfield.acc))
4637 /* Prohibit these changes in the 64bit mode, since the
4638 lowering is more complicated. */
4639 if (flag_code == CODE_64BIT)
4641 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4642 register_prefix, i.op[op].regs->reg_name,
4646 #if REGISTER_WARNINGS
/* Map e.g. %ax to %eax by fixed offset in the register table for the
   warning text.  */
4648 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4650 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4652 i.op[op].regs->reg_name,
4656 /* Warn if the r prefix on a general reg is missing. */
4657 else if (i.types[op].bitfield.reg64
4658 && (i.tm.operand_types[op].bitfield.reg32
4659 || i.tm.operand_types[op].bitfield.acc))
/* If the template supports promotion (ToQword) and the first operand
   is not an XMM register, silently upgrade the suffix to `q' so a REX
   prefix gets emitted; otherwise it is an error.  */
4662 && i.tm.opcode_modifier.toqword
4663 && !i.types[0].bitfield.regxmm)
4665 /* Convert to QWORD. We want REX byte. */
4666 i.suffix = QWORD_MNEM_SUFFIX;
4670 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4671 register_prefix, i.op[op].regs->reg_name,
/* check_qword_reg -- validate register operands against an explicit
   `q' (64-bit) suffix: reject byte registers where a wider register is
   required; for 16/32-bit registers either demote the suffix to `l'
   (templates with ToDword, non-XMM first operand) or report an error.
   NOTE(review): fragmentary listing; braces/returns elided.  */
4680 check_qword_reg (void)
4684 for (op = i.operands; --op >= 0; )
4685 /* Reject eight bit registers, except where the template requires
4686 them. (eg. movzb) */
4687 if (i.types[op].bitfield.reg8
4688 && (i.tm.operand_types[op].bitfield.reg16
4689 || i.tm.operand_types[op].bitfield.reg32
4690 || i.tm.operand_types[op].bitfield.acc))
4692 as_bad (_("`%s%s' not allowed with `%s%c'"),
4694 i.op[op].regs->reg_name,
4699 /* Warn if the e prefix on a general reg is missing. */
4700 else if ((i.types[op].bitfield.reg16
4701 || i.types[op].bitfield.reg32)
4702 && (i.tm.operand_types[op].bitfield.reg32
4703 || i.tm.operand_types[op].bitfield.acc))
4705 /* Prohibit these changes in the 64bit mode, since the
4706 lowering is more complicated. */
/* ToDword templates can drop to a 32-bit form, which needs no REX
   byte; the enclosing condition's first line is elided here.  */
4708 && i.tm.opcode_modifier.todword
4709 && !i.types[0].bitfield.regxmm)
4711 /* Convert to DWORD. We don't want REX byte. */
4712 i.suffix = LONG_MNEM_SUFFIX;
4716 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4717 register_prefix, i.op[op].regs->reg_name,
/* check_word_reg -- validate register operands against an explicit
   `w' (16-bit) suffix: reject byte registers where the template wants
   a wider register; a 32-bit register where 16-bit is expected is an
   error in 64-bit mode, otherwise a warning naming the 16-bit register
   actually encoded.  NOTE(review): fragmentary listing; braces and
   returns elided.  */
4726 check_word_reg (void)
4729 for (op = i.operands; --op >= 0;)
4730 /* Reject eight bit registers, except where the template requires
4731 them. (eg. movzb) */
4732 if (i.types[op].bitfield.reg8
4733 && (i.tm.operand_types[op].bitfield.reg16
4734 || i.tm.operand_types[op].bitfield.reg32
4735 || i.tm.operand_types[op].bitfield.acc))
4737 as_bad (_("`%s%s' not allowed with `%s%c'"),
4739 i.op[op].regs->reg_name,
4744 /* Warn if the e prefix on a general reg is present. */
4745 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4746 && i.types[op].bitfield.reg32
4747 && (i.tm.operand_types[op].bitfield.reg16
4748 || i.tm.operand_types[op].bitfield.acc))
4750 /* Prohibit these changes in the 64bit mode, since the
4751 lowering is more complicated. */
4752 if (flag_code == CODE_64BIT)
4754 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4755 register_prefix, i.op[op].regs->reg_name,
4760 #if REGISTER_WARNINGS
/* Map e.g. %eax to %ax by fixed offset in the register table for the
   warning text.  */
4761 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4763 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4765 i.op[op].regs->reg_name,
/* update_imm -- narrow the immediate-operand type mask of operand J to
   a single concrete size, using the instruction suffix when present or
   the current code size / data prefix otherwise.  On success the
   narrowed mask is written back to i.types[j].  NOTE(review):
   fragmentary listing; braces, returns, and some else-branches are
   elided, so the exact success/failure paths should be confirmed
   against the complete source.  */
4773 update_imm (unsigned int j)
4775 i386_operand_type overlap = i.types[j];
/* Only act when more than one immediate size is still possible, i.e.
   at least one imm bit is set but the mask equals none of the single-
   size constants.  */
4776 if ((overlap.bitfield.imm8
4777 || overlap.bitfield.imm8s
4778 || overlap.bitfield.imm16
4779 || overlap.bitfield.imm32
4780 || overlap.bitfield.imm32s
4781 || overlap.bitfield.imm64)
4782 && !operand_type_equal (&overlap, &imm8)
4783 && !operand_type_equal (&overlap, &imm8s)
4784 && !operand_type_equal (&overlap, &imm16)
4785 && !operand_type_equal (&overlap, &imm32)
4786 && !operand_type_equal (&overlap, &imm32s)
4787 && !operand_type_equal (&overlap, &imm64))
/* An explicit suffix selects the size directly: build a mask `temp'
   with only the bits the suffix permits and intersect (the combining
   line is elided from this view).  */
4791 i386_operand_type temp;
4793 operand_type_set (&temp, 0);
4794 if (i.suffix == BYTE_MNEM_SUFFIX)
4796 temp.bitfield.imm8 = overlap.bitfield.imm8;
4797 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4799 else if (i.suffix == WORD_MNEM_SUFFIX)
4800 temp.bitfield.imm16 = overlap.bitfield.imm16;
4801 else if (i.suffix == QWORD_MNEM_SUFFIX)
4803 temp.bitfield.imm64 = overlap.bitfield.imm64;
4804 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4807 temp.bitfield.imm32 = overlap.bitfield.imm32;
/* No suffix but the mask is one of the known word-vs-dword ambiguous
   combinations: pick 16 vs 32 bits from the effective operand size
   (code size XOR presence of the 0x66 data prefix).  */
4810 else if (operand_type_equal (&overlap, &imm16_32_32s)
4811 || operand_type_equal (&overlap, &imm16_32)
4812 || operand_type_equal (&overlap, &imm16_32s))
4814 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
/* If after all of the above the mask is still not a single size, we
   cannot encode the immediate -- report and fail.  */
4819 if (!operand_type_equal (&overlap, &imm8)
4820 && !operand_type_equal (&overlap, &imm8s)
4821 && !operand_type_equal (&overlap, &imm16)
4822 && !operand_type_equal (&overlap, &imm32)
4823 && !operand_type_equal (&overlap, &imm32s)
4824 && !operand_type_equal (&overlap, &imm64))
4826 as_bad (_("no instruction mnemonic suffix given; "
4827 "can't determine immediate size"));
4831 i.types[j] = overlap;
/* NOTE(review): interior fragment of the routine that finalizes
   immediate operand sizes (its header is elided from this listing;
   presumably `finalize_imm' -- confirm against the complete source).
   It applies update_imm() to at most the first two operands and
   asserts that operand 3 is never an immediate.  */
4841 /* Update the first 2 immediate operands. */
4842 n = i.operands > 2 ? 2 : i.operands;
/* update_imm() returning 0 means the immediate size could not be
   determined; the (elided) body presumably propagates the failure.  */
4845 for (j = 0; j < n; j++)
4846 if (update_imm (j) == 0)
4849 /* The 3rd operand can't be immediate operand. */
4850 gas_assert (operand_type_check (i.types[2], imm) == 0);
/* bad_implicit_operand -- report an instruction whose implicit
   xmm0/ymm0 operand was given as some other register.  XMM selects the
   "xmm0" spelling, otherwise "ymm0"; which message is used (first vs
   last operand) depends on a condition elided from this listing
   (presumably intel_syntax -- confirm against the complete source).  */
4857 bad_implicit_operand (int xmm)
4859 const char *ireg = xmm ? "xmm0" : "ymm0";
4862 as_bad (_("the last operand of `%s' must be `%s%s'"),
4863 i.tm.name, register_prefix, ireg);
4865 as_bad (_("the first operand of `%s' must be `%s%s'"),
4866 i.tm.name, register_prefix, ireg);
/* process_operands -- massage the parsed operands of the current
   instruction into encodable form: duplicate/insert implicit xmm0
   operands for SSE2AVX templates, apply the RegKludge expansion,
   handle short-form register-in-opcode encodings, build the ModRM/SIB
   bytes via build_modrm_byte(), and finally emit a segment-override
   prefix when the explicit segment differs from the default.
   NOTE(review): fragmentary listing -- braces, declarations and some
   statements are elided; `®xmm' below looks like a mojibake of
   `&regxmm' (the `&reg' sequence rendered as the (R) sign) -- confirm
   against the complete source.  */
4871 process_operands (void)
4873 /* Default segment register this instruction will use for memory
4874 accesses. 0 means unknown. This is only for optimizing out
4875 unnecessary segment overrides. */
4876 const seg_entry *default_seg = 0;
/* SSE2AVX with VEX.vvvv: the legacy SSE template is being encoded as
   its AVX form, which needs the destination register repeated.  */
4878 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
4880 unsigned int dupl = i.operands;
4881 unsigned int dest = dupl - 1;
4884 /* The destination must be an xmm register. */
4885 gas_assert (i.reg_operands
4886 && MAX_OPERANDS > dupl
4887 && operand_type_equal (&i.types[dest], ®xmm));
4889 if (i.tm.opcode_modifier.firstxmm0)
4891 /* The first operand is implicit and must be xmm0. */
4892 gas_assert (operand_type_equal (&i.types[0], ®xmm));
4893 if (i.op[0].regs->reg_num != 0)
4894 return bad_implicit_operand (1);
4896 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
4898 /* Keep xmm0 for instructions with VEX prefix and 3
4904 /* We remove the first xmm0 and keep the number of
4905 operands unchanged, which in fact duplicates the
/* Shift every operand (value, type, and template type) down one slot,
   dropping the implicit xmm0 at index 0.  */
4907 for (j = 1; j < i.operands; j++)
4909 i.op[j - 1] = i.op[j];
4910 i.types[j - 1] = i.types[j];
4911 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
4915 else if (i.tm.opcode_modifier.implicit1stxmm0)
4917 gas_assert ((MAX_OPERANDS - 1) > dupl
4918 && (i.tm.opcode_modifier.vexsources
4921 /* Add the implicit xmm0 for instructions with VEX prefix
/* Shift every operand up one slot to open index 0 for xmm0, which is
   looked up in the register hash and installed below.  */
4923 for (j = i.operands; j > 0; j--)
4925 i.op[j] = i.op[j - 1];
4926 i.types[j] = i.types[j - 1];
4927 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
4930 = (const reg_entry *) hash_find (reg_hash, "xmm0");
4931 i.types[0] = regxmm;
4932 i.tm.operand_types[0] = regxmm;
/* Inserting xmm0 plus the duplicated destination adds two register
   operands.  */
4935 i.reg_operands += 2;
4940 i.op[dupl] = i.op[dest];
4941 i.types[dupl] = i.types[dest];
4942 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
/* (Else path, condition elided): duplicate only the destination.  */
4951 i.op[dupl] = i.op[dest];
4952 i.types[dupl] = i.types[dest];
4953 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4956 if (i.tm.opcode_modifier.immext)
/* Non-SSE2AVX template with an implicit first xmm0/ymm0 operand:
   verify it is register 0, then shift the remaining operands down to
   remove it (the operand-count decrements are elided here).  */
4959 else if (i.tm.opcode_modifier.firstxmm0)
4963 /* The first operand is implicit and must be xmm0/ymm0. */
4964 gas_assert (i.reg_operands
4965 && (operand_type_equal (&i.types[0], ®xmm)
4966 || operand_type_equal (&i.types[0], ®ymm)))
4967 if (i.op[0].regs->reg_num != 0)
4968 return bad_implicit_operand (i.types[0].bitfield.regxmm);
4970 for (j = 1; j < i.operands; j++)
4972 i.op[j - 1] = i.op[j];
4973 i.types[j - 1] = i.types[j];
4975 /* We need to adjust fields in i.tm since they are used by
4976 build_modrm_byte. */
4977 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
4984 else if (i.tm.opcode_modifier.regkludge)
4986 /* The imul $imm, %reg instruction is converted into
4987 imul $imm, %reg, %reg, and the clr %reg instruction
4988 is converted into xor %reg, %reg. */
4990 unsigned int first_reg_op;
4992 if (operand_type_check (i.types[0], reg))
4996 /* Pretend we saw the extra register operand. */
4997 gas_assert (i.reg_operands == 1
4998 && i.op[first_reg_op + 1].regs == 0);
4999 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5000 i.types[first_reg_op + 1] = i.types[first_reg_op];
/* ShortForm: the register is folded into the opcode byte itself, no
   ModRM byte is used.  */
5005 if (i.tm.opcode_modifier.shortform)
5007 if (i.types[0].bitfield.sreg2
5008 || i.types[0].bitfield.sreg3)
/* Segment-register push/pop: `pop %cs' (reg_num 1) does not exist.  */
5010 if (i.tm.base_opcode == POP_SEG_SHORT
5011 && i.op[0].regs->reg_num == 1)
5013 as_bad (_("you can't `pop %scs'"), register_prefix);
5016 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5017 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5022 /* The register or float register operand is in operand
5026 if (i.types[0].bitfield.floatreg
5027 || operand_type_check (i.types[0], reg))
5031 /* Register goes in low 3 bits of opcode. */
5032 i.tm.base_opcode |= i.op[op].regs->reg_num;
5033 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5035 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5037 /* Warn about some common errors, but press on regardless.
5038 The first case can be generated by gcc (<= 2.8.1). */
5039 if (i.operands == 2)
5041 /* Reversed arguments on faddp, fsubp, etc. */
5042 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5043 register_prefix, i.op[!intel_syntax].regs->reg_name,
5044 register_prefix, i.op[intel_syntax].regs->reg_name);
5048 /* Extraneous `l' suffix on fp insn. */
5049 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5050 register_prefix, i.op[0].regs->reg_name);
5055 else if (i.tm.opcode_modifier.modrm)
5057 /* The opcode is completed (modulo i.tm.extension_opcode which
5058 must be put into the modrm byte). Now, we make the modrm and
5059 index base bytes based on all the info we've collected. */
5061 default_seg = build_modrm_byte ();
/* mov to/from %eax with a plain 32-bit displacement (opcodes 0xa0-0xa3
   masked by ~0x3): no ModRM byte (body elided here).  */
5063 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5067 else if (i.tm.opcode_modifier.isstring)
5069 /* For the string instructions that allow a segment override
5070 on one of their operands, the default segment is ds. */
/* A segment override on lea is legal but has no effect; warn.  */
5074 if (i.tm.base_opcode == 0x8d /* lea */
5077 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5079 /* If a segment was explicitly specified, and the specified segment
5080 is not the default, use an opcode prefix to select it. If we
5081 never figured out what the default segment is, then default_seg
5082 will be zero at this point, and the specified segment prefix will
5084 if ((i.seg[0]) && (i.seg[0] != default_seg))
5086 if (!add_prefix (i.seg[0]->seg_prefix))
5092 static const seg_entry *
5093 build_modrm_byte (void)
5095 const seg_entry *default_seg = 0;
5096 unsigned int source, dest;
5099 /* The first operand of instructions with VEX prefix and 3 sources
5100 must be VEX_Imm4. */
5101 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5104 unsigned int nds, reg_slot;
5107 if (i.tm.opcode_modifier.veximmext
5108 && i.tm.opcode_modifier.immext)
5110 dest = i.operands - 2;
5111 gas_assert (dest == 3);
5114 dest = i.operands - 1;
5117 /* There are 2 kinds of instructions:
5118 1. 5 operands: 4 register operands or 3 register operands
5119 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5120 VexW0 or VexW1. The destination must be either XMM or YMM
5122 2. 4 operands: 4 register operands or 3 register operands
5123 plus 1 memory operand, VexXDS, and VexImmExt */
5124 gas_assert ((i.reg_operands == 4
5125 || (i.reg_operands == 3 && i.mem_operands == 1))
5126 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5127 && (i.tm.opcode_modifier.veximmext
5128 || (i.imm_operands == 1
5129 && i.types[0].bitfield.vec_imm4
5130 && (i.tm.opcode_modifier.vexw == VEXW0
5131 || i.tm.opcode_modifier.vexw == VEXW1)
5132 && (operand_type_equal (&i.tm.operand_types[dest], ®xmm)
5133 || operand_type_equal (&i.tm.operand_types[dest], ®ymm)))));
5135 if (i.imm_operands == 0)
5137 /* When there is no immediate operand, generate an 8bit
5138 immediate operand to encode the first operand. */
5139 exp = &im_expressions[i.imm_operands++];
5140 i.op[i.operands].imms = exp;
5141 i.types[i.operands] = imm8;
5143 /* If VexW1 is set, the first operand is the source and
5144 the second operand is encoded in the immediate operand. */
5145 if (i.tm.opcode_modifier.vexw == VEXW1)
5156 /* FMA swaps REG and NDS. */
5157 if (i.tm.cpu_flags.bitfield.cpufma)
5165 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5167 || operand_type_equal (&i.tm.operand_types[reg_slot],
5169 exp->X_op = O_constant;
5171 = ((i.op[reg_slot].regs->reg_num
5172 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5177 unsigned int imm_slot;
5179 if (i.tm.opcode_modifier.vexw == VEXW0)
5181 /* If VexW0 is set, the third operand is the source and
5182 the second operand is encoded in the immediate
5189 /* VexW1 is set, the second operand is the source and
5190 the third operand is encoded in the immediate
5196 if (i.tm.opcode_modifier.immext)
5198 /* When ImmExt is set, the immdiate byte is the last
5200 imm_slot = i.operands - 1;
5208 /* Turn on Imm8 so that output_imm will generate it. */
5209 i.types[imm_slot].bitfield.imm8 = 1;
5212 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5214 || operand_type_equal (&i.tm.operand_types[reg_slot],
5216 i.op[imm_slot].imms->X_add_number
5217 |= ((i.op[reg_slot].regs->reg_num
5218 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5222 gas_assert (operand_type_equal (&i.tm.operand_types[nds], ®xmm)
5223 || operand_type_equal (&i.tm.operand_types[nds],
5225 i.vex.register_specifier = i.op[nds].regs;
5230 /* i.reg_operands MUST be the number of real register operands;
5231 implicit registers do not count. If there are 3 register
5232 operands, it must be a instruction with VexNDS. For a
5233 instruction with VexNDD, the destination register is encoded
5234 in VEX prefix. If there are 4 register operands, it must be
5235 a instruction with VEX prefix and 3 sources. */
5236 if (i.mem_operands == 0
5237 && ((i.reg_operands == 2
5238 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5239 || (i.reg_operands == 3
5240 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5241 || (i.reg_operands == 4 && vex_3_sources)))
5249 /* When there are 3 operands, one of them may be immediate,
5250 which may be the first or the last operand. Otherwise,
5251 the first operand must be shift count register (cl) or it
5252 is an instruction with VexNDS. */
5253 gas_assert (i.imm_operands == 1
5254 || (i.imm_operands == 0
5255 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5256 || i.types[0].bitfield.shiftcount)));
5257 if (operand_type_check (i.types[0], imm)
5258 || i.types[0].bitfield.shiftcount)
5264 /* When there are 4 operands, the first two must be 8bit
5265 immediate operands. The source operand will be the 3rd
5268 For instructions with VexNDS, if the first operand
5269 an imm8, the source operand is the 2nd one. If the last
5270 operand is imm8, the source operand is the first one. */
5271 gas_assert ((i.imm_operands == 2
5272 && i.types[0].bitfield.imm8
5273 && i.types[1].bitfield.imm8)
5274 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5275 && i.imm_operands == 1
5276 && (i.types[0].bitfield.imm8
5277 || i.types[i.operands - 1].bitfield.imm8)));
5278 if (i.imm_operands == 2)
5282 if (i.types[0].bitfield.imm8)
5298 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5300 /* For instructions with VexNDS, the register-only
5301 source operand must be XMM or YMM register. It is
5302 encoded in VEX prefix. We need to clear RegMem bit
5303 before calling operand_type_equal. */
5304 i386_operand_type op = i.tm.operand_types[dest];
5305 op.bitfield.regmem = 0;
5306 if ((dest + 1) >= i.operands
5307 || (!operand_type_equal (&op, ®xmm)
5308 && !operand_type_equal (&op, ®ymm)))
5310 i.vex.register_specifier = i.op[dest].regs;
5316 /* One of the register operands will be encoded in the i.tm.reg
5317 field, the other in the combined i.tm.mode and i.tm.regmem
5318 fields. If no form of this instruction supports a memory
5319 destination operand, then we assume the source operand may
5320 sometimes be a memory operand and so we need to store the
5321 destination in the i.rm.reg field. */
5322 if (!i.tm.operand_types[dest].bitfield.regmem
5323 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5325 i.rm.reg = i.op[dest].regs->reg_num;
5326 i.rm.regmem = i.op[source].regs->reg_num;
5327 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5329 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5334 i.rm.reg = i.op[source].regs->reg_num;
5335 i.rm.regmem = i.op[dest].regs->reg_num;
5336 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5338 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5341 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5343 if (!i.types[0].bitfield.control
5344 && !i.types[1].bitfield.control)
5346 i.rex &= ~(REX_R | REX_B);
5347 add_prefix (LOCK_PREFIX_OPCODE);
5351 { /* If it's not 2 reg operands... */
5356 unsigned int fake_zero_displacement = 0;
5359 for (op = 0; op < i.operands; op++)
5360 if (operand_type_check (i.types[op], anymem))
5362 gas_assert (op < i.operands);
5366 if (i.base_reg == 0)
5369 if (!i.disp_operands)
5370 fake_zero_displacement = 1;
5371 if (i.index_reg == 0)
5373 /* Operand is just <disp> */
5374 if (flag_code == CODE_64BIT)
5376 /* 64bit mode overwrites the 32bit absolute
5377 addressing by RIP relative addressing and
5378 absolute addressing is encoded by one of the
5379 redundant SIB forms. */
5380 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5381 i.sib.base = NO_BASE_REGISTER;
5382 i.sib.index = NO_INDEX_REGISTER;
5383 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5384 ? disp32s : disp32);
5386 else if ((flag_code == CODE_16BIT)
5387 ^ (i.prefix[ADDR_PREFIX] != 0))
5389 i.rm.regmem = NO_BASE_REGISTER_16;
5390 i.types[op] = disp16;
5394 i.rm.regmem = NO_BASE_REGISTER;
5395 i.types[op] = disp32;
5398 else /* !i.base_reg && i.index_reg */
5400 if (i.index_reg->reg_num == RegEiz
5401 || i.index_reg->reg_num == RegRiz)
5402 i.sib.index = NO_INDEX_REGISTER;
5404 i.sib.index = i.index_reg->reg_num;
5405 i.sib.base = NO_BASE_REGISTER;
5406 i.sib.scale = i.log2_scale_factor;
5407 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5408 i.types[op].bitfield.disp8 = 0;
5409 i.types[op].bitfield.disp16 = 0;
5410 i.types[op].bitfield.disp64 = 0;
5411 if (flag_code != CODE_64BIT)
5413 /* Must be 32 bit */
5414 i.types[op].bitfield.disp32 = 1;
5415 i.types[op].bitfield.disp32s = 0;
5419 i.types[op].bitfield.disp32 = 0;
5420 i.types[op].bitfield.disp32s = 1;
5422 if ((i.index_reg->reg_flags & RegRex) != 0)
5426 /* RIP addressing for 64bit mode. */
5427 else if (i.base_reg->reg_num == RegRip ||
5428 i.base_reg->reg_num == RegEip)
5430 i.rm.regmem = NO_BASE_REGISTER;
5431 i.types[op].bitfield.disp8 = 0;
5432 i.types[op].bitfield.disp16 = 0;
5433 i.types[op].bitfield.disp32 = 0;
5434 i.types[op].bitfield.disp32s = 1;
5435 i.types[op].bitfield.disp64 = 0;
5436 i.flags[op] |= Operand_PCrel;
5437 if (! i.disp_operands)
5438 fake_zero_displacement = 1;
5440 else if (i.base_reg->reg_type.bitfield.reg16)
5442 switch (i.base_reg->reg_num)
5445 if (i.index_reg == 0)
5447 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5448 i.rm.regmem = i.index_reg->reg_num - 6;
5452 if (i.index_reg == 0)
5455 if (operand_type_check (i.types[op], disp) == 0)
5457 /* fake (%bp) into 0(%bp) */
5458 i.types[op].bitfield.disp8 = 1;
5459 fake_zero_displacement = 1;
5462 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5463 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5465 default: /* (%si) -> 4 or (%di) -> 5 */
5466 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5468 i.rm.mode = mode_from_disp_size (i.types[op]);
5470 else /* i.base_reg and 32/64 bit mode */
5472 if (flag_code == CODE_64BIT
5473 && operand_type_check (i.types[op], disp))
5475 i386_operand_type temp;
5476 operand_type_set (&temp, 0);
5477 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5479 if (i.prefix[ADDR_PREFIX] == 0)
5480 i.types[op].bitfield.disp32s = 1;
5482 i.types[op].bitfield.disp32 = 1;
5485 i.rm.regmem = i.base_reg->reg_num;
5486 if ((i.base_reg->reg_flags & RegRex) != 0)
5488 i.sib.base = i.base_reg->reg_num;
5489 /* x86-64 ignores REX prefix bit here to avoid decoder
5491 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5494 if (i.disp_operands == 0)
5496 fake_zero_displacement = 1;
5497 i.types[op].bitfield.disp8 = 1;
5500 else if (i.base_reg->reg_num == ESP_REG_NUM)
5504 i.sib.scale = i.log2_scale_factor;
5505 if (i.index_reg == 0)
5507 /* <disp>(%esp) becomes two byte modrm with no index
5508 register. We've already stored the code for esp
5509 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5510 Any base register besides %esp will not use the
5511 extra modrm byte. */
5512 i.sib.index = NO_INDEX_REGISTER;
5516 if (i.index_reg->reg_num == RegEiz
5517 || i.index_reg->reg_num == RegRiz)
5518 i.sib.index = NO_INDEX_REGISTER;
5520 i.sib.index = i.index_reg->reg_num;
5521 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5522 if ((i.index_reg->reg_flags & RegRex) != 0)
5527 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5528 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5531 i.rm.mode = mode_from_disp_size (i.types[op]);
5534 if (fake_zero_displacement)
5536 /* Fakes a zero displacement assuming that i.types[op]
5537 holds the correct displacement size. */
5540 gas_assert (i.op[op].disps == 0);
5541 exp = &disp_expressions[i.disp_operands++];
5542 i.op[op].disps = exp;
5543 exp->X_op = O_constant;
5544 exp->X_add_number = 0;
5545 exp->X_add_symbol = (symbolS *) 0;
5546 exp->X_op_symbol = (symbolS *) 0;
5554 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5556 if (operand_type_check (i.types[0], imm))
5557 i.vex.register_specifier = NULL;
5560 /* VEX.vvvv encodes one of the sources when the first
5561 operand is not an immediate. */
5562 if (i.tm.opcode_modifier.vexw == VEXW0)
5563 i.vex.register_specifier = i.op[0].regs;
5565 i.vex.register_specifier = i.op[1].regs;
5568 /* Destination is a XMM register encoded in the ModRM.reg
5570 i.rm.reg = i.op[2].regs->reg_num;
5571 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5574 /* ModRM.rm and VEX.B encodes the other source. */
5575 if (!i.mem_operands)
5579 if (i.tm.opcode_modifier.vexw == VEXW0)
5580 i.rm.regmem = i.op[1].regs->reg_num;
5582 i.rm.regmem = i.op[0].regs->reg_num;
5584 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5588 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5590 i.vex.register_specifier = i.op[2].regs;
5591 if (!i.mem_operands)
5594 i.rm.regmem = i.op[1].regs->reg_num;
5595 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5599 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5600 (if any) based on i.tm.extension_opcode. Again, we must be
5601 careful to make sure that segment/control/debug/test/MMX
5602 registers are coded into the i.rm.reg field. */
5603 else if (i.reg_operands)
5606 unsigned int vex_reg = ~0;
5608 for (op = 0; op < i.operands; op++)
5609 if (i.types[op].bitfield.reg8
5610 || i.types[op].bitfield.reg16
5611 || i.types[op].bitfield.reg32
5612 || i.types[op].bitfield.reg64
5613 || i.types[op].bitfield.regmmx
5614 || i.types[op].bitfield.regxmm
5615 || i.types[op].bitfield.regymm
5616 || i.types[op].bitfield.sreg2
5617 || i.types[op].bitfield.sreg3
5618 || i.types[op].bitfield.control
5619 || i.types[op].bitfield.debug
5620 || i.types[op].bitfield.test)
5625 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5627 /* For instructions with VexNDS, the register-only
5628 source operand is encoded in VEX prefix. */
5629 gas_assert (mem != (unsigned int) ~0);
5634 gas_assert (op < i.operands);
5639 gas_assert (vex_reg < i.operands);
5642 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5644 /* For instructions with VexNDD, there should be
5645 no memory operand and the register destination
5646 is encoded in VEX prefix. */
5647 gas_assert (i.mem_operands == 0
5648 && (op + 2) == i.operands);
5652 gas_assert (op < i.operands);
5654 if (vex_reg != (unsigned int) ~0)
5656 gas_assert (i.reg_operands == 2);
5658 if (!operand_type_equal (&i.tm.operand_types[vex_reg],
5660 && !operand_type_equal (&i.tm.operand_types[vex_reg],
5664 i.vex.register_specifier = i.op[vex_reg].regs;
5667 /* Don't set OP operand twice. */
5670 /* If there is an extension opcode to put here, the
5671 register number must be put into the regmem field. */
5672 if (i.tm.extension_opcode != None)
5674 i.rm.regmem = i.op[op].regs->reg_num;
5675 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5680 i.rm.reg = i.op[op].regs->reg_num;
5681 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5686 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5687 must set it to 3 to indicate this is a register operand
5688 in the regmem field. */
5689 if (!i.mem_operands)
5693 /* Fill in i.rm.reg field with extension opcode (if any). */
5694 if (i.tm.extension_opcode != None)
5695 i.rm.reg = i.tm.extension_opcode;
/* Emit a relaxable (conditional or unconditional) branch.  The target is
   always symbolic here, so the frag is ended and set up for relaxation in
   md_convert_frag.
   NOTE(review): this is an elided excerpt -- the return type, local
   declarations (p, code16, size, prefix, sym, off) and several braces
   are on lines not shown.  */
5701 output_branch (void)
5707 relax_substateT subtype;
/* CODE16 relax flag when assembling 16-bit code.  */
5711 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
/* An explicit .disp32_encoding request forces the large (BIG) form.  */
5712 size = i.disp32_encoding ? BIG : SMALL;
5715 if (i.prefix[DATA_PREFIX] != 0)
5721 /* Pentium4 branch hints. */
5722 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5723 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5728 if (i.prefix[REX_PREFIX] != 0)
/* Prefixes not consumed above cannot be kept on a relaxed branch;
   warn (AT&T syntax only).  */
5734 if (i.prefixes != 0 && !intel_syntax)
5735 as_warn (_("skipping prefixes on this instruction"));
5737 /* It's always a symbol; End frag & setup for relax.
5738 Make sure there is enough room in this frag for the largest
5739 instruction we may generate in md_convert_frag. This is 2
5740 bytes for the opcode and room for the prefix and largest
5742 frag_grow (prefix + 2 + 4);
5743 /* Prefix and 1 opcode byte go in fr_fix. */
5744 p = frag_more (prefix + 1);
5745 if (i.prefix[DATA_PREFIX] != 0)
5746 *p++ = DATA_PREFIX_OPCODE;
5747 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5748 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5749 *p++ = i.prefix[SEG_PREFIX];
5750 if (i.prefix[REX_PREFIX] != 0)
5751 *p++ = i.prefix[REX_PREFIX];
5752 *p = i.tm.base_opcode;
/* Pick the relax subtype: unconditional jump, i386+ conditional jump,
   or pre-i386 (8086-style) conditional jump.  */
5754 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5755 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
5756 else if (cpu_arch_flags.bitfield.cpui386)
5757 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
5759 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
5762 sym = i.op[0].disps->X_add_symbol;
5763 off = i.op[0].disps->X_add_number;
5765 if (i.op[0].disps->X_op != O_constant
5766 && i.op[0].disps->X_op != O_symbol)
5768 /* Handle complex expressions. */
5769 sym = make_expr_symbol (i.op[0].disps);
5773 /* 1 possible extra opcode + 4 byte displacement go in var part.
5774 Pass reloc in fr_var. */
5775 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
/* Body of the non-relaxable jump emitter (presumably output_jump -- its
   header is on elided lines): emits a jump with a fixed-size
   displacement (jumpbyte loop/jecxz forms and jumpdword forms) and a
   PC-relative fixup.
   NOTE(review): elided excerpt; locals (p, size, fixP) and several
   branches are on lines not shown.  */
5785 if (i.tm.opcode_modifier.jumpbyte)
5787 /* This is a loop or jecxz type instruction. */
/* loop/jecxz take an address-size prefix rather than a data-size one.  */
5789 if (i.prefix[ADDR_PREFIX] != 0)
5791 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5794 /* Pentium4 branch hints. */
5795 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5796 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5798 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5807 if (flag_code == CODE_16BIT)
5810 if (i.prefix[DATA_PREFIX] != 0)
5812 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
5822 if (i.prefix[REX_PREFIX] != 0)
5824 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
5828 if (i.prefixes != 0 && !intel_syntax)
5829 as_warn (_("skipping prefixes on this instruction"));
/* One opcode byte plus a SIZE-byte displacement field.  */
5831 p = frag_more (1 + size);
5832 *p++ = i.tm.base_opcode;
5834 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5835 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
5837 /* All jumps handled here are signed, but don't use a signed limit
5838 check for 32 and 16 bit jumps as we want to allow wrap around at
5839 4G and 64k respectively. */
/* NOTE(review): per the comment above, this assignment appears to be
   guarded by a size check on an elided line -- confirm before relying
   on it applying unconditionally.  */
5841 fixP->fx_signed = 1;
/* Emit a direct inter-segment (far) jump/call: prefixes, opcode, the
   offset immediate, then the 2-byte segment selector.
   NOTE(review): elided excerpt -- the return type, local declarations
   (p, size, prefix) and some branches are on lines not shown.  */
5845 output_interseg_jump (void)
5853 if (flag_code == CODE_16BIT)
5857 if (i.prefix[DATA_PREFIX] != 0)
5863 if (i.prefix[REX_PREFIX] != 0)
5873 if (i.prefixes != 0 && !intel_syntax)
5874 as_warn (_("skipping prefixes on this instruction"));
5876 /* 1 opcode; 2 segment; offset */
5877 p = frag_more (prefix + 1 + 2 + size);
5879 if (i.prefix[DATA_PREFIX] != 0)
5880 *p++ = DATA_PREFIX_OPCODE;
5882 if (i.prefix[REX_PREFIX] != 0)
5883 *p++ = i.prefix[REX_PREFIX];
5885 *p++ = i.tm.base_opcode;
/* Operand 1 is the offset: emit directly when constant, otherwise a
   fixup.  */
5886 if (i.op[1].imms->X_op == O_constant)
5888 offsetT n = i.op[1].imms->X_add_number;
/* A 16-bit offset must fit in either a signed or unsigned word.  */
5891 && !fits_in_unsigned_word (n)
5892 && !fits_in_signed_word (n))
5894 as_bad (_("16-bit jump out of range"));
5897 md_number_to_chars (p, n, size);
5900 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5901 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
/* Operand 0 is the segment selector; it must be an absolute constant.  */
5902 if (i.op[0].imms->X_op != O_constant)
5903 as_bad (_("can't handle non absolute segment in `%s'"),
5905 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
/* Top-level instruction emitter (presumably output_insn -- its header
   is on elided lines): dispatches jumps to the helpers above; for
   everything else writes prefixes, optional VEX prefix, opcode bytes,
   ModR/M (+ SIB), then displacement and immediate fields.
   NOTE(review): elided excerpt; many lines are not shown.  */
5911 fragS *insn_start_frag;
5912 offsetT insn_start_off;
5914 /* Tie dwarf2 debug info to the address at the start of the insn.
5915 We can't do this after the insn has been output as the current
5916 frag may have been closed off. eg. by frag_var. */
5917 dwarf2_emit_insn (0);
5919 insn_start_frag = frag_now;
5920 insn_start_off = frag_now_fix ();
/* Relaxable and fixed-displacement jumps use dedicated emitters.  */
5923 if (i.tm.opcode_modifier.jump)
5925 else if (i.tm.opcode_modifier.jumpbyte
5926 || i.tm.opcode_modifier.jumpdword)
5928 else if (i.tm.opcode_modifier.jumpintersegment)
5929 output_interseg_jump ();
5932 /* Output normal instructions here. */
5936 unsigned int prefix;
5938 /* Since the VEX prefix contains the implicit prefix, we don't
5939 need the explicit prefix. */
5940 if (!i.tm.opcode_modifier.vex)
/* Mandatory prefixes encoded in the high bytes of base_opcode are
   moved into the regular prefix array.  */
5942 switch (i.tm.opcode_length)
5945 if (i.tm.base_opcode & 0xff000000)
5947 prefix = (i.tm.base_opcode >> 24) & 0xff;
5952 if ((i.tm.base_opcode & 0xff0000) != 0)
5954 prefix = (i.tm.base_opcode >> 16) & 0xff;
/* PadLock insns use 0xf3 as part of the opcode itself; avoid emitting
   it twice when a REP prefix is already recorded.  */
5955 if (i.tm.cpu_flags.bitfield.cpupadlock)
5958 if (prefix != REPE_PREFIX_OPCODE
5959 || (i.prefix[REP_PREFIX]
5960 != REPE_PREFIX_OPCODE))
5961 add_prefix (prefix);
5964 add_prefix (prefix);
5973 /* The prefix bytes. */
5974 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
5976 FRAG_APPEND_1_CHAR (*q);
5979 if (i.tm.opcode_modifier.vex)
5981 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
5986 /* REX byte is encoded in VEX prefix. */
5990 FRAG_APPEND_1_CHAR (*q);
5993 /* There should be no other prefixes for instructions
5998 /* Now the VEX prefix. */
5999 p = frag_more (i.vex.length);
6000 for (j = 0; j < i.vex.length; j++)
6001 p[j] = i.vex.bytes[j];
6004 /* Now the opcode; be careful about word order here! */
6005 if (i.tm.opcode_length == 1)
6007 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6011 switch (i.tm.opcode_length)
6015 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6025 /* Put out high byte first: can't use md_number_to_chars! */
6026 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6027 *p = i.tm.base_opcode & 0xff;
6030 /* Now the modrm byte and sib byte (if present). */
6031 if (i.tm.opcode_modifier.modrm)
6033 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6036 /* If i.rm.regmem == ESP (4)
6037 && i.rm.mode != (Register mode)
6039 ==> need second modrm byte. */
6040 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6042 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6043 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6045 | i.sib.scale << 6));
6048 if (i.disp_operands)
6049 output_disp (insn_start_frag, insn_start_off);
/* NOTE(review): the i.imm_operands guard for this call sits on an
   elided line.  */
6052 output_imm (insn_start_frag, insn_start_off);
6058 pi ("" /*line*/, &i);
6060 #endif /* DEBUG386 */
6063 /* Return the size of the displacement operand N. */
/* Size in bytes: disp64 -> 8, disp8 -> 1, disp16 -> 2; the default
   (32-bit) case and the return statements are on elided lines.  */
6066 disp_size (unsigned int n)
6069 if (i.types[n].bitfield.disp64)
6071 else if (i.types[n].bitfield.disp8)
6073 else if (i.types[n].bitfield.disp16)
6078 /* Return the size of the immediate operand N. */
/* Size in bytes: imm64 -> 8, imm8/imm8s -> 1, imm16 -> 2; the default
   (32-bit) case and the return statements are on elided lines.  */
6081 imm_size (unsigned int n)
6084 if (i.types[n].bitfield.imm64)
6086 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6088 else if (i.types[n].bitfield.imm16)
/* Emit the displacement field(s) of the current instruction: constant
   displacements are written directly; symbolic ones get a fixup, with
   special conversion of _GLOBAL_OFFSET_TABLE_ references to GOTPC
   relocations.
   NOTE(review): elided excerpt -- the return type, locals (n, p, val)
   and several braces are on lines not shown.
   FIX: the 32-bit GOTPC addend adjustment below updated
   i.op[n].imms->X_add_number although the expression emitted by the
   fix_new_exp at the end of this function is i.op[n].disps; it now
   adjusts i.op[n].disps, matching the field being relocated (this is
   how later binutils releases spell it).  */
6094 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6099 for (n = 0; n < i.operands; n++)
6101 if (operand_type_check (i.types[n], disp))
/* Constant displacement: emit the bytes directly, no fixup needed.  */
6103 if (i.op[n].disps->X_op == O_constant)
6105 int size = disp_size (n);
6108 val = offset_in_range (i.op[n].disps->X_add_number,
6110 p = frag_more (size);
6111 md_number_to_chars (p, val, size);
/* Symbolic displacement: reserve the field and emit a fixup.  */
6115 enum bfd_reloc_code_real reloc_type;
6116 int size = disp_size (n);
6117 int sign = i.types[n].bitfield.disp32s;
6118 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6120 /* We can't have 8 bit displacement here. */
6121 gas_assert (!i.types[n].bitfield.disp8);
6123 /* The PC relative address is computed relative
6124 to the instruction boundary, so in case immediate
6125 fields follows, we need to adjust the value. */
6126 if (pcrel && i.imm_operands)
6131 for (n1 = 0; n1 < i.operands; n1++)
6132 if (operand_type_check (i.types[n1], imm))
6134 /* Only one immediate is allowed for PC
6135 relative address. */
6136 gas_assert (sz == 0);
6138 i.op[n].disps->X_add_number -= sz;
6140 /* We should find the immediate. */
6141 gas_assert (sz != 0);
6144 p = frag_more (size);
6145 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
/* References to _GLOBAL_OFFSET_TABLE_ (optionally via an O_add with a
   GOT-related subexpression) are converted to GOTPC relocs.  */
6147 && GOT_symbol == i.op[n].disps->X_add_symbol
6148 && (((reloc_type == BFD_RELOC_32
6149 || reloc_type == BFD_RELOC_X86_64_32S
6150 || (reloc_type == BFD_RELOC_64
6152 && (i.op[n].disps->X_op == O_symbol
6153 || (i.op[n].disps->X_op == O_add
6154 && ((symbol_get_value_expression
6155 (i.op[n].disps->X_op_symbol)->X_op)
6157 || reloc_type == BFD_RELOC_32_PCREL))
/* Byte offset of the field from the start of the insn, spanning
   intermediate frags if the insn was split across them.  */
6161 if (insn_start_frag == frag_now)
6162 add = (p - frag_now->fr_literal) - insn_start_off;
6167 add = insn_start_frag->fr_fix - insn_start_off;
6168 for (fr = insn_start_frag->fr_next;
6169 fr && fr != frag_now; fr = fr->fr_next)
6171 add += p - frag_now->fr_literal;
6176 reloc_type = BFD_RELOC_386_GOTPC;
/* Adjust the displacement expression itself (was i.op[n].imms,
   which is the other arm of the operand union -- see FIX above).  */
6177 i.op[n].disps->X_add_number += add;
6179 else if (reloc_type == BFD_RELOC_64)
6180 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6182 /* Don't do the adjustment for x86-64, as there
6183 the pcrel addressing is relative to the _next_
6184 insn, and that is taken care of in other code. */
6185 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6187 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6188 i.op[n].disps, pcrel, reloc_type);
/* Emit the immediate field(s) of the current instruction: constants
   are written directly; anything else gets a fixup, with special
   handling of _GLOBAL_OFFSET_TABLE_+[.-.Lnn] expressions (converted
   to GOTPC relocs, see the long comment below).
   NOTE(review): elided excerpt -- the return type, locals (n, p, val)
   and several braces are on lines not shown.  */
6195 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6200 for (n = 0; n < i.operands; n++)
6202 if (operand_type_check (i.types[n], imm))
/* Constant immediate: emit the bytes directly.  */
6204 if (i.op[n].imms->X_op == O_constant)
6206 int size = imm_size (n);
6209 val = offset_in_range (i.op[n].imms->X_add_number,
6211 p = frag_more (size);
6212 md_number_to_chars (p, val, size);
6216 /* Not absolute_section.
6217 Need a 32-bit fixup (don't support 8bit
6218 non-absolute imms). Try to support other
6220 enum bfd_reloc_code_real reloc_type;
6221 int size = imm_size (n);
/* A 32-bit immediate of a 64-bit operation is sign-extended.  */
6224 if (i.types[n].bitfield.imm32s
6225 && (i.suffix == QWORD_MNEM_SUFFIX
6226 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6231 p = frag_more (size);
6232 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6234 /* This is tough to explain. We end up with this one if we
6235 * have operands that look like
6236 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6237 * obtain the absolute address of the GOT, and it is strongly
6238 * preferable from a performance point of view to avoid using
6239 * a runtime relocation for this. The actual sequence of
6240 * instructions often look something like:
6245 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6247 * The call and pop essentially return the absolute address
6248 * of the label .L66 and store it in %ebx. The linker itself
6249 * will ultimately change the first operand of the addl so
6250 * that %ebx points to the GOT, but to keep things simple, the
6251 * .o file must have this operand set so that it generates not
6252 * the absolute address of .L66, but the absolute address of
6253 * itself. This allows the linker itself simply treat a GOTPC
6254 * relocation as asking for a pcrel offset to the GOT to be
6255 * added in, and the addend of the relocation is stored in the
6256 * operand field for the instruction itself.
6258 * Our job here is to fix the operand so that it would add
6259 * the correct offset so that %ebx would point to itself. The
6260 * thing that is tricky is that .-.L66 will point to the
6261 * beginning of the instruction, so we need to further modify
6262 * the operand so that it will point to itself. There are
6263 * other cases where you have something like:
6265 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6267 * and here no correction would be required. Internally in
6268 * the assembler we treat operands of this form as not being
6269 * pcrel since the '.' is explicitly mentioned, and I wonder
6270 * whether it would simplify matters to do it this way. Who
6271 * knows. In earlier versions of the PIC patches, the
6272 * pcrel_adjust field was used to store the correction, but
6273 * since the expression is not pcrel, I felt it would be
6274 * confusing to do it this way. */
6276 if ((reloc_type == BFD_RELOC_32
6277 || reloc_type == BFD_RELOC_X86_64_32S
6278 || reloc_type == BFD_RELOC_64)
6280 && GOT_symbol == i.op[n].imms->X_add_symbol
6281 && (i.op[n].imms->X_op == O_symbol
6282 || (i.op[n].imms->X_op == O_add
6283 && ((symbol_get_value_expression
6284 (i.op[n].imms->X_op_symbol)->X_op)
/* Byte offset of the immediate from the start of the insn, spanning
   intermediate frags if the insn was split across them.  */
6289 if (insn_start_frag == frag_now)
6290 add = (p - frag_now->fr_literal) - insn_start_off;
6295 add = insn_start_frag->fr_fix - insn_start_off;
6296 for (fr = insn_start_frag->fr_next;
6297 fr && fr != frag_now; fr = fr->fr_next)
6299 add += p - frag_now->fr_literal;
6303 reloc_type = BFD_RELOC_386_GOTPC;
6305 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6307 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6308 i.op[n].imms->X_add_number += add;
6310 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6311 i.op[n].imms, 0, reloc_type);
6317 /* x86_cons_fix_new is called via the expression parsing code when a
6318 reloc is needed. We use this hook to get the correct .got reloc. */
/* One-shot state stashed by lex_got()/x86_cons() and consumed here.  */
6319 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6320 static int cons_sign = -1;
6323 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
/* Pick the reloc matching the data size and any stashed @got suffix.  */
6326 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
/* Reset so the stashed reloc applies to exactly one expression.  */
6328 got_reloc = NO_RELOC;
/* PE target: .secrel32 expressions become section-relative relocs
   (the surrounding #ifdef is on elided lines).  */
6331 if (exp->X_op == O_secrel)
6333 exp->X_op = O_symbol;
6334 r = BFD_RELOC_32_SECREL;
6338 fix_new_exp (frag, off, len, exp, 0, r);
/* lex_got: recognize @GOTOFF / @PLT / @TLS... suffixes in an operand.
   Compiled out (always NULL) for non-ELF objects without LEX_AT.
   NOTE(review): elided excerpt; the function's return type, some
   locals and its return statements are on lines not shown.  */
6341 #if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
6342 # define lex_got(reloc, adjust, types) NULL
6344 /* Parse operands of the form
6345 <symbol>@GOTOFF+<nnn>
6346 and similar .plt or .got references.
6348 If we find one, set up the correct relocation in RELOC and copy the
6349 input string, minus the `@GOTOFF' into a malloc'd buffer for
6350 parsing by the calling routine. Return this buffer, and if ADJUST
6351 is non-null set it to the length of the string we removed from the
6352 input line. Otherwise return NULL. */
6354 lex_got (enum bfd_reloc_code_real *rel,
6356 i386_operand_type *types)
6358 /* Some of the relocations depend on the size of what field is to
6359 be relocated. But in our callers i386_immediate and i386_displacement
6360 we don't yet know the operand size (this will be set by insn
6361 matching). Hence we record the word32 relocation here,
6362 and adjust the reloc according to the real size in reloc(). */
/* Suffix table.  rel[0] is the 32-bit (i386) reloc, rel[1] the 64-bit
   one; _dummy_first_bfd_reloc_code_real marks "unsupported in this
   mode".  types64 restricts the operand sizes allowed in 64-bit mode.  */
6363 static const struct {
6366 const enum bfd_reloc_code_real rel[2];
6367 const i386_operand_type types64;
6369 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6370 BFD_RELOC_X86_64_PLTOFF64 },
6371 OPERAND_TYPE_IMM64 },
6372 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6373 BFD_RELOC_X86_64_PLT32 },
6374 OPERAND_TYPE_IMM32_32S_DISP32 },
6375 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6376 BFD_RELOC_X86_64_GOTPLT64 },
6377 OPERAND_TYPE_IMM64_DISP64 },
6378 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6379 BFD_RELOC_X86_64_GOTOFF64 },
6380 OPERAND_TYPE_IMM64_DISP64 },
6381 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6382 BFD_RELOC_X86_64_GOTPCREL },
6383 OPERAND_TYPE_IMM32_32S_DISP32 },
6384 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6385 BFD_RELOC_X86_64_TLSGD },
6386 OPERAND_TYPE_IMM32_32S_DISP32 },
6387 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6388 _dummy_first_bfd_reloc_code_real },
6389 OPERAND_TYPE_NONE },
6390 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6391 BFD_RELOC_X86_64_TLSLD },
6392 OPERAND_TYPE_IMM32_32S_DISP32 },
6393 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6394 BFD_RELOC_X86_64_GOTTPOFF },
6395 OPERAND_TYPE_IMM32_32S_DISP32 },
6396 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6397 BFD_RELOC_X86_64_TPOFF32 },
6398 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6399 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6400 _dummy_first_bfd_reloc_code_real },
6401 OPERAND_TYPE_NONE },
6402 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6403 BFD_RELOC_X86_64_DTPOFF32 },
6404 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6405 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6406 _dummy_first_bfd_reloc_code_real },
6407 OPERAND_TYPE_NONE },
6408 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6409 _dummy_first_bfd_reloc_code_real },
6410 OPERAND_TYPE_NONE },
6411 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6412 BFD_RELOC_X86_64_GOT32 },
6413 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6414 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6415 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6416 OPERAND_TYPE_IMM32_32S_DISP32 },
6417 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6418 BFD_RELOC_X86_64_TLSDESC_CALL },
6419 OPERAND_TYPE_IMM32_32S_DISP32 },
/* Scan for '@', giving up at end of line or at a comma.  */
6427 for (cp = input_line_pointer; *cp != '@'; cp++)
6428 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6431 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6433 int len = gotrel[j].len;
/* Case-insensitive prefix match against the token after '@'.  */
6434 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6436 if (gotrel[j].rel[object_64bit] != 0)
6439 char *tmpbuf, *past_reloc;
6441 *rel = gotrel[j].rel[object_64bit];
6447 if (flag_code != CODE_64BIT)
6449 types->bitfield.imm32 = 1;
6450 types->bitfield.disp32 = 1;
6453 *types = gotrel[j].types64;
/* Any GOT-relative operand implies the GOT symbol exists.  */
6456 if (GOT_symbol == NULL)
6457 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6459 /* The length of the first part of our input line. */
6460 first = cp - input_line_pointer;
6462 /* The second part goes from after the reloc token until
6463 (and including) an end_of_line char or comma. */
6464 past_reloc = cp + 1 + len;
6466 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6468 second = cp + 1 - past_reloc;
6470 /* Allocate and copy string. The trailing NUL shouldn't
6471 be necessary, but be safe. */
6472 tmpbuf = (char *) xmalloc (first + second + 2)
6473 memcpy (tmpbuf, input_line_pointer, first);
6474 if (second != 0 && *past_reloc != ' ')
6475 /* Replace the relocation token with ' ', so that
6476 errors like foo@GOTOFF1 will be detected. */
6477 tmpbuf[first++] = ' ';
6478 memcpy (tmpbuf + first, past_reloc, second);
6479 tmpbuf[first + second] = '\0';
6483 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6484 gotrel[j].str, 1 << (5 + object_64bit));
6489 /* Might be a symbol version string. Don't as_bad here. */
/* Target hook for data directives (.long/.quad etc.): parse an
   expression, accepting @GOTOFF-style suffixes for 4-byte (and, for
   64-bit objects, 8-byte) data.
   NOTE(review): elided excerpt; the expression () call and some
   braces/guards are on lines not shown.  */
6494 x86_cons (expressionS *exp, int size)
/* Negate (rather than clear) intel_syntax so it can be restored by the
   matching negation at the end of the function.  */
6496 intel_syntax = -intel_syntax;
6499 if (size == 4 || (object_64bit && size == 8))
6501 /* Handle @GOTOFF and the like in an expression. */
6503 char *gotfree_input_line;
6506 save = input_line_pointer;
6507 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6508 if (gotfree_input_line)
6509 input_line_pointer = gotfree_input_line;
6513 if (gotfree_input_line)
6515 /* expression () has merrily parsed up to the end of line,
6516 or a comma - in the wrong buffer. Transfer how far
6517 input_line_pointer has moved to the right buffer. */
6518 input_line_pointer = (save
6519 + (input_line_pointer - gotfree_input_line)
6521 free (gotfree_input_line);
/* With a @reloc suffix, only a plain symbolic expression makes sense;
   reject constants, registers and other oddities.  */
6522 if (exp->X_op == O_constant
6523 || exp->X_op == O_absent
6524 || exp->X_op == O_illegal
6525 || exp->X_op == O_register
6526 || exp->X_op == O_big)
6528 char c = *input_line_pointer;
6529 *input_line_pointer = 0;
6530 as_bad (_("missing or invalid expression `%s'"), save);
6531 *input_line_pointer = c;
/* Restore the Intel-syntax flag negated above.  */
6538 intel_syntax = -intel_syntax;
/* Intel mode may need further simplification of the parsed tree (the
   guard for this call is on an elided line).  */
6541 i386_intel_simplify (exp);
/* Handler for signed data pseudo-ops: on x86-64, record that the value
   is signed (presumably via cons_sign) before emitting it.
   NOTE(review): elided excerpt -- most of the body is on lines not
   shown.  */
6546 signed_cons (int size)
6548 if (flag_code == CODE_64BIT)
/* PE-only directive handler for .secrel32: emit one or more 32-bit
   section-relative expressions, comma-separated (K&R-style parameter
   declaration retained from the original).
   NOTE(review): elided excerpt; the loop head and expression () call
   are on lines not shown.  */
6556 pe_directive_secrel (dummy)
6557 int dummy ATTRIBUTE_UNUSED;
/* Symbolic values are tagged O_secrel so x86_cons_fix_new picks
   BFD_RELOC_32_SECREL.  */
6564 if (exp.X_op == O_symbol)
6565 exp.X_op = O_secrel;
6567 emit_expr (&exp, 4);
6569 while (*input_line_pointer++ == ',');
/* The loop consumed one character past the last expression.  */
6571 input_line_pointer--;
6572 demand_empty_rest_of_line ();
/* Parse an immediate operand starting at IMM_START into
   i.op[this_operand].imms, handling @GOT-style relocation suffixes,
   then validate via i386_finalize_immediate.
   NOTE(review): elided excerpt; some declarations (exp, exp_seg) and
   braces are on lines not shown.  */
6577 i386_immediate (char *imm_start)
6579 char *save_input_line_pointer;
6580 char *gotfree_input_line;
6583 i386_operand_type types;
/* Start fully permissive; lex_got narrows TYPES for specific relocs.  */
6585 operand_type_set (&types, ~0);
6587 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6589 as_bad (_("at most %d immediate operands are allowed"),
6590 MAX_IMMEDIATE_OPERANDS);
6594 exp = &im_expressions[i.imm_operands++];
6595 i.op[this_operand].imms = exp;
6597 if (is_space_char (*imm_start))
/* Parse with input_line_pointer temporarily aimed at the operand.  */
6600 save_input_line_pointer = input_line_pointer;
6601 input_line_pointer = imm_start;
6603 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6604 if (gotfree_input_line)
6605 input_line_pointer = gotfree_input_line;
6607 exp_seg = expression (exp);
6610 if (*input_line_pointer)
6611 as_bad (_("junk `%s' after expression"), input_line_pointer);
6613 input_line_pointer = save_input_line_pointer;
6614 if (gotfree_input_line)
6616 free (gotfree_input_line);
/* A bare constant or register cannot carry a @reloc suffix; force the
   error path in i386_finalize_immediate.  */
6618 if (exp->X_op == O_constant || exp->X_op == O_register)
6619 exp->X_op = O_illegal;
6622 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
/* Validate the parsed immediate expression EXP and record the set of
   immediate sizes it may use in i.types[this_operand].  Returns 0 on
   error, nonzero on success (the return statements are partly on
   elided lines).  */
6626 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6627 i386_operand_type types, const char *imm_start)
6629 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6632 as_bad (_("missing or invalid immediate expression `%s'"),
6636 else if (exp->X_op == O_constant)
6638 /* Size it properly later. */
6639 i.types[this_operand].bitfield.imm64 = 1;
6640 /* If not 64bit, sign extend val. */
6641 if (flag_code != CODE_64BIT
6642 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6644 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6646 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
/* a.out can only represent immediates from a known set of sections.  */
6647 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6648 && exp_seg != absolute_section
6649 && exp_seg != text_section
6650 && exp_seg != data_section
6651 && exp_seg != bss_section
6652 && exp_seg != undefined_section
6653 && !bfd_is_com_section (exp_seg))
6655 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
/* Intel syntax handles register operands elsewhere.  */
6659 else if (!intel_syntax && exp->X_op == O_register)
6662 as_bad (_("illegal immediate register operand %s"), imm_start);
6667 /* This is an address. The size of the address will be
6668 determined later, depending on destination register,
6669 suffix, or the default for the section. */
6670 i.types[this_operand].bitfield.imm8 = 1;
6671 i.types[this_operand].bitfield.imm16 = 1;
6672 i.types[this_operand].bitfield.imm32 = 1;
6673 i.types[this_operand].bitfield.imm32s = 1;
6674 i.types[this_operand].bitfield.imm64 = 1;
/* Intersect with the sizes permitted by the @reloc suffix (if any).  */
6675 i.types[this_operand] = operand_type_and (i.types[this_operand],
/* Parse the scale factor of a memory operand (must be 1, 2, 4 or 8)
   and store its log2 in i.log2_scale_factor.
   NOTE(review): elided excerpt; the switch head, case labels and
   return statements are on lines not shown.  */
6683 i386_scale (char *scale)
6686 char *save = input_line_pointer;
6688 input_line_pointer = scale;
6689 val = get_absolute_expression ();
6694 i.log2_scale_factor = 0;
6697 i.log2_scale_factor = 1;
6700 i.log2_scale_factor = 2;
6703 i.log2_scale_factor = 3;
/* Any other value: isolate the offending text for the error message.  */
6707 char sep = *input_line_pointer;
6709 *input_line_pointer = '\0';
6710 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6712 *input_line_pointer = sep;
6713 input_line_pointer = save;
/* A scale without an index register is meaningless; warn and drop it.  */
6717 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6719 as_warn (_("scale factor of %d without an index register"),
6720 1 << i.log2_scale_factor);
6721 i.log2_scale_factor = 0;
6723 scale = input_line_pointer;
6724 input_line_pointer = save;
/* Parse the displacement part of a memory operand (or a jump target)
   between DISP_START and DISP_END into i.op[this_operand].disps:
   first compute the set of permissible displacement sizes, then parse
   the expression (handling @GOT-style suffixes) and finish via
   i386_finalize_displacement.
   NOTE(review): elided excerpt; declarations (exp, exp_seg, override,
   ret) and several braces are on lines not shown.  */
6729 i386_displacement (char *disp_start, char *disp_end)
6733 char *save_input_line_pointer;
6734 char *gotfree_input_line;
6736 i386_operand_type bigdisp, types = anydisp;
6739 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6741 as_bad (_("at most %d displacement operands are allowed"),
6742 MAX_MEMORY_OPERANDS);
/* Non-branch operands (and absolute-indirect jumps): displacement
   width follows the address size.  */
6746 operand_type_set (&bigdisp, 0);
6747 if ((i.types[this_operand].bitfield.jumpabsolute)
6748 || (!current_templates->start->opcode_modifier.jump
6749 && !current_templates->start->opcode_modifier.jumpdword))
6751 bigdisp.bitfield.disp32 = 1;
6752 override = (i.prefix[ADDR_PREFIX] != 0);
6753 if (flag_code == CODE_64BIT)
6757 bigdisp.bitfield.disp32s = 1;
6758 bigdisp.bitfield.disp64 = 1;
6761 else if ((flag_code == CODE_16BIT) ^ override)
6763 bigdisp.bitfield.disp32 = 0;
6764 bigdisp.bitfield.disp16 = 1;
6769 /* For PC-relative branches, the width of the displacement
6770 is dependent upon data size, not address size. */
6771 override = (i.prefix[DATA_PREFIX] != 0);
6772 if (flag_code == CODE_64BIT)
6774 if (override || i.suffix == WORD_MNEM_SUFFIX)
6775 bigdisp.bitfield.disp16 = 1;
6778 bigdisp.bitfield.disp32 = 1;
6779 bigdisp.bitfield.disp32s = 1;
6785 override = (i.suffix == (flag_code != CODE_16BIT
6787 : LONG_MNEM_SUFFIX));
6788 bigdisp.bitfield.disp32 = 1;
6789 if ((flag_code == CODE_16BIT) ^ override)
6791 bigdisp.bitfield.disp32 = 0;
6792 bigdisp.bitfield.disp16 = 1;
6796 i.types[this_operand] = operand_type_or (i.types[this_operand],
6799 exp = &disp_expressions[i.disp_operands];
6800 i.op[this_operand].disps = exp;
/* Parse the (NUL-terminated) displacement text in place.  */
6802 save_input_line_pointer = input_line_pointer;
6803 input_line_pointer = disp_start;
6804 END_STRING_AND_SAVE (disp_end);
6806 #ifndef GCC_ASM_O_HACK
6807 #define GCC_ASM_O_HACK 0
6810 END_STRING_AND_SAVE (disp_end + 1);
6811 if (i.types[this_operand].bitfield.baseIndex
6812 && displacement_string_end[-1] == '+')
6814 /* This hack is to avoid a warning when using the "o"
6815 constraint within gcc asm statements.
6818 #define _set_tssldt_desc(n,addr,limit,type) \
6819 __asm__ __volatile__ ( \
6821 "movw %w1,2+%0\n\t" \
6823 "movb %b1,4+%0\n\t" \
6824 "movb %4,5+%0\n\t" \
6825 "movb $0,6+%0\n\t" \
6826 "movb %h1,7+%0\n\t" \
6828 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
6830 This works great except that the output assembler ends
6831 up looking a bit weird if it turns out that there is
6832 no offset. You end up producing code that looks like:
6845 So here we provide the missing zero. */
6847 *displacement_string_end = '0';
6850 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6851 if (gotfree_input_line)
6852 input_line_pointer = gotfree_input_line;
6854 exp_seg = expression (exp);
6857 if (*input_line_pointer)
6858 as_bad (_("junk `%s' after expression"), input_line_pointer);
6860 RESTORE_END_STRING (disp_end + 1);
6862 input_line_pointer = save_input_line_pointer;
6863 if (gotfree_input_line)
6865 free (gotfree_input_line);
/* A bare constant or register cannot carry a @reloc suffix; force the
   error path in i386_finalize_displacement.  */
6867 if (exp->X_op == O_constant || exp->X_op == O_register)
6868 exp->X_op = O_illegal;
6871 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
6873 RESTORE_END_STRING (disp_end);
/* Validate the parsed displacement expression EXP and adjust
   i.types[this_operand] and i.reloc[this_operand] accordingly.
   Returns 0 on error, nonzero on success (return statements are
   partly on elided lines).  */
6879 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6880 i386_operand_type types, const char *disp_start)
6882 i386_operand_type bigdisp;
6885 /* We do this to make sure that the section symbol is in
6886 the symbol table. We will ultimately change the relocation
6887 to be relative to the beginning of the section. */
6888 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
6889 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
6890 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64
6892 if (exp->X_op != O_symbol)
6895 if (S_IS_LOCAL (exp->X_add_symbol)
6896 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
6897 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
6898 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
/* Rewrite sym@GOTOFF etc. as (sym - GOT_symbol) with a plain reloc.  */
6899 exp->X_op = O_subtract;
6900 exp->X_op_symbol = GOT_symbol;
6901 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
6902 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
6903 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6904 i.reloc[this_operand] = BFD_RELOC_64;
6906 i.reloc[this_operand] = BFD_RELOC_32;
6909 else if (exp->X_op == O_absent
6910 || exp->X_op == O_illegal
6911 || exp->X_op == O_big)
6914 as_bad (_("missing or invalid displacement expression `%s'"),
6919 else if (flag_code == CODE_64BIT
6920 && !i.prefix[ADDR_PREFIX]
6921 && exp->X_op == O_constant)
6923 /* Since displacement is signed extended to 64bit, don't allow
6924 disp32 and turn off disp32s if they are out of range. */
6925 i.types[this_operand].bitfield.disp32 = 0;
6926 if (!fits_in_signed_long (exp->X_add_number))
6928 i.types[this_operand].bitfield.disp32s = 0;
/* With a base/index register only disp8/disp32s forms exist, so an
   out-of-range constant is a hard error here.  */
6929 if (i.types[this_operand].bitfield.baseindex)
6931 as_bad (_("0x%lx out range of signed 32bit displacement"),
6932 (long) exp->X_add_number);
6938 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
/* a.out can only represent displacements from a known set of
   sections.  */
6939 else if (exp->X_op != O_constant
6940 && OUTPUT_FLAVOR == bfd_target_aout_flavour
6941 && exp_seg != absolute_section
6942 && exp_seg != text_section
6943 && exp_seg != data_section
6944 && exp_seg != bss_section
6945 && exp_seg != undefined_section
6946 && !bfd_is_com_section (exp_seg))
6948 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6953 /* Check if this is a displacement only operand. */
6954 bigdisp = i.types[this_operand];
6955 bigdisp.bitfield.disp8 = 0;
6956 bigdisp.bitfield.disp16 = 0;
6957 bigdisp.bitfield.disp32 = 0;
6958 bigdisp.bitfield.disp32s = 0;
6959 bigdisp.bitfield.disp64 = 0;
/* If nothing but disp bits were set, narrow to the sizes allowed by
   the @reloc suffix (if any).  */
6960 if (operand_type_all_zero (&bigdisp))
6961 i.types[this_operand] = operand_type_and (i.types[this_operand],
6967 /* Make sure the memory operand we've been dealt is valid.
6968 Return 1 on success, 0 on a failure. */
/* Validate the base/index registers of a memory operand against the
   current code size (16/32/64-bit) and any address-size prefix.  String
   instructions get special handling (single fixed register: rSI/rDI/rBX).
   With INFER_ADDR_PREFIX, a mismatched register size causes a retry with
   an inferred 0x67 prefix instead of an immediate error.
   NOTE(review): interior lines (braces, labels such as the retry target,
   some error-path lines) are missing from this extract.  */
6971 i386_index_check (const char *operand_string)
6974 const char *kind = "base/index";
6975 #if INFER_ADDR_PREFIX
6981 if (current_templates->start->opcode_modifier.isstring
6982 && !current_templates->start->opcode_modifier.immext
6983 && (current_templates->end[-1].opcode_modifier.isstring
6986 /* Memory operands of string insns are special in that they only allow
6987 a single register (rDI, rSI, or rBX) as their memory address. */
6988 unsigned int expected;
6990 kind = "string address";
6992 if (current_templates->start->opcode_modifier.w)
6994 i386_operand_type type = current_templates->end[-1].operand_types[0];
6996 if (!type.bitfield.baseindex
6997 || ((!i.mem_operands != !intel_syntax)
6998 && current_templates->end[-1].operand_types[1]
6999 .bitfield.baseindex))
7000 type = current_templates->end[-1].operand_types[1];
/* ES-segment operands use rDI, others rSI; non-w string insns (xlat)
   use rBX.  reg_num 7/6/3 are the encodings of DI/SI/BX.  */
7001 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7004 expected = 3 /* rBX */;
7006 if (!i.base_reg || i.index_reg
7007 || operand_type_check (i.types[this_operand], disp))
/* The base register's width must match the effective address size:
   64-bit code uses reg64 (reg32 with 0x67), 32/16-bit code toggles
   between reg32 and reg16 depending on the prefix.  */
7009 else if (!(flag_code == CODE_64BIT
7010 ? i.prefix[ADDR_PREFIX]
7011 ? i.base_reg->reg_type.bitfield.reg32
7012 : i.base_reg->reg_type.bitfield.reg64
7013 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7014 ? i.base_reg->reg_type.bitfield.reg32
7015 : i.base_reg->reg_type.bitfield.reg16))
7017 else if (i.base_reg->reg_num != expected)
/* Wrong register: search the register table for the one that WOULD be
   valid here so the diagnostic can name it.  */
7024 for (j = 0; j < i386_regtab_size; ++j)
7025 if ((flag_code == CODE_64BIT
7026 ? i.prefix[ADDR_PREFIX]
7027 ? i386_regtab[j].reg_type.bitfield.reg32
7028 : i386_regtab[j].reg_type.bitfield.reg64
7029 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7030 ? i386_regtab[j].reg_type.bitfield.reg32
7031 : i386_regtab[j].reg_type.bitfield.reg16)
7032 && i386_regtab[j].reg_num == expected)
7034 gas_assert (j < i386_regtab_size);
7035 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7037 intel_syntax ? '[' : '(',
7039 i386_regtab[j].reg_name,
7040 intel_syntax ? ']' : ')');
/* Non-string memory operand, 64-bit code: base must be a 64-bit reg
   (or RIP), index a 64-bit reg or the pseudo RegRiz; with 0x67 the
   32-bit equivalents (EIP / RegEiz) apply instead.  */
7044 else if (flag_code == CODE_64BIT)
7047 && ((i.prefix[ADDR_PREFIX] == 0
7048 && !i.base_reg->reg_type.bitfield.reg64)
7049 || (i.prefix[ADDR_PREFIX]
7050 && !i.base_reg->reg_type.bitfield.reg32))
7052 || i.base_reg->reg_num !=
7053 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7055 && (!i.index_reg->reg_type.bitfield.baseindex
7056 || (i.prefix[ADDR_PREFIX] == 0
7057 && i.index_reg->reg_num != RegRiz
7058 && !i.index_reg->reg_type.bitfield.reg64
7060 || (i.prefix[ADDR_PREFIX]
7061 && i.index_reg->reg_num != RegEiz
7062 && !i.index_reg->reg_type.bitfield.reg32))))
/* 16-bit addressing: only the classic base(BX/BP)+index(SI/DI) forms
   with scale 1 are encodable; reg_num < 6 vs >= 6 distinguishes the
   base-capable from the index-capable registers here.  */
7067 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7071 && (!i.base_reg->reg_type.bitfield.reg16
7072 || !i.base_reg->reg_type.bitfield.baseindex))
7074 && (!i.index_reg->reg_type.bitfield.reg16
7075 || !i.index_reg->reg_type.bitfield.baseindex
7077 && i.base_reg->reg_num < 6
7078 && i.index_reg->reg_num >= 6
7079 && i.log2_scale_factor == 0))))
7086 && !i.base_reg->reg_type.bitfield.reg32)
7088 && ((!i.index_reg->reg_type.bitfield.reg32
7089 && i.index_reg->reg_num != RegEiz)
7090 || !i.index_reg->reg_type.bitfield.baseindex)))
/* Check failed: if no memory operand has been committed yet and no
   explicit address-size prefix was given, infer one (0x67), flip any
   Disp16/Disp32 bit to match, and retry the whole check.  */
7096 #if INFER_ADDR_PREFIX
7097 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7099 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7101 /* Change the size of any displacement too. At most one of
7102 Disp16 or Disp32 is set.
7103 FIXME. There doesn't seem to be any real need for separate
7104 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7105 Removing them would probably clean up the code quite a lot. */
7106 if (flag_code != CODE_64BIT
7107 && (i.types[this_operand].bitfield.disp16
7108 || i.types[this_operand].bitfield.disp32))
7109 i.types[this_operand]
7110 = operand_type_xor (i.types[this_operand], disp16_32);
7115 as_bad (_("`%s' is not a valid %s expression"),
7120 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7122 flag_code_names[i.prefix[ADDR_PREFIX]
7123 ? flag_code == CODE_32BIT
7132 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
/* Parse one AT&T-syntax operand string into the global insn template
   `i': register, immediate ('$'), or memory reference (optional segment
   override, displacement, and "(base,index,scale)" part).  Returns
   non-zero on success, zero after issuing a diagnostic.
   NOTE(review): many interior lines (braces, `++op_string' advances,
   `return 0;' error exits) are elided from this extract.  */
7136 i386_att_operand (char *operand_string)
7140 char *op_string = operand_string;
7142 if (is_space_char (*op_string))
7145 /* We check for an absolute prefix (differentiating,
7146 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
7147 if (*op_string == ABSOLUTE_PREFIX)
7150 if (is_space_char (*op_string))
7152 i.types[this_operand].bitfield.jumpabsolute = 1;
7155 /* Check if operand is a register. */
7156 if ((r = parse_register (op_string, &end_op)) != NULL)
7158 i386_operand_type temp;
7160 /* Check for a segment override by searching for ':' after a
7161 segment register. */
7163 if (is_space_char (*op_string))
7165 if (*op_string == ':'
7166 && (r->reg_type.bitfield.sreg2
7167 || r->reg_type.bitfield.sreg3))
/* Record the override for the upcoming memory operand; the selector
   switch (presumably on r->reg_num -- elided here) picks the seg.  */
7172 i.seg[i.mem_operands] = &es;
7175 i.seg[i.mem_operands] = &cs;
7178 i.seg[i.mem_operands] = &ss;
7181 i.seg[i.mem_operands] = &ds;
7184 i.seg[i.mem_operands] = &fs;
7187 i.seg[i.mem_operands] = &gs;
7191 /* Skip the ':' and whitespace. */
7193 if (is_space_char (*op_string))
7196 if (!is_digit_char (*op_string)
7197 && !is_identifier_char (*op_string)
7198 && *op_string != '('
7199 && *op_string != ABSOLUTE_PREFIX)
7201 as_bad (_("bad memory operand `%s'"), op_string);
7204 /* Handle case of %es:*foo. */
7205 if (*op_string == ABSOLUTE_PREFIX)
7208 if (is_space_char (*op_string))
7210 i.types[this_operand].bitfield.jumpabsolute = 1;
7212 goto do_memory_reference;
7216 as_bad (_("junk `%s' after register"), op_string);
/* Plain register operand: merge its type (minus baseindex) into the
   operand type and remember the reg_entry.  */
7220 temp.bitfield.baseindex = 0;
7221 i.types[this_operand] = operand_type_or (i.types[this_operand],
7223 i.types[this_operand].bitfield.unspecified = 0;
7224 i.op[this_operand].regs = r;
7227 else if (*op_string == REGISTER_PREFIX)
7229 as_bad (_("bad register name `%s'"), op_string);
7232 else if (*op_string == IMMEDIATE_PREFIX)
7235 if (i.types[this_operand].bitfield.jumpabsolute)
7237 as_bad (_("immediate operand illegal with absolute jump"));
7240 if (!i386_immediate (op_string))
7243 else if (is_digit_char (*op_string)
7244 || is_identifier_char (*op_string)
7245 || *op_string == '(')
7247 /* This is a memory reference of some sort. */
7250 /* Start and end of displacement string expression (if found). */
7251 char *displacement_string_start;
7252 char *displacement_string_end;
7254 do_memory_reference:
7255 if ((i.mem_operands == 1
7256 && !current_templates->start->opcode_modifier.isstring)
7257 || i.mem_operands == 2)
7259 as_bad (_("too many memory references for `%s'"),
7260 current_templates->start->name);
7264 /* Check for base index form. We detect the base index form by
7265 looking for an ')' at the end of the operand, searching
7266 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7268 base_string = op_string + strlen (op_string);
7271 if (is_space_char (*base_string))
7274 /* If we only have a displacement, set-up for it to be parsed later. */
7275 displacement_string_start = op_string;
7276 displacement_string_end = base_string + 1;
7278 if (*base_string == ')')
7281 unsigned int parens_balanced = 1;
7282 /* We've already checked that the number of left & right ()'s are
7283 equal, so this loop will not be infinite. */
/* Walk backwards to the '(' matching the trailing ')'.  */
7287 if (*base_string == ')')
7289 if (*base_string == '(')
7292 while (parens_balanced);
7294 temp_string = base_string;
7296 /* Skip past '(' and whitespace. */
7298 if (is_space_char (*base_string))
7301 if (*base_string == ','
7302 || ((i.base_reg = parse_register (base_string, &end_op))
7305 displacement_string_end = temp_string;
7307 i.types[this_operand].bitfield.baseindex = 1;
7311 base_string = end_op;
7312 if (is_space_char (*base_string))
7316 /* There may be an index reg or scale factor here. */
7317 if (*base_string == ',')
7320 if (is_space_char (*base_string))
7323 if ((i.index_reg = parse_register (base_string, &end_op))
7326 base_string = end_op;
7327 if (is_space_char (*base_string))
7329 if (*base_string == ',')
7332 if (is_space_char (*base_string))
7335 else if (*base_string != ')')
7337 as_bad (_("expecting `,' or `)' "
7338 "after index register in `%s'"),
7343 else if (*base_string == REGISTER_PREFIX)
7345 as_bad (_("bad register name `%s'"), base_string);
7349 /* Check for scale factor. */
7350 if (*base_string != ')')
7352 char *end_scale = i386_scale (base_string);
7357 base_string = end_scale;
7358 if (is_space_char (*base_string))
7360 if (*base_string != ')')
7362 as_bad (_("expecting `)' "
7363 "after scale factor in `%s'"),
7368 else if (!i.index_reg)
7370 as_bad (_("expecting index register or scale factor "
7371 "after `,'; got '%c'"),
7376 else if (*base_string != ')')
7378 as_bad (_("expecting `,' or `)' "
7379 "after base register in `%s'"),
7384 else if (*base_string == REGISTER_PREFIX)
7386 as_bad (_("bad register name `%s'"), base_string);
7391 /* If there's an expression beginning the operand, parse it,
7392 assuming displacement_string_start and
7393 displacement_string_end are meaningful. */
7394 if (displacement_string_start != displacement_string_end)
7396 if (!i386_displacement (displacement_string_start,
7397 displacement_string_end))
/* NOTE(review): "®16_inoutportreg" below is a mis-encoding introduced
   by extraction; the original token is "&reg16_inoutportreg".  */
7401 /* Special case for (%dx) while doing input/output op. */
7403 && operand_type_equal (&i.base_reg->reg_type,
7404 ®16_inoutportreg)
7406 && i.log2_scale_factor == 0
7407 && i.seg[i.mem_operands] == 0
7408 && !operand_type_check (i.types[this_operand], disp))
7410 i.types[this_operand] = inoutportreg;
7414 if (i386_index_check (operand_string) == 0)
7416 i.types[this_operand].bitfield.mem = 1;
7421 /* It's not a memory operand; argh! */
7422 as_bad (_("invalid char %s beginning operand %d `%s'"),
7423 output_invalid (*op_string),
7428 return 1; /* Normal return. */
7431 /* md_estimate_size_before_relax()
7433 Called just before relax() for rs_machine_dependent frags. The x86
7434 assembler uses these frags to handle variable size jump
7437 Any symbol that is now undefined will not become defined.
7438 Return the correct fr_subtype in the frag.
7439 Return the initial "guess for variable size of frag" to caller.
7440 The guess is actually the growth beyond the fixed part. Whatever
7441 we do to grow the fixed or variable part contributes to our
/* Estimate frag growth before relaxation (K&R-style definition; the
   return type and parameter declarations are on elided lines).  For
   symbols that can never be relaxed (other segment, external/weak/ifunc
   under ELF, weak under PE-COFF) the jump is widened immediately and a
   fix is emitted; otherwise the md_relax_table guess is returned.
   NOTE(review): case labels, `break's and braces are elided here.  */
7445 md_estimate_size_before_relax (fragP, segment)
7449 /* We've already got fragP->fr_subtype right; all we have to do is
7450 check for un-relaxable symbols. On an ELF system, we can't relax
7451 an externally visible symbol, because it may be overridden by a
7453 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7454 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7456 && (S_IS_EXTERNAL (fragP->fr_symbol)
7457 || S_IS_WEAK (fragP->fr_symbol)
7458 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7459 & BSF_GNU_INDIRECT_FUNCTION))))
7461 #if defined (OBJ_COFF) && defined (TE_PE)
7462 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7463 && S_IS_WEAK (fragP->fr_symbol))
7467 /* Symbol is undefined in this segment, or we need to keep a
7468 reloc so that weak symbols can be overridden. */
7469 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7470 enum bfd_reloc_code_real reloc_type;
7471 unsigned char *opcode;
/* fr_var carries an explicit reloc override (e.g. from a suffix);
   otherwise pick a plain 16/32-bit PC-relative reloc by code size.  */
7474 if (fragP->fr_var != NO_RELOC)
7475 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7477 reloc_type = BFD_RELOC_16_PCREL;
7479 reloc_type = BFD_RELOC_32_PCREL;
7481 old_fr_fix = fragP->fr_fix;
7482 opcode = (unsigned char *) fragP->fr_opcode;
7484 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7487 /* Make jmp (0xeb) a (d)word displacement jump. */
7489 fragP->fr_fix += size;
7490 fix_new (fragP, old_fr_fix, size,
7492 fragP->fr_offset, 1,
7498 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7500 /* Negate the condition, and branch past an
7501 unconditional jump. */
7504 /* Insert an unconditional jump. */
7506 /* We added two extra opcode bytes, and have a two byte
7508 fragP->fr_fix += 2 + 2;
7509 fix_new (fragP, old_fr_fix + 2, 2,
7511 fragP->fr_offset, 1,
/* -mno-cond-jump-promotion: keep the 1-byte conditional jump and emit
   a signed 8-bit fix instead of widening.  */
7518 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7523 fixP = fix_new (fragP, old_fr_fix, 1,
7525 fragP->fr_offset, 1,
7527 fixP->fx_signed = 1;
7531 /* This changes the byte-displacement jump 0x7N
7532 to the (d)word-displacement jump 0x0f,0x8N. */
7533 opcode[1] = opcode[0] + 0x10;
7534 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7535 /* We've added an opcode byte. */
7536 fragP->fr_fix += 1 + size;
7537 fix_new (fragP, old_fr_fix + 1, size,
7539 fragP->fr_offset, 1,
7544 BAD_CASE (fragP->fr_subtype);
/* Growth is whatever was added to the fixed part above.  */
7548 return fragP->fr_fix - old_fr_fix;
7551 /* Guess size depending on current relax state. Initially the relax
7552 state will correspond to a short jump and we return 1, because
7553 the variable part of the frag (the branch offset) is one byte
7554 long. However, we can relax a section more than once and in that
7555 case we must either set fr_subtype back to the unrelaxed state,
7556 or return the value for the appropriate branch. */
7557 return md_relax_table[fragP->fr_subtype].rlx_length;
7560 /* Called after relax() is finished.
7562 In: Address of frag.
7563 fr_type == rs_machine_dependent.
7564 fr_subtype is what the address relaxed to.
7566 Out: Any fixSs and constants are set up.
7567 Caller will turn frag into a ".space 0". */
/* Rewrite a relaxed jump frag into its final form: patch the opcode
   bytes for the chosen (byte/word/dword) displacement form, then store
   the displacement after the opcode and grow fr_fix (K&R definition;
   the fragS parameter declaration is on an elided line).
   NOTE(review): `break's and some case bodies are elided here.  */
7570 md_convert_frag (abfd, sec, fragP)
7571 bfd *abfd ATTRIBUTE_UNUSED;
7572 segT sec ATTRIBUTE_UNUSED;
7575 unsigned char *opcode;
7576 unsigned char *where_to_put_displacement = NULL;
7577 offsetT target_address;
7578 offsetT opcode_address;
7579 unsigned int extension = 0;
7580 offsetT displacement_from_opcode_start;
7582 opcode = (unsigned char *) fragP->fr_opcode;
7584 /* Address we want to reach in file space. */
7585 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7587 /* Address opcode resides at in file space. */
7588 opcode_address = fragP->fr_address + fragP->fr_fix;
7590 /* Displacement from opcode start to fill into instruction. */
7591 displacement_from_opcode_start = target_address - opcode_address;
7593 if ((fragP->fr_subtype & BIG) == 0)
7595 /* Don't have to change opcode. */
7596 extension = 1; /* 1 opcode + 1 displacement */
7597 where_to_put_displacement = &opcode[1];
7601 if (no_cond_jump_promotion
7602 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7603 as_warn_where (fragP->fr_file, fragP->fr_line,
7604 _("long jump required"));
7606 switch (fragP->fr_subtype)
7608 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7609 extension = 4; /* 1 opcode + 4 displacement */
7611 where_to_put_displacement = &opcode[1];
7614 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7615 extension = 2; /* 1 opcode + 2 displacement */
7617 where_to_put_displacement = &opcode[1];
/* Conditional 0x7N becomes two-byte 0x0f 0x8N before the wide disp.  */
7620 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7621 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7622 extension = 5; /* 2 opcode + 4 displacement */
7623 opcode[1] = opcode[0] + 0x10;
7624 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7625 where_to_put_displacement = &opcode[2];
7628 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7629 extension = 3; /* 2 opcode + 2 displacement */
7630 opcode[1] = opcode[0] + 0x10;
7631 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7632 where_to_put_displacement = &opcode[2];
7635 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7640 where_to_put_displacement = &opcode[3];
7644 BAD_CASE (fragP->fr_subtype);
7649 /* If size if less then four we are sure that the operand fits,
7650 but if it's 4, then it could be that the displacement is larger
7652 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7654 && ((addressT) (displacement_from_opcode_start - extension
7655 + ((addressT) 1 << 31))
7656 > (((addressT) 2 << 31) - 1)))
7658 as_bad_where (fragP->fr_file, fragP->fr_line,
7659 _("jump target out of range"));
7660 /* Make us emit 0. */
7661 displacement_from_opcode_start = extension;
7663 /* Now put displacement after opcode. */
7664 md_number_to_chars ((char *) where_to_put_displacement,
7665 (valueT) (displacement_from_opcode_start - extension),
7666 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7667 fragP->fr_fix += extension;
7670 /* Apply a fixup (fixS) to segment data, once it has been determined
7671 by our caller that we have all the info we need to fix it up.
7673 On the 386, immediates, displacements, and data pointers are all in
7674 the same (little-endian) format, so we don't need to care about which
/* Apply a fixup to the frag data (K&R definition; fixP/valP parameter
   declarations are interleaved with their comments on elided lines).
   Adjusts PC-relative values for bfd_install_relocation quirks, forces
   GOT/PLT/TLS reloc addends, and finally writes the little-endian value.
   NOTE(review): several case labels, `break's and braces are elided.  */
7678 md_apply_fix (fixP, valP, seg)
7679 /* The fix we're to put in. */
7681 /* Pointer to the value of the bits. */
7683 /* Segment fix is from. */
7684 segT seg ATTRIBUTE_UNUSED;
7686 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7687 valueT value = *valP;
7689 #if !defined (TE_Mach)
/* PC-relative fixups: normalize the reloc type to the matching
   BFD_RELOC_*_PCREL of the same width.  */
7692 switch (fixP->fx_r_type)
7698 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7701 case BFD_RELOC_X86_64_32S:
7702 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7705 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7708 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7713 if (fixP->fx_addsy != NULL
7714 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7715 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7716 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7717 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7718 && !use_rela_relocations)
7720 /* This is a hack. There should be a better way to handle this.
7721 This covers for the fact that bfd_install_relocation will
7722 subtract the current location (for partial_inplace, PC relative
7723 relocations); see more below. */
7727 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7730 value += fixP->fx_where + fixP->fx_frag->fr_address;
7732 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7735 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7738 || (symbol_section_p (fixP->fx_addsy)
7739 && sym_seg != absolute_section))
7740 && !generic_force_reloc (fixP))
7742 /* Yes, we add the values in twice. This is because
7743 bfd_install_relocation subtracts them out again. I think
7744 bfd_install_relocation is broken, but I don't dare change
7746 value += fixP->fx_where + fixP->fx_frag->fr_address;
7750 #if defined (OBJ_COFF) && defined (TE_PE)
7751 /* For some reason, the PE format does not store a
7752 section address offset for a PC relative symbol. */
7753 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7754 || S_IS_WEAK (fixP->fx_addsy))
7755 value += md_pcrel_from (fixP);
7758 #if defined (OBJ_COFF) && defined (TE_PE)
7759 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7761 value -= S_GET_VALUE (fixP->fx_addsy);
7765 /* Fix a few things - the dynamic linker expects certain values here,
7766 and we must not disappoint it. */
7767 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7768 if (IS_ELF && fixP->fx_addsy)
7769 switch (fixP->fx_r_type)
7771 case BFD_RELOC_386_PLT32:
7772 case BFD_RELOC_X86_64_PLT32:
7773 /* Make the jump instruction point to the address of the operand. At
7774 runtime we merely add the offset to the actual PLT entry. */
7778 case BFD_RELOC_386_TLS_GD:
7779 case BFD_RELOC_386_TLS_LDM:
7780 case BFD_RELOC_386_TLS_IE_32:
7781 case BFD_RELOC_386_TLS_IE:
7782 case BFD_RELOC_386_TLS_GOTIE:
7783 case BFD_RELOC_386_TLS_GOTDESC:
7784 case BFD_RELOC_X86_64_TLSGD:
7785 case BFD_RELOC_X86_64_TLSLD:
7786 case BFD_RELOC_X86_64_GOTTPOFF:
7787 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7788 value = 0; /* Fully resolved at runtime. No addend. */
7790 case BFD_RELOC_386_TLS_LE:
7791 case BFD_RELOC_386_TLS_LDO_32:
7792 case BFD_RELOC_386_TLS_LE_32:
7793 case BFD_RELOC_X86_64_DTPOFF32:
7794 case BFD_RELOC_X86_64_DTPOFF64:
7795 case BFD_RELOC_X86_64_TPOFF32:
7796 case BFD_RELOC_X86_64_TPOFF64:
7797 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7800 case BFD_RELOC_386_TLS_DESC_CALL:
7801 case BFD_RELOC_X86_64_TLSDESC_CALL:
7802 value = 0; /* Fully resolved at runtime. No addend. */
7803 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7807 case BFD_RELOC_386_GOT32:
7808 case BFD_RELOC_X86_64_GOT32:
7809 value = 0; /* Fully resolved at runtime. No addend. */
7812 case BFD_RELOC_VTABLE_INHERIT:
7813 case BFD_RELOC_VTABLE_ENTRY:
7820 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
7822 #endif /* !defined (TE_Mach) */
7824 /* Are we finished with this relocation now? */
7825 if (fixP->fx_addsy == NULL)
7827 #if defined (OBJ_COFF) && defined (TE_PE)
7828 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7831 /* Remember value for tc_gen_reloc. */
7832 fixP->fx_addnumber = value;
7833 /* Clear out the frag for now. */
7837 else if (use_rela_relocations)
7839 fixP->fx_no_overflow = 1;
7840 /* Remember value for tc_gen_reloc. */
7841 fixP->fx_addnumber = value;
7845 md_number_to_chars (p, value, fixP->fx_size);
/* Convert an ASCII floating-point literal via the shared IEEE helper;
   FALSE selects little-endian littlenum order (the "bigendian 386"
   comment below is the original's joke).  */
7849 md_atof (int type, char *litP, int *sizeP)
7851 /* This outputs the LITTLENUMs in REVERSE order;
7852 in accord with the bigendian 386. */
7853 return ieee_md_atof (type, litP, sizeP, FALSE);
/* Render an invalid operand character for diagnostics: printable chars
   quoted, others shown as "(0xNN)".  The buffer is static, so the
   returned pointer is only valid until the next call.
   NOTE(review): the branch selecting between the two snprintf calls is
   on elided lines.  */
7856 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
7859 output_invalid (int c)
7862 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7865 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7866 "(0x%x)", (unsigned char) c);
7867 return output_invalid_buf;
7870 /* REG_STRING starts *before* REGISTER_PREFIX. */
/* Look up a register name (with optional '%' prefix and the "%st(N)"
   FP-stack syntax) in reg_hash, then reject registers the current CPU
   arch / code mode cannot use.  Returns the reg_entry or NULL; *end_op
   is set past the consumed text.  NOTE(review): some lines (e.g. the
   `*end_op = s;' assignments, hash-lookup for st(N)) are elided.  */
7872 static const reg_entry *
7873 parse_real_register (char *reg_string, char **end_op)
7875 char *s = reg_string;
7877 char reg_name_given[MAX_REG_NAME_SIZE + 1];
7880 /* Skip possible REGISTER_PREFIX and possible whitespace. */
7881 if (*s == REGISTER_PREFIX)
7884 if (is_space_char (*s))
/* Copy the canonicalized name (register_chars folds case) into the
   bounded local buffer; overlong names are not registers.  */
7888 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
7890 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
7891 return (const reg_entry *) NULL;
7895 /* For naked regs, make sure that we are not dealing with an identifier.
7896 This prevents confusing an identifier like `eax_var' with register
7898 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
7899 return (const reg_entry *) NULL;
7903 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
7905 /* Handle floating point regs, allowing spaces in the (i) part. */
7906 if (r == i386_regtab /* %st is first entry of table */)
7908 if (is_space_char (*s))
7913 if (is_space_char (*s))
7915 if (*s >= '0' && *s <= '7')
7919 if (is_space_char (*s))
/* Bare "%st" with no valid "(N)" suffix resolves to st(0).  */
7924 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
7929 /* We have "%st(" then garbage. */
7930 return (const reg_entry *) NULL;
7934 if (r == NULL || allow_pseudo_reg)
7937 if (operand_type_all_zero (&r->reg_type))
7938 return (const reg_entry *) NULL;
/* Reject register classes the selected CPU lacks: 386+ regs, x87,
   MMX, SSE, AVX -- each gated on the corresponding cpu_arch_flags bit.  */
7940 if ((r->reg_type.bitfield.reg32
7941 || r->reg_type.bitfield.sreg3
7942 || r->reg_type.bitfield.control
7943 || r->reg_type.bitfield.debug
7944 || r->reg_type.bitfield.test)
7945 && !cpu_arch_flags.bitfield.cpui386)
7946 return (const reg_entry *) NULL;
7948 if (r->reg_type.bitfield.floatreg
7949 && !cpu_arch_flags.bitfield.cpu8087
7950 && !cpu_arch_flags.bitfield.cpu287
7951 && !cpu_arch_flags.bitfield.cpu387)
7952 return (const reg_entry *) NULL;
7954 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
7955 return (const reg_entry *) NULL;
7957 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
7958 return (const reg_entry *) NULL;
7960 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
7961 return (const reg_entry *) NULL;
7963 /* Don't allow fake index register unless allow_index_reg isn't 0. */
7964 if (!allow_index_reg
7965 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
7966 return (const reg_entry *) NULL;
/* 64-bit-only registers (REX-extended or reg64) are invalid outside
   CODE_64BIT, except control regs on CPUs with long mode.  */
7968 if (((r->reg_flags & (RegRex64 | RegRex))
7969 || r->reg_type.bitfield.reg64)
7970 && (!cpu_arch_flags.bitfield.cpulm
7971 || !operand_type_equal (&r->reg_type, &control))
7972 && flag_code != CODE_64BIT)
7973 return (const reg_entry *) NULL;
7975 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
7976 return (const reg_entry *) NULL;
7981 /* REG_STRING starts *before* REGISTER_PREFIX. */
/* Front end for register parsing: try a real register first; when naked
   registers are allowed, also accept a symbol equated to a register
   (segment reg_section) via the symbol table.
   NOTE(review): the `else if (allow_naked_reg)' branch header and some
   braces are on elided lines.  */
7983 static const reg_entry *
7984 parse_register (char *reg_string, char **end_op)
7988 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
7989 r = parse_real_register (reg_string, end_op);
7994 char *save = input_line_pointer;
/* Temporarily point the scanner at the operand text so
   get_symbol_end()/symbol_find() can run, then restore it.  */
7998 input_line_pointer = reg_string;
7999 c = get_symbol_end ();
8000 symbolP = symbol_find (reg_string);
8001 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8003 const expressionS *e = symbol_get_value_expression (symbolP);
8005 know (e->X_op == O_register);
8006 know (e->X_add_number >= 0
8007 && (valueT) e->X_add_number < i386_regtab_size);
8008 r = i386_regtab + e->X_add_number;
8009 *end_op = input_line_pointer;
8011 *input_line_pointer = c;
8012 input_line_pointer = save;
/* md hook: let the expression parser treat NAME as a register.  On a
   register match, fill E as an O_register expression and stash the
   terminating char in *nextcharP; otherwise defer to the Intel-syntax
   name parser (or report no match in AT&T syntax).  */
8018 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8021 char *end = input_line_pointer;
8024 r = parse_register (name, &input_line_pointer);
8025 if (r && end <= input_line_pointer)
8027 *nextcharP = *input_line_pointer;
8028 *input_line_pointer = 0;
8029 e->X_op = O_register;
8030 e->X_add_number = r - i386_regtab;
8033 input_line_pointer = end;
8035 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
/* md hook for otherwise-unparsable operands: a '%'-prefixed register
   becomes an O_register expression; a '['-bracketed sub-expression
   (Intel syntax only) is wrapped into an expr symbol.
   NOTE(review): switch braces/`break's and the '[' case label are on
   elided lines.  */
8039 md_operand (expressionS *e)
8044 switch (*input_line_pointer)
8046 case REGISTER_PREFIX:
8047 r = parse_real_register (input_line_pointer, &end);
8050 e->X_op = O_register;
8051 e->X_add_number = r - i386_regtab;
8052 input_line_pointer = end;
8057 gas_assert (intel_syntax);
8058 end = input_line_pointer++;
8060 if (*input_line_pointer == ']')
8062 ++input_line_pointer;
/* Re-package the bracketed expression as a single expr symbol so the
   caller sees one operand.  */
8063 e->X_op_symbol = make_expr_symbol (e);
8064 e->X_add_symbol = NULL;
8065 e->X_add_number = 0;
8071 input_line_pointer = end;
/* Command-line option tables: short options (extra SVR4 letters under
   ELF), OPTION_* codes for the long options, and the getopt_long table
   consumed by md_parse_option() below.  */
8078 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8079 const char *md_shortopts = "kVQ:sqn";
8081 const char *md_shortopts = "qn";
8084 #define OPTION_32 (OPTION_MD_BASE + 0)
8085 #define OPTION_64 (OPTION_MD_BASE + 1)
8086 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8087 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8088 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8089 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8090 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8091 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8092 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8093 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8094 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8095 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8096 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8098 struct option md_longopts[] =
8100 {"32", no_argument, NULL, OPTION_32},
8101 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8102 || defined (TE_PE) || defined (TE_PEP))
8103 {"64", no_argument, NULL, OPTION_64},
8105 {"divide", no_argument, NULL, OPTION_DIVIDE},
8106 {"march", required_argument, NULL, OPTION_MARCH},
8107 {"mtune", required_argument, NULL, OPTION_MTUNE},
8108 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8109 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8110 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8111 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8112 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8113 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8114 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8115 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8116 {NULL, no_argument, NULL, 0}
8118 size_t md_longopts_size = sizeof (md_longopts);
/* Handle one target-specific command-line option (dispatched by getopt
   code C with optional argument ARG).  NOTE(review): most `case' labels,
   `break's and braces are on elided lines; the fragments below are the
   per-option bodies.  */
8121 md_parse_option (int c, char *arg)
8129 optimize_align_code = 0;
8136 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8137 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8138 should be emitted or not. FIXME: Not implemented. */
8142 /* -V: SVR4 argument to print version ID. */
8144 print_version_id ();
8147 /* -k: Ignore for FreeBSD compatibility. */
8152 /* -s: On i386 Solaris, this tells the native assembler to use
8153 .stab instead of .stab.excl. We always use .stab anyhow. */
8156 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8157 || defined (TE_PE) || defined (TE_PEP))
/* --64: only accept it when some 64-bit-capable output target was
   compiled in; otherwise it is a fatal error.  */
8160 const char **list, **l;
8162 list = bfd_target_list ();
8163 for (l = list; *l != NULL; l++)
8164 if (CONST_STRNEQ (*l, "elf64-x86-64")
8165 || strcmp (*l, "coff-x86-64") == 0
8166 || strcmp (*l, "pe-x86-64") == 0
8167 || strcmp (*l, "pei-x86-64") == 0)
8169 default_arch = "x86_64";
8173 as_fatal (_("No compiled in support for x86_64"));
8180 default_arch = "i386";
8184 #ifdef SVR4_COMMENT_CHARS
/* --divide: rebuild the comment-chars string without '/' so that
   division works in expressions.  */
8189 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8191 for (s = i386_comment_chars; *s != '\0'; s++)
8195 i386_comment_chars = n;
/* -march=CPU[,+EXT...]: walk '+'-separated components; a plain name
   selects a base CPU, a ".ext" table entry toggles an ISA extension.  */
8201 arch = xstrdup (arg);
8205 as_fatal (_("Invalid -march= option: `%s'"), arg);
8206 next = strchr (arch, '+');
8209 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8211 if (strcmp (arch, cpu_arch [j].name) == 0)
8214 if (! cpu_arch[j].flags.bitfield.cpui386)
8217 cpu_arch_name = cpu_arch[j].name;
8218 cpu_sub_arch_name = NULL;
8219 cpu_arch_flags = cpu_arch[j].flags;
8220 cpu_arch_isa = cpu_arch[j].type;
8221 cpu_arch_isa_flags = cpu_arch[j].flags;
8222 if (!cpu_arch_tune_set)
8224 cpu_arch_tune = cpu_arch_isa;
8225 cpu_arch_tune_flags = cpu_arch_isa_flags;
8229 else if (*cpu_arch [j].name == '.'
8230 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8232 /* ISA entension. */
8233 i386_cpu_flags flags;
8235 if (!cpu_arch[j].negated)
8236 flags = cpu_flags_or (cpu_arch_flags,
8239 flags = cpu_flags_and_not (cpu_arch_flags,
8241 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8243 if (cpu_sub_arch_name)
8245 char *name = cpu_sub_arch_name;
8246 cpu_sub_arch_name = concat (name,
8248 (const char *) NULL);
8252 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8253 cpu_arch_flags = flags;
8259 if (j >= ARRAY_SIZE (cpu_arch))
8260 as_fatal (_("Invalid -march= option: `%s'"), arg);
8264 while (next != NULL );
/* -mtune=CPU: record tuning target without changing the ISA.  */
8269 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8270 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8272 if (strcmp (arg, cpu_arch [j].name) == 0)
8274 cpu_arch_tune_set = 1;
8275 cpu_arch_tune = cpu_arch [j].type;
8276 cpu_arch_tune_flags = cpu_arch[j].flags;
8280 if (j >= ARRAY_SIZE (cpu_arch))
8281 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8284 case OPTION_MMNEMONIC:
8285 if (strcasecmp (arg, "att") == 0)
8287 else if (strcasecmp (arg, "intel") == 0)
8290 as_fatal (_("Invalid -mmnemonic= option: `%s'"), arg);
8293 case OPTION_MSYNTAX:
8294 if (strcasecmp (arg, "att") == 0)
8296 else if (strcasecmp (arg, "intel") == 0)
8299 as_fatal (_("Invalid -msyntax= option: `%s'"), arg);
8302 case OPTION_MINDEX_REG:
8303 allow_index_reg = 1;
8306 case OPTION_MNAKED_REG:
8307 allow_naked_reg = 1;
8310 case OPTION_MOLD_GCC:
8314 case OPTION_MSSE2AVX:
8318 case OPTION_MSSE_CHECK:
8319 if (strcasecmp (arg, "error") == 0)
8320 sse_check = sse_check_error;
8321 else if (strcasecmp (arg, "warning") == 0)
8322 sse_check = sse_check_warning;
8323 else if (strcasecmp (arg, "none") == 0)
8324 sse_check = sse_check_none;
8326 as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
8329 case OPTION_MAVXSCALAR:
8330 if (strcasecmp (arg, "128") == 0)
8332 else if (strcasecmp (arg, "256") == 0)
8335 as_fatal (_("Invalid -mavxscalar= option: `%s'"), arg);
8344 #define MESSAGE_TEMPLATE \
/* Print the table of known CPU and extension names to STREAM, word-
   wrapped to the width of MESSAGE_TEMPLATE.  EXT non-zero lists ISA
   extensions (cpu_arch entries whose name begins with '.'); CHECK
   non-zero skips processors that lack i386 support.
   NOTE(review): several original lines are missing from this excerpt,
   so the exact skip/continue control flow is not fully visible.  */
8348 show_arch (FILE *stream, int ext, int check)
8350 static char message[] = MESSAGE_TEMPLATE;
/* Skip past the fixed leading text of the template before appending
   names.  */
8351 char *start = message + 27;
8353 int size = sizeof (MESSAGE_TEMPLATE);
8360 left = size - (start - message);
8361 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8363 /* Should it be skipped? */
8364 if (cpu_arch [j].skip)
8367 name = cpu_arch [j].name;
8368 len = cpu_arch [j].len;
8371 /* It is an extension. Skip if we aren't asked to show it. */
8382 /* It is a processor. Skip if we show only extension. */
8385 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8387 /* It is an impossible processor - skip. */
8391 /* Reserve 2 spaces for ", " or ",\0" */
8394 /* Check if there is any room. */
8402 p = mempcpy (p, name, len);
8406 /* Output the current message now and start a new one. */
8409 fprintf (stream, "%s\n", message);
8411 left = size - (start - message) - len - 2;
8413 gas_assert (left >= 0);
8415 p = mempcpy (p, name, len);
/* Flush whatever remains in the buffer.  */
8420 fprintf (stream, "%s\n", message);
/* Target hook: print i386-specific command-line option help to STREAM.
   Option availability varies with the object format (ELF, PE/COFF,
   a.out), hence the #if blocks.  */
8424 md_show_usage (FILE *stream)
8426 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8427 fprintf (stream, _("\
8429 -V print assembler version number\n\
8432 fprintf (stream, _("\
8433 -n Do not optimize code alignment\n\
8434 -q quieten some warnings\n"));
8435 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8436 fprintf (stream, _("\
8439 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8440 || defined (TE_PE) || defined (TE_PEP))
8441 fprintf (stream, _("\
8442 --32/--64 generate 32bit/64bit code\n"));
8444 #ifdef SVR4_COMMENT_CHARS
8445 fprintf (stream, _("\
8446 --divide do not treat `/' as a comment character\n"));
8448 fprintf (stream, _("\
8449 --divide ignored\n"));
/* The -march help enumerates the cpu_arch table via show_arch:
   processors first, then extensions.  */
8451 fprintf (stream, _("\
8452 -march=CPU[,+EXTENSION...]\n\
8453 generate code for CPU and EXTENSION, CPU is one of:\n"));
8454 show_arch (stream, 0, 1);
8455 fprintf (stream, _("\
8456 EXTENSION is combination of:\n"));
8457 show_arch (stream, 1, 0);
8458 fprintf (stream, _("\
8459 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8460 show_arch (stream, 0, 0);
8461 fprintf (stream, _("\
8462 -msse2avx encode SSE instructions with VEX prefix\n"));
8463 fprintf (stream, _("\
8464 -msse-check=[none|error|warning]\n\
8465 check SSE instructions\n"));
8466 fprintf (stream, _("\
8467 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8469 fprintf (stream, _("\
8470 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8471 fprintf (stream, _("\
8472 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8473 fprintf (stream, _("\
8474 -mindex-reg support pseudo index registers\n"));
8475 fprintf (stream, _("\
8476 -mnaked-reg don't require `%%' prefix for registers\n"));
8477 fprintf (stream, _("\
8478 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8481 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8482 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8483 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8485 /* Pick the target format to use. */
/* Pick the BFD target-format name for the output file based on
   default_arch ("i386" vs "x86_64") and OUTPUT_FLAVOR.  As a side
   effect, sets the code flag (32/64-bit), fills in default ISA/tune
   flags if still zero, and for 64-bit ELF enables RELA relocations.
   NOTE(review): some branches (e.g. the plain COFF and default
   cases) are missing from this excerpt.  */
8488 i386_target_format (void)
8490 if (!strcmp (default_arch, "x86_64"))
8491 update_code_flag (CODE_64BIT, 1);
8492 else if (!strcmp (default_arch, "i386"))
8493 update_code_flag (CODE_32BIT, 1);
8495 as_fatal (_("Unknown architecture"));
/* cpu_arch[0] is the 32-bit default, cpu_arch[1] the 64-bit one —
   indexing by (flag_code == CODE_64BIT) picks the right entry.  */
8497 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8498 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8499 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8500 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8502 switch (OUTPUT_FLAVOR)
8504 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8505 case bfd_target_aout_flavour:
8506 return AOUT_TARGET_FORMAT;
8508 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8509 # if defined (TE_PE) || defined (TE_PEP)
8510 case bfd_target_coff_flavour:
8511 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8512 # elif defined (TE_GO32)
8513 case bfd_target_coff_flavour:
8516 case bfd_target_coff_flavour:
8520 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8521 case bfd_target_elf_flavour:
8523 if (flag_code == CODE_64BIT)
/* x86-64 ELF uses Elf64_Rela relocations.  */
8526 use_rela_relocations = 1;
8528 if (cpu_arch_isa == PROCESSOR_L1OM)
8530 if (flag_code != CODE_64BIT)
8531 as_fatal (_("Intel L1OM is 64bit only"));
8532 return ELF_TARGET_L1OM_FORMAT;
8535 return (flag_code == CODE_64BIT
8536 ? ELF_TARGET_FORMAT64 : ELF_TARGET_FORMAT);
8539 #if defined (OBJ_MACH_O)
8540 case bfd_target_mach_o_flavour:
8541 return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
8549 #endif /* OBJ_MAYBE_ more than one */
8551 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
/* Emit an SHT_NOTE (.note) section holding the architecture name as
   an NT_ARCH note: namesz / descsz / type header words followed by
   the NUL-terminated cpu_arch_name string, padded to 4-byte
   alignment.  Saves and restores the current (sub)segment around the
   section switch.  */
8553 i386_elf_emit_arch_note (void)
8555 if (IS_ELF && cpu_arch_name != NULL)
8558 asection *seg = now_seg;
8559 subsegT subseg = now_subseg;
8560 Elf_Internal_Note i_note;
8561 Elf_External_Note e_note;
8562 asection *note_secp;
8565 /* Create the .note section. */
8566 note_secp = subseg_new (".note", 0);
8567 bfd_set_section_flags (stdoutput,
8569 SEC_HAS_CONTENTS | SEC_READONLY);
8571 /* Process the arch string. */
8572 len = strlen (cpu_arch_name);
/* namesz counts the terminating NUL.  */
8574 i_note.namesz = len + 1;
8576 i_note.type = NT_ARCH;
8577 p = frag_more (sizeof (e_note.namesz));
8578 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8579 p = frag_more (sizeof (e_note.descsz));
8580 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8581 p = frag_more (sizeof (e_note.type));
8582 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8583 p = frag_more (len + 1);
8584 strcpy (p, cpu_arch_name);
/* ELF notes are 4-byte aligned.  */
8586 frag_align (2, 0, 0);
/* Restore the caller's section/subsection.  */
8588 subseg_set (seg, subseg);
/* Target hook (K&R-style definition): resolve otherwise-undefined
   symbols.  Creates the global offset table symbol the first time
   _GLOBAL_OFFSET_TABLE_ is referenced; the cheap first-three-char
   comparison short-circuits the full strcmp for most names.
   NOTE(review): the parameter declaration and return paths are
   partly missing from this excerpt.  */
8594 md_undefined_symbol (name)
8597 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8598 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8599 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8600 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8604 if (symbol_find (name))
8605 as_bad (_("GOT already in symbol table"));
8606 GOT_symbol = symbol_new (name, undefined_section,
8607 (valueT) 0, &zero_address_frag);
8614 /* Round up a section size to the appropriate boundary. */
/* Round up a section size to the appropriate boundary (K&R-style
   definition).  Only a.out needs explicit rounding here; other
   formats rely on BFD.  */
8617 md_section_align (segment, size)
8618 segT segment ATTRIBUTE_UNUSED;
8621 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8622 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8624 /* For a.out, force the section size to be aligned. If we don't do
8625 this, BFD will align it for us, but it will not write out the
8626 final bytes of the section. This may be a bug in BFD, but it is
8627 easier to fix it here since that is how the other a.out targets
8631 align = bfd_get_section_alignment (stdoutput, segment);
/* Round SIZE up to a multiple of 1 << align.  */
8632 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8639 /* On the i386, PC-relative offsets are relative to the start of the
8640 next instruction. That is, the address of the offset, plus its
8641 size, since the offset is always the last part of the insn. */
/* Return the address PC-relative relocations are computed from: the
   address of the fixup plus its size, i.e. the start of the next
   instruction (the offset is always the last part of the insn).  */
8644 md_pcrel_from (fixS *fixP)
8646 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
/* Pseudo-op handler for .bss: read an absolute expression naming the
   subsegment and switch to that subsegment of the bss section.  */
8652 s_bss (int ignore ATTRIBUTE_UNUSED)
8656 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8658 obj_elf_section_change_hook ();
8660 temp = get_absolute_expression ();
8661 subseg_set (bss_section, (subsegT) temp);
8662 demand_empty_rest_of_line ();
/* Adjust fixups of the form "sym - _GLOBAL_OFFSET_TABLE_" into the
   proper GOT-relative relocation types before they are emitted.
   NOTE(review): the flag_code/object-format conditions selecting
   between the branches are missing from this excerpt.  */
8668 i386_validate_fix (fixS *fixp)
8670 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8672 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8676 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8681 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8683 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
/* Translate a gas fixup into a BFD arelent for the object writer
   (K&R-style definition).  Maps fx_r_type/fx_size/fx_pcrel to a
   bfd_reloc_code_real_type, special-cases references to the GOT
   symbol, then allocates and fills in the arelent.
   NOTE(review): several lines (variable declarations, some case
   labels and defaults) are missing from this excerpt; comments are
   hedged accordingly.  */
8690 tc_gen_reloc (section, fixp)
8691 asection *section ATTRIBUTE_UNUSED;
8695 bfd_reloc_code_real_type code;
/* Relocation types that pass through unchanged.  */
8697 switch (fixp->fx_r_type)
8699 case BFD_RELOC_X86_64_PLT32:
8700 case BFD_RELOC_X86_64_GOT32:
8701 case BFD_RELOC_X86_64_GOTPCREL:
8702 case BFD_RELOC_386_PLT32:
8703 case BFD_RELOC_386_GOT32:
8704 case BFD_RELOC_386_GOTOFF:
8705 case BFD_RELOC_386_GOTPC:
8706 case BFD_RELOC_386_TLS_GD:
8707 case BFD_RELOC_386_TLS_LDM:
8708 case BFD_RELOC_386_TLS_LDO_32:
8709 case BFD_RELOC_386_TLS_IE_32:
8710 case BFD_RELOC_386_TLS_IE:
8711 case BFD_RELOC_386_TLS_GOTIE:
8712 case BFD_RELOC_386_TLS_LE_32:
8713 case BFD_RELOC_386_TLS_LE:
8714 case BFD_RELOC_386_TLS_GOTDESC:
8715 case BFD_RELOC_386_TLS_DESC_CALL:
8716 case BFD_RELOC_X86_64_TLSGD:
8717 case BFD_RELOC_X86_64_TLSLD:
8718 case BFD_RELOC_X86_64_DTPOFF32:
8719 case BFD_RELOC_X86_64_DTPOFF64:
8720 case BFD_RELOC_X86_64_GOTTPOFF:
8721 case BFD_RELOC_X86_64_TPOFF32:
8722 case BFD_RELOC_X86_64_TPOFF64:
8723 case BFD_RELOC_X86_64_GOTOFF64:
8724 case BFD_RELOC_X86_64_GOTPC32:
8725 case BFD_RELOC_X86_64_GOT64:
8726 case BFD_RELOC_X86_64_GOTPCREL64:
8727 case BFD_RELOC_X86_64_GOTPC64:
8728 case BFD_RELOC_X86_64_GOTPLT64:
8729 case BFD_RELOC_X86_64_PLTOFF64:
8730 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8731 case BFD_RELOC_X86_64_TLSDESC_CALL:
8733 case BFD_RELOC_VTABLE_ENTRY:
8734 case BFD_RELOC_VTABLE_INHERIT:
8736 case BFD_RELOC_32_SECREL:
8738 code = fixp->fx_r_type;
8740 case BFD_RELOC_X86_64_32S:
8741 if (!fixp->fx_pcrel)
8743 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8744 code = fixp->fx_r_type;
/* PC-relative: choose the reloc by fixup size; the first arm is the
   error path for unsupported sizes.  */
8750 switch (fixp->fx_size)
8753 as_bad_where (fixp->fx_file, fixp->fx_line,
8754 _("can not do %d byte pc-relative relocation"),
8756 code = BFD_RELOC_32_PCREL;
8758 case 1: code = BFD_RELOC_8_PCREL; break;
8759 case 2: code = BFD_RELOC_16_PCREL; break;
8760 case 4: code = BFD_RELOC_32_PCREL; break;
8762 case 8: code = BFD_RELOC_64_PCREL; break;
/* Absolute: same size dispatch for non-PC-relative fixups.  */
8768 switch (fixp->fx_size)
8771 as_bad_where (fixp->fx_file, fixp->fx_line,
8772 _("can not do %d byte relocation"),
8774 code = BFD_RELOC_32;
8776 case 1: code = BFD_RELOC_8; break;
8777 case 2: code = BFD_RELOC_16; break;
8778 case 4: code = BFD_RELOC_32; break;
8780 case 8: code = BFD_RELOC_64; break;
/* 32-bit references to the GOT symbol become GOTPC relocations.  */
8787 if ((code == BFD_RELOC_32
8788 || code == BFD_RELOC_32_PCREL
8789 || code == BFD_RELOC_X86_64_32S)
8791 && fixp->fx_addsy == GOT_symbol)
8794 code = BFD_RELOC_386_GOTPC;
8796 code = BFD_RELOC_X86_64_GOTPC32;
8798 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
8800 && fixp->fx_addsy == GOT_symbol)
8802 code = BFD_RELOC_X86_64_GOTPC64;
/* Build the arelent; memory is never freed (BFD owns it from here).  */
8805 rel = (arelent *) xmalloc (sizeof (arelent));
8806 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
8807 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8809 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
8811 if (!use_rela_relocations)
8813 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
8814 vtable entry to be used in the relocation's section offset. */
8815 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
8816 rel->address = fixp->fx_offset;
8817 #if defined (OBJ_COFF) && defined (TE_PE)
8818 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
8819 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
8824 /* Use the rela in 64bit mode. */
8827 if (!fixp->fx_pcrel)
8828 rel->addend = fixp->fx_offset;
/* PC-relative GOT/PLT/TLS relocs: the addend excludes the field
   size.  */
8832 case BFD_RELOC_X86_64_PLT32:
8833 case BFD_RELOC_X86_64_GOT32:
8834 case BFD_RELOC_X86_64_GOTPCREL:
8835 case BFD_RELOC_X86_64_TLSGD:
8836 case BFD_RELOC_X86_64_TLSLD:
8837 case BFD_RELOC_X86_64_GOTTPOFF:
8838 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8839 case BFD_RELOC_X86_64_TLSDESC_CALL:
8840 rel->addend = fixp->fx_offset - fixp->fx_size;
8843 rel->addend = (section->vma
8845 + fixp->fx_addnumber
8846 + md_pcrel_from (fixp));
8851 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
8852 if (rel->howto == NULL)
8854 as_bad_where (fixp->fx_file, fixp->fx_line,
8855 _("cannot represent relocation type %s"),
8856 bfd_get_reloc_code_name (code));
8857 /* Set howto to a garbage value so that we can keep going. */
8858 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
8859 gas_assert (rel->howto != NULL);
8865 #include "tc-i386-intel.c"
/* Parse a register name from the input stream into EXP and convert it
   to its DWARF2 register number for the current code size.  Naked
   registers, pseudo registers, and '.' inside register names are
   temporarily permitted while parsing; all three globals are restored
   afterwards.  On success EXP becomes an O_constant holding the DWARF
   regnum; otherwise it is marked O_illegal.  */
8868 tc_x86_parse_to_dw2regnum (expressionS *exp)
8870 int saved_naked_reg;
8871 char saved_register_dot;
8873 saved_naked_reg = allow_naked_reg;
8874 allow_naked_reg = 1;
8875 saved_register_dot = register_chars['.'];
8876 register_chars['.'] = '.';
8877 allow_pseudo_reg = 1;
8878 expression_and_evaluate (exp);
8879 allow_pseudo_reg = 0;
8880 register_chars['.'] = saved_register_dot;
8881 allow_naked_reg = saved_naked_reg;
8883 if (exp->X_op == O_register && exp->X_add_number >= 0)
8885 if ((addressT) exp->X_add_number < i386_regtab_size)
8887 exp->X_op = O_constant;
/* dw2_regnum is indexed by code size: flag_code >> 1 selects the
   32-bit vs 64-bit DWARF numbering.  */
8888 exp->X_add_number = i386_regtab[exp->X_add_number]
8889 .dw2_regnum[flag_code >> 1];
8892 exp->X_op = O_illegal;
/* Emit the initial CFI instructions for a frame: define the CFA as
   esp/rsp (looked up once per code size and cached in sp_regno) and
   record the return-address column.  The stack-pointer DWARF number
   is obtained by feeding "esp"/"rsp" through
   tc_x86_parse_to_dw2regnum, temporarily redirecting
   input_line_pointer.  */
8897 tc_x86_frame_initial_instructions (void)
8899 static unsigned int sp_regno[2];
8901 if (!sp_regno[flag_code >> 1])
8903 char *saved_input = input_line_pointer;
8904 char sp[][4] = {"esp", "rsp"};
8907 input_line_pointer = sp[flag_code >> 1];
8908 tc_x86_parse_to_dw2regnum (&exp);
8909 gas_assert (exp.X_op == O_constant);
8910 sp_regno[flag_code >> 1] = exp.X_add_number;
8911 input_line_pointer = saved_input;
8914 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
8915 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
/* Map the section-type string "unwind" to SHT_X86_64_UNWIND in
   64-bit mode.  NOTE(review): the fall-through return for other
   strings is missing from this excerpt.  */
8919 i386_elf_section_type (const char *str, size_t len)
8921 if (flag_code == CODE_64BIT
8922 && len == sizeof ("unwind") - 1
8923 && strncmp (str, "unwind", 6) == 0)
8924 return SHT_X86_64_UNWIND;
/* On Solaris x86-64, mark the .eh_frame section with the
   SHT_X86_64_UNWIND section type.  */
8931 i386_solaris_fix_up_eh_frame (segT sec)
8933 if (flag_code == CODE_64BIT)
8934 elf_section_type (sec) = SHT_X86_64_UNWIND;
/* PE targets: emit a SIZE-byte section-relative (secrel) reference to
   SYMBOL for DWARF2 debug info, via an O_secrel expression.  */
8940 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
8944 exp.X_op = O_secrel;
8945 exp.X_add_symbol = symbol;
8946 exp.X_add_number = 0;
8947 emit_expr (&exp, size);
8951 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8952 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
/* Handle the 'l' flag letter in .section directives: in 64-bit mode
   it selects SHF_X86_64_LARGE; otherwise an error message (including
   the valid flag set for the mode) is returned through PTR_MSG.
   NOTE(review): the letter check and failure return value are
   missing from this excerpt.  */
8955 x86_64_section_letter (int letter, char **ptr_msg)
8957 if (flag_code == CODE_64BIT)
8960 return SHF_X86_64_LARGE;
8962 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
8965 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
/* Handle the "large" section attribute word in .section directives:
   maps to SHF_X86_64_LARGE in 64-bit mode.  */
8970 x86_64_section_word (char *str, size_t len)
8972 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
8973 return SHF_X86_64_LARGE;
/* Pseudo-op handler for .largecomm.  In 32-bit mode it degrades to a
   warning plus an ordinary .comm.  In 64-bit mode it parses the
   common symbol with the large-common ELF section and a lazily
   created .lbss section standing in for .bss, restoring both globals
   afterwards.  */
8979 handle_large_common (int small ATTRIBUTE_UNUSED)
8981 if (flag_code != CODE_64BIT)
8983 s_comm_internal (0, elf_common_parse);
8984 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
8988 static segT lbss_section;
8989 asection *saved_com_section_ptr = elf_com_section_ptr;
8990 asection *saved_bss_section = bss_section;
8992 if (lbss_section == NULL)
8994 flagword applicable;
8996 subsegT subseg = now_subseg;
8998 /* The .lbss section is for local .largecomm symbols. */
8999 lbss_section = subseg_new (".lbss", 0);
9000 applicable = bfd_applicable_section_flags (stdoutput);
9001 bfd_set_section_flags (stdoutput, lbss_section,
9002 applicable & SEC_ALLOC);
9003 seg_info (lbss_section)->bss = 1;
9005 subseg_set (seg, subseg);
/* Temporarily swap in the large-common section pointers for
   s_comm_internal, then restore them.  */
9008 elf_com_section_ptr = &_bfd_elf_large_com_section;
9009 bss_section = lbss_section;
9011 s_comm_internal (0, elf_common_parse);
9013 elf_com_section_ptr = saved_com_section_ptr;
9014 bss_section = saved_bss_section;
9017 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */