PowerPC math header.
Modified: trunk/reactos/boot/freeldr/freeldr/math/libgcc2.c
Added: trunk/reactos/boot/freeldr/freeldr/math/powerpc.h
_____
Modified: trunk/reactos/boot/freeldr/freeldr/math/libgcc2.c
--- trunk/reactos/boot/freeldr/freeldr/math/libgcc2.c 2005-04-01
00:15:04 UTC (rev 14397)
+++ trunk/reactos/boot/freeldr/freeldr/math/libgcc2.c 2005-04-01
00:17:10 UTC (rev 14398)
@@ -38,6 +38,8 @@
*/
#ifdef __i386__
#include "i386.h"
+#elif defined(_M_PPC)
+#include "powerpc.h"
#endif
#define L_clz
#define L_udivdi3
_____
Added: trunk/reactos/boot/freeldr/freeldr/math/powerpc.h
--- trunk/reactos/boot/freeldr/freeldr/math/powerpc.h 2005-04-01
00:15:04 UTC (rev 14397)
+++ trunk/reactos/boot/freeldr/freeldr/math/powerpc.h 2005-04-01
00:17:10 UTC (rev 14398)
@@ -0,0 +1,3208 @@
+/* Definitions of target machine for GNU compiler (contents copied from the IA-32 header).
+ Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+ 2001, 2002 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* The purpose of this file is to define the characteristics of the
i386,
+ independent of assembler syntax or operating system.
+
+ Three other files build on this one to describe a specific assembler
syntax:
+ bsd386.h, att386.h, and sun386.h.
+
+ The actual tm.h file for a particular system should include
+ this file, and then the file for the appropriate assembler syntax.
+
+ Many macros that specify assembler syntax are omitted entirely from
+ this file because they really belong in the files for particular
+ assemblers. These include RP, IP, LPREFIX, PUT_OP_SIZE, USE_STAR,
+ ADDR_BEG, ADDR_END, PRINT_IREG, PRINT_SCALE, PRINT_B_I_S, and many
+ that start with ASM_ or end in ASM_OP. */
+
+/* Stubs for half-pic support if not OSF/1 reference platform. */
+
+#ifndef HALF_PIC_P
+#define HALF_PIC_P() 0
+#define HALF_PIC_NUMBER_PTRS 0
+#define HALF_PIC_NUMBER_REFS 0
+#define HALF_PIC_ENCODE(DECL)
+#define HALF_PIC_DECLARE(NAME)
+#define HALF_PIC_INIT() error ("half-pic init called on systems
that don't support it")
+#define HALF_PIC_ADDRESS_P(X) 0
+#define HALF_PIC_PTR(X) (X)
+#define HALF_PIC_FINISH(STREAM)
+#endif
+
+/* Define the specific costs for a given cpu */
+
+struct processor_costs {
+ const int add; /* cost of an add instruction */
+ const int lea; /* cost of a lea instruction */
+ const int shift_var; /* variable shift costs */
+ const int shift_const; /* constant shift costs */
+ const int mult_init; /* cost of starting a multiply */
+ const int mult_bit; /* cost of multiply per each bit set */
+ const int divide; /* cost of a divide/mod */
+ int movsx; /* The cost of movsx operation. */
+ int movzx; /* The cost of movzx operation. */
+ const int large_insn; /* insns larger than this cost
more */
+ const int move_ratio; /* The threshold of number of
scalar
+ memory-to-memory move insns. */
+ const int movzbl_load; /* cost of loading using movzbl */
+ const int int_load[3]; /* cost of loading integer registers
+ in QImode, HImode and SImode relative
+ to reg-reg move (2). */
+ const int int_store[3]; /* cost of storing integer register
+ in QImode, HImode and SImode */
+ const int fp_move; /* cost of reg,reg fld/fst */
+ const int fp_load[3]; /* cost of loading FP register
+ in SFmode, DFmode and XFmode */
+ const int fp_store[3]; /* cost of storing FP register
+ in SFmode, DFmode and XFmode */
+ const int mmx_move; /* cost of moving MMX register. */
+ const int mmx_load[2]; /* cost of loading MMX register
+ in SImode and DImode */
+ const int mmx_store[2]; /* cost of storing MMX register
+ in SImode and DImode */
+ const int sse_move; /* cost of moving SSE register. */
+ const int sse_load[3]; /* cost of loading SSE register
+ in SImode, DImode and TImode*/
+ const int sse_store[3]; /* cost of storing SSE register
+ in SImode, DImode and TImode*/
+ const int mmxsse_to_integer; /* cost of moving mmxsse register to
+ integer and vice versa. */
+ const int prefetch_block; /* bytes moved to cache for prefetch.
*/
+ const int simultaneous_prefetches; /* number of parallel prefetch
+ operations. */
+};
+
+extern const struct processor_costs *ix86_cost;
+
+/* Run-time compilation parameters selecting different hardware
subsets. */
+
+extern int target_flags;
+
+/* Macros used in the machine description to test the flags. */
+
+/* configure can arrange to make this 2, to force a 486. */
+
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT 0
+#endif
+
+/* Masks for the -m switches */
+#define MASK_80387 0x00000001 /* Hardware floating
point */
+#define MASK_RTD 0x00000002 /* Use ret that pops
args */
+#define MASK_ALIGN_DOUBLE 0x00000004 /* align doubles to 2
word boundary */
+#define MASK_SVR3_SHLIB 0x00000008 /* Uninit locals
into bss */
+#define MASK_IEEE_FP 0x00000010 /* IEEE fp comparisons
*/
+#define MASK_FLOAT_RETURNS 0x00000020 /* Return float in st(0)
*/
+#define MASK_NO_FANCY_MATH_387 0x00000040 /* Disable sin, cos,
sqrt */
+#define MASK_OMIT_LEAF_FRAME_POINTER 0x080 /* omit leaf frame
pointers */
+#define MASK_STACK_PROBE 0x00000100 /* Enable stack probing
*/
+#define MASK_NO_ALIGN_STROPS 0x00000200 /* Enable aligning of
string ops. */
+#define MASK_INLINE_ALL_STROPS 0x00000400 /* Inline stringops in
all cases */
+#define MASK_NO_PUSH_ARGS 0x00000800 /* Use push instructions
*/
+#define MASK_ACCUMULATE_OUTGOING_ARGS 0x00001000/* Accumulate outgoing
args */
+#define MASK_ACCUMULATE_OUTGOING_ARGS_SET 0x00002000
+#define MASK_MMX 0x00004000 /* Support MMX
regs/builtins */
+#define MASK_MMX_SET 0x00008000
+#define MASK_SSE 0x00010000 /* Support SSE
regs/builtins */
+#define MASK_SSE_SET 0x00020000
+#define MASK_SSE2 0x00040000 /* Support SSE2
regs/builtins */
+#define MASK_SSE2_SET 0x00080000
+#define MASK_3DNOW 0x00100000 /* Support 3Dnow
builtins */
+#define MASK_3DNOW_SET 0x00200000
+#define MASK_3DNOW_A 0x00400000 /* Support Athlon 3Dnow
builtins */
+#define MASK_3DNOW_A_SET 0x00800000
+#define MASK_128BIT_LONG_DOUBLE 0x01000000 /* long double size is
128bit */
+#define MASK_64BIT 0x02000000 /* Produce 64bit code */
+/* ... overlap with subtarget options starts by 0x04000000. */
+#define MASK_NO_RED_ZONE 0x04000000 /* Do not use red zone
*/
+
+/* Use the floating point instructions */
+#define TARGET_80387 (target_flags & MASK_80387)
+
+/* Compile using ret insn that pops args.
+ This will not work unless you use prototypes at least
+ for all functions that can take varying numbers of args. */
+#define TARGET_RTD (target_flags & MASK_RTD)
+
+/* Align doubles to a two word boundary. This breaks compatibility
with
+ the published ABI's for structures containing doubles, but produces
+ faster code on the pentium. */
+#define TARGET_ALIGN_DOUBLE (target_flags & MASK_ALIGN_DOUBLE)
+
+/* Use push instructions to save outgoing args. */
+#define TARGET_PUSH_ARGS (!(target_flags & MASK_NO_PUSH_ARGS))
+
+/* Accumulate stack adjustments to prologue/epilogue. */
+#define TARGET_ACCUMULATE_OUTGOING_ARGS \
+ (target_flags & MASK_ACCUMULATE_OUTGOING_ARGS)
+
+/* Put uninitialized locals into bss, not data.
+ Meaningful only on svr3. */
+#define TARGET_SVR3_SHLIB (target_flags & MASK_SVR3_SHLIB)
+
+/* Use IEEE floating point comparisons. These handle correctly the
cases
+ where the result of a comparison is unordered. Normally SIGFPE is
+ generated in such cases, in which case this isn't needed. */
+#define TARGET_IEEE_FP (target_flags & MASK_IEEE_FP)
+
+/* Functions that return a floating point value may return that value
+ in the 387 FPU or in 386 integer registers. If set, this flag
causes
+ the 387 to be used, which is compatible with most calling
conventions. */
+#define TARGET_FLOAT_RETURNS_IN_80387 (target_flags &
MASK_FLOAT_RETURNS)
+
+/* Long double is 128bit instead of 96bit, even when only 80bits are
used.
+ This mode wastes cache, but avoids misaligned data accesses and
simplifies
+ address calculations. */
+#define TARGET_128BIT_LONG_DOUBLE (target_flags &
MASK_128BIT_LONG_DOUBLE)
+
+/* Disable generation of FP sin, cos and sqrt operations for 387.
+ This is because FreeBSD lacks these in the math-emulator-code */
+#define TARGET_NO_FANCY_MATH_387 (target_flags &
MASK_NO_FANCY_MATH_387)
+
+/* Don't create frame pointers for leaf functions */
+#define TARGET_OMIT_LEAF_FRAME_POINTER \
+ (target_flags & MASK_OMIT_LEAF_FRAME_POINTER)
+
+/* Debug GO_IF_LEGITIMATE_ADDRESS */
+#define TARGET_DEBUG_ADDR (ix86_debug_addr_string != 0)
+
+/* Debug FUNCTION_ARG macros */
+#define TARGET_DEBUG_ARG (ix86_debug_arg_string != 0)
+
+#if 0
+/* 64bit Sledgehammer mode */
+#ifdef TARGET_BI_ARCH
+#define TARGET_64BIT (target_flags & MASK_64BIT)
+#else
+#ifdef TARGET_64BIT_DEFAULT
+#define TARGET_64BIT 1
+#else
+#define TARGET_64BIT 0
+#endif
+#endif
+
+#define TARGET_386 (ix86_cpu == PROCESSOR_I386)
+#define TARGET_486 (ix86_cpu == PROCESSOR_I486)
+#define TARGET_PENTIUM (ix86_cpu == PROCESSOR_PENTIUM)
+#define TARGET_PENTIUMPRO (ix86_cpu == PROCESSOR_PENTIUMPRO)
+#define TARGET_K6 (ix86_cpu == PROCESSOR_K6)
+#define TARGET_ATHLON (ix86_cpu == PROCESSOR_ATHLON)
+#define TARGET_PENTIUM4 (ix86_cpu == PROCESSOR_PENTIUM4)
+
+#define CPUMASK (1 << ix86_cpu)
+extern const int x86_use_leave, x86_push_memory,
x86_zero_extend_with_and;
+extern const int x86_use_bit_test, x86_cmove, x86_deep_branch;
+extern const int x86_branch_hints, x86_unroll_strlen;
+extern const int x86_double_with_add, x86_partial_reg_stall, x86_movx;
+extern const int x86_use_loop, x86_use_fiop, x86_use_mov0;
+extern const int x86_use_cltd, x86_read_modify_write;
+extern const int x86_read_modify, x86_split_long_moves;
+extern const int x86_promote_QImode, x86_single_stringop;
+extern const int x86_himode_math, x86_qimode_math, x86_promote_qi_regs;
+extern const int x86_promote_hi_regs, x86_integer_DFmode_moves;
+extern const int x86_add_esp_4, x86_add_esp_8, x86_sub_esp_4,
x86_sub_esp_8;
+extern const int x86_partial_reg_dependency, x86_memory_mismatch_stall;
+extern const int x86_accumulate_outgoing_args, x86_prologue_using_move;
+extern const int x86_epilogue_using_move, x86_decompose_lea;
+extern const int x86_arch_always_fancy_math_387;
+extern int x86_prefetch_sse;
+
+#define TARGET_USE_LEAVE (x86_use_leave & CPUMASK)
+#define TARGET_PUSH_MEMORY (x86_push_memory & CPUMASK)
+#define TARGET_ZERO_EXTEND_WITH_AND (x86_zero_extend_with_and &
CPUMASK)
+#define TARGET_USE_BIT_TEST (x86_use_bit_test & CPUMASK)
+#define TARGET_UNROLL_STRLEN (x86_unroll_strlen & CPUMASK)
+/* For sane SSE instruction set generation we need fcomi instruction.
It is
+ safe to enable all CMOVE instructions. */
+#define TARGET_CMOVE ((x86_cmove & (1 << ix86_arch)) || TARGET_SSE)
+#define TARGET_DEEP_BRANCH_PREDICTION (x86_deep_branch & CPUMASK)
+#define TARGET_BRANCH_PREDICTION_HINTS (x86_branch_hints & CPUMASK)
+#define TARGET_DOUBLE_WITH_ADD (x86_double_with_add & CPUMASK)
+#define TARGET_USE_SAHF ((x86_use_sahf & CPUMASK) && !TARGET_64BIT)
+#define TARGET_MOVX (x86_movx & CPUMASK)
+#define TARGET_PARTIAL_REG_STALL (x86_partial_reg_stall & CPUMASK)
+#define TARGET_USE_LOOP (x86_use_loop & CPUMASK)
+#define TARGET_USE_FIOP (x86_use_fiop & CPUMASK)
+#define TARGET_USE_MOV0 (x86_use_mov0 & CPUMASK)
+#define TARGET_USE_CLTD (x86_use_cltd & CPUMASK)
+#define TARGET_SPLIT_LONG_MOVES (x86_split_long_moves & CPUMASK)
+#define TARGET_READ_MODIFY_WRITE (x86_read_modify_write & CPUMASK)
+#define TARGET_READ_MODIFY (x86_read_modify & CPUMASK)
+#define TARGET_PROMOTE_QImode (x86_promote_QImode & CPUMASK)
+#define TARGET_SINGLE_STRINGOP (x86_single_stringop & CPUMASK)
+#define TARGET_QIMODE_MATH (x86_qimode_math & CPUMASK)
+#define TARGET_HIMODE_MATH (x86_himode_math & CPUMASK)
+#define TARGET_PROMOTE_QI_REGS (x86_promote_qi_regs & CPUMASK)
+#define TARGET_PROMOTE_HI_REGS (x86_promote_hi_regs & CPUMASK)
+#define TARGET_ADD_ESP_4 (x86_add_esp_4 & CPUMASK)
+#define TARGET_ADD_ESP_8 (x86_add_esp_8 & CPUMASK)
+#define TARGET_SUB_ESP_4 (x86_sub_esp_4 & CPUMASK)
+#define TARGET_SUB_ESP_8 (x86_sub_esp_8 & CPUMASK)
+#define TARGET_INTEGER_DFMODE_MOVES (x86_integer_DFmode_moves &
CPUMASK)
+#define TARGET_PARTIAL_REG_DEPENDENCY (x86_partial_reg_dependency &
CPUMASK)
+#define TARGET_MEMORY_MISMATCH_STALL (x86_memory_mismatch_stall &
CPUMASK)
+#define TARGET_PROLOGUE_USING_MOVE (x86_prologue_using_move & CPUMASK)
+#define TARGET_EPILOGUE_USING_MOVE (x86_epilogue_using_move & CPUMASK)
+#define TARGET_DECOMPOSE_LEA (x86_decompose_lea & CPUMASK)
+#define TARGET_PREFETCH_SSE (x86_prefetch_sse)
+
+#define TARGET_STACK_PROBE (target_flags & MASK_STACK_PROBE)
+
+#define TARGET_ALIGN_STRINGOPS (!(target_flags & MASK_NO_ALIGN_STROPS))
+#define TARGET_INLINE_ALL_STRINGOPS (target_flags &
MASK_INLINE_ALL_STROPS)
+
+#define ASSEMBLER_DIALECT (ix86_asm_dialect)
+
+#define TARGET_SSE ((target_flags & (MASK_SSE | MASK_SSE2)) != 0)
+#define TARGET_SSE2 ((target_flags & MASK_SSE2) != 0)
+#define TARGET_SSE_MATH ((ix86_fpmath & FPMATH_SSE) != 0)
+#define TARGET_MIX_SSE_I387 ((ix86_fpmath & FPMATH_SSE) \
+ && (ix86_fpmath & FPMATH_387))
+#define TARGET_MMX ((target_flags & MASK_MMX) != 0)
+#define TARGET_3DNOW ((target_flags & MASK_3DNOW) != 0)
+#define TARGET_3DNOW_A ((target_flags & MASK_3DNOW_A) != 0)
+#endif
+
+#define TARGET_RED_ZONE (!(target_flags & MASK_NO_RED_ZONE))
+
+/* WARNING: Do not mark empty strings for translation, as calling
+ gettext on an empty string does NOT return an empty
+ string. */
+
+#if 0
+#define TARGET_SWITCHES
\
+{ { "80387", MASK_80387, N_("Use hardware fp") },
\
+ { "no-80387", -MASK_80387, N_("Do not use
hardware fp") }, \
+ { "hard-float", MASK_80387, N_("Use hardware fp") },
\
+ { "soft-float", -MASK_80387, N_("Do not use hardware
fp") }, \
+ { "no-soft-float", MASK_80387, N_("Use hardware fp") },
\
+ { "386", 0, "" /*Deprecated.*/},
\
+ { "486", 0, "" /*Deprecated.*/},
\
+ { "pentium", 0, "" /*Deprecated.*/},
\
+ { "pentiumpro", 0, "" /*Deprecated.*/},
\
+ { "intel-syntax", 0, "" /*Deprecated.*/},
\
+ { "no-intel-syntax", 0, "" /*Deprecated.*/},
\
+ { "rtd", MASK_RTD,
\
+ N_("Alternate calling convention") },
\
+ { "no-rtd", -MASK_RTD,
\
+ N_("Use normal calling convention") },
\
+ { "align-double", MASK_ALIGN_DOUBLE,
\
+ N_("Align some doubles on dword boundary") },
\
+ { "no-align-double", -MASK_ALIGN_DOUBLE,
\
+ N_("Align doubles on word boundary") },
\
+ { "svr3-shlib", MASK_SVR3_SHLIB,
\
+ N_("Uninitialized locals in .bss") },
\
+ { "no-svr3-shlib", -MASK_SVR3_SHLIB,
\
+ N_("Uninitialized locals in .data") },
\
+ { "ieee-fp", MASK_IEEE_FP,
\
+ N_("Use IEEE math for fp comparisons") },
\
+ { "no-ieee-fp", -MASK_IEEE_FP,
\
+ N_("Do not use IEEE math for fp comparisons") },
\
+ { "fp-ret-in-387", MASK_FLOAT_RETURNS,
\
+ N_("Return values of functions in FPU registers") },
\
+ { "no-fp-ret-in-387", -MASK_FLOAT_RETURNS ,
\
+ N_("Do not return values of functions in FPU registers")},
\
+ { "no-fancy-math-387", MASK_NO_FANCY_MATH_387,
\
+ N_("Do not generate sin, cos, sqrt for FPU") },
\
+ { "fancy-math-387", -MASK_NO_FANCY_MATH_387,
\
+ N_("Generate sin, cos, sqrt for FPU")},
\
+ { "omit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER,
\
+ N_("Omit the frame pointer in leaf functions") },
\
+ { "no-omit-leaf-frame-pointer",-MASK_OMIT_LEAF_FRAME_POINTER, "" },
\
+ { "stack-arg-probe", MASK_STACK_PROBE,
\
+ N_("Enable stack probing") },
\
+ { "no-stack-arg-probe", -MASK_STACK_PROBE, "" },
\
+ { "windows", 0, 0 /* undocumented */ },
\
+ { "dll", 0, 0 /* undocumented */ },
\
+ { "align-stringops", -MASK_NO_ALIGN_STROPS,
\
+ N_("Align destination of the string operations") },
\
+ { "no-align-stringops", MASK_NO_ALIGN_STROPS,
\
+ N_("Do not align destination of the string operations") },
\
+ { "inline-all-stringops", MASK_INLINE_ALL_STROPS,
\
+ N_("Inline all known string operations") },
\
+ { "no-inline-all-stringops", -MASK_INLINE_ALL_STROPS,
\
+ N_("Do not inline all known string operations") },
\
+ { "push-args", -MASK_NO_PUSH_ARGS,
\
+ N_("Use push instructions to save outgoing arguments") },
\
+ { "no-push-args", MASK_NO_PUSH_ARGS,
\
+ N_("Do not use push instructions to save outgoing arguments") },
\
+ { "accumulate-outgoing-args", (MASK_ACCUMULATE_OUTGOING_ARGS
\
+ | MASK_ACCUMULATE_OUTGOING_ARGS_SET),
\
+ N_("Use push instructions to save outgoing arguments") },
\
+ { "no-accumulate-outgoing-args",MASK_ACCUMULATE_OUTGOING_ARGS_SET,
\
+ N_("Do not use push instructions to save outgoing arguments") },
\
+ { "mmx", MASK_MMX | MASK_MMX_SET,
\
+ N_("Support MMX built-in functions") },
\
+ { "no-mmx", -MASK_MMX,
\
+ N_("Do not support MMX built-in functions") },
\
+ { "no-mmx", MASK_MMX_SET, "" },
\
+ { "3dnow", MASK_3DNOW | MASK_3DNOW_SET,
\
+ N_("Support 3DNow! built-in functions") },
\
+ { "no-3dnow", -MASK_3DNOW, "" },
\
+ { "no-3dnow", MASK_3DNOW_SET,
\
+ N_("Do not support 3DNow! built-in functions") },
\
+ { "sse", MASK_SSE | MASK_SSE_SET,
\
+ N_("Support MMX and SSE built-in functions and code generation") },
\
+ { "no-sse", -MASK_SSE, "" },
\
+ { "no-sse", MASK_SSE_SET,
\
+ N_("Do not support MMX and SSE built-in functions and code
generation") },\
+ { "sse2", MASK_SSE2 | MASK_SSE2_SET,
\
+ N_("Support MMX, SSE and SSE2 built-in functions and code
generation") }, \
+ { "no-sse2", -MASK_SSE2, "" },
\
+ { "no-sse2", MASK_SSE2_SET,
\
+ N_("Do not support MMX, SSE and SSE2 built-in functions and code
generation") }, \
+ { "128bit-long-double", MASK_128BIT_LONG_DOUBLE,
\
+ N_("sizeof(long double) is 16") },
\
+ { "96bit-long-double", -MASK_128BIT_LONG_DOUBLE,
\
+ N_("sizeof(long double) is 12") },
\
+ { "64", MASK_64BIT,
\
+ N_("Generate 64bit x86-64 code") },
\
+ { "32", -MASK_64BIT,
\
+ N_("Generate 32bit i386 code") },
\
+ { "red-zone", -MASK_NO_RED_ZONE,
\
+ N_("Use red-zone in the x86-64 code") },
\
+ { "no-red-zone", MASK_NO_RED_ZONE,
\
+ N_("Do not use red-zone in the x86-64 code") },
\
+ SUBTARGET_SWITCHES
\
+ { "", TARGET_DEFAULT, 0 }}
+
+#ifdef TARGET_64BIT_DEFAULT
+#define TARGET_DEFAULT (MASK_64BIT | TARGET_SUBTARGET_DEFAULT)
+#else
+#define TARGET_DEFAULT TARGET_SUBTARGET_DEFAULT
+#endif
+
+/* Which processor to schedule for. The cpu attribute defines a list
that
+ mirrors this list, so changes to i386.md must be made at the same
time. */
+
+enum processor_type
+{
+ PROCESSOR_I386, /* 80386 */
+ PROCESSOR_I486, /* 80486DX, 80486SX, 80486DX[24]
*/
+ PROCESSOR_PENTIUM,
+ PROCESSOR_PENTIUMPRO,
+ PROCESSOR_K6,
+ PROCESSOR_ATHLON,
+ PROCESSOR_PENTIUM4,
+ PROCESSOR_max
+};
+enum fpmath_unit
+{
+ FPMATH_387 = 1,
+ FPMATH_SSE = 2
+};
+
+extern enum processor_type ix86_cpu;
+extern enum fpmath_unit ix86_fpmath;
+
+extern int ix86_arch;
+
+/* This macro is similar to `TARGET_SWITCHES' but defines names of
+ command options that have values. Its definition is an
+ initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant, that defines the
+ fixed part of the option name, and the address of a variable. The
+ variable, type `char *', is set to the variable part of the given
+ option if the fixed part matches. The actual option name is made
+ by appending `-m' to the specified name. */
+#define TARGET_OPTIONS \
+{ { "cpu=", &ix86_cpu_string, \
+ N_("Schedule code for given CPU")},
\
+ { "fpmath=", &ix86_fpmath_string, \
+ N_("Generate floating point mathematics using given instruction
set")},\
+ { "arch=", &ix86_arch_string, \
+ N_("Generate code for given CPU")},
\
+ { "regparm=", &ix86_regparm_string,
\
+ N_("Number of registers used to pass integer arguments") },
\
+ { "align-loops=", &ix86_align_loops_string, \
+ N_("Loop code aligned to this power of 2") }, \
+ { "align-jumps=", &ix86_align_jumps_string, \
+ N_("Jump targets are aligned to this power of 2") }, \
+ { "align-functions=", &ix86_align_funcs_string,
\
+ N_("Function starts are aligned to this power of 2") }, \
+ { "preferred-stack-boundary=", \
+ &ix86_preferred_stack_boundary_string, \
+ N_("Attempt to keep stack aligned to this power of 2") }, \
+ { "branch-cost=", &ix86_branch_cost_string, \
+ N_("Branches are this expensive (1-5, arbitrary units)") },
\
+ { "cmodel=", &ix86_cmodel_string, \
+ N_("Use given x86-64 code model") }, \
+ { "debug-arg", &ix86_debug_arg_string, \
+ "" /* Undocumented. */ }, \
+ { "debug-addr", &ix86_debug_addr_string, \
+ "" /* Undocumented. */ }, \
+ { "asm=", &ix86_asm_string, \
+ N_("Use given assembler dialect") }, \
+ SUBTARGET_OPTIONS \
+}
+#endif
+
+/* Sometimes certain combinations of command options do not make
+ sense on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ Don't use this macro to turn on various extra optimizations for
+ `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
+
+#define OVERRIDE_OPTIONS override_options ()
+
+/* These are meant to be redefined in the host dependent files */
+#define SUBTARGET_SWITCHES
+#define SUBTARGET_OPTIONS
+
+/* Define this to change the optimizations performed by default. */
+#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) \
+ optimization_options ((LEVEL), (SIZE))
+
+/* Specs for the compiler proper */
+
+#if 0
+#ifndef CC1_CPU_SPEC
+#define CC1_CPU_SPEC "\
+%{!mcpu*: \
+%{m386:-mcpu=i386 \
+%n`-m386' is deprecated. Use `-march=i386' or `-mcpu=i386' instead.\n}
\
+%{m486:-mcpu=i486 \
+%n`-m486' is deprecated. Use `-march=i486' or `-mcpu=i486' instead.\n}
\
+%{mpentium:-mcpu=pentium \
+%n`-mpentium' is deprecated. Use `-march=pentium' or `-mcpu=pentium'
instead.\n} \
+%{mpentiumpro:-mcpu=pentiumpro \
+%n`-mpentiumpro' is deprecated. Use `-march=pentiumpro' or
`-mcpu=pentiumpro' instead.\n}} \
+%{mintel-syntax:-masm=intel \
+%n`-mintel-syntax' is deprecated. Use `-masm=intel' instead.\n} \
+%{mno-intel-syntax:-masm=att \
+%n`-mno-intel-syntax' is deprecated. Use `-masm=att' instead.\n}"
+#endif
+
+#define TARGET_CPU_DEFAULT_i386 0
+#define TARGET_CPU_DEFAULT_i486 1
+#define TARGET_CPU_DEFAULT_pentium 2
+#define TARGET_CPU_DEFAULT_pentium_mmx 3
+#define TARGET_CPU_DEFAULT_pentiumpro 4
+#define TARGET_CPU_DEFAULT_pentium2 5
+#define TARGET_CPU_DEFAULT_pentium3 6
+#define TARGET_CPU_DEFAULT_pentium4 7
+#define TARGET_CPU_DEFAULT_k6 8
+#define TARGET_CPU_DEFAULT_k6_2 9
+#define TARGET_CPU_DEFAULT_k6_3 10
+#define TARGET_CPU_DEFAULT_athlon 11
+#define TARGET_CPU_DEFAULT_athlon_sse 12
+
+#define TARGET_CPU_DEFAULT_NAMES {"i386", "i486", "pentium",
"pentium-mmx",\
+ "pentiumpro", "pentium2", "pentium3",
\
+ "pentium4", "k6", "k6-2", "k6-3",\
+ "athlon", "athlon-4"}
+#ifndef CPP_CPU_DEFAULT_SPEC
+#if TARGET_CPU_DEFAULT == TARGET_CPU_DEFAULT_i486
+#define CPP_CPU_DEFAULT_SPEC "-D__tune_i486__"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_DEFAULT_pentium
+#define CPP_CPU_DEFAULT_SPEC "-D__tune_i586__ -D__tune_pentium__"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_DEFAULT_pentium_mmx
+#define CPP_CPU_DEFAULT_SPEC "-D__tune_i586__ -D__tune_pentium__
-D__tune_pentium_mmx__"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_DEFAULT_pentiumpro
+#define CPP_CPU_DEFAULT_SPEC "-D__tune_i686__ -D__tune_pentiumpro__"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_DEFAULT_pentium2
+#define CPP_CPU_DEFAULT_SPEC "-D__tune_i686__ -D__tune_pentiumpro__\
+-D__tune_pentium2__"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_DEFAULT_pentium3
+#define CPP_CPU_DEFAULT_SPEC "-D__tune_i686__ -D__tune_pentiumpro__\
+-D__tune_pentium2__ -D__tune_pentium3__"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_DEFAULT_pentium4
+#define CPP_CPU_DEFAULT_SPEC "-D__tune_pentium4__"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_DEFAULT_k6
+#define CPP_CPU_DEFAULT_SPEC "-D__tune_k6__"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_DEFAULT_k6_2
+#define CPP_CPU_DEFAULT_SPEC "-D__tune_k6__ -D__tune_k6_2__"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_DEFAULT_k6_3
+#define CPP_CPU_DEFAULT_SPEC "-D__tune_k6__ -D__tune_k6_3__"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_DEFAULT_athlon
+#define CPP_CPU_DEFAULT_SPEC "-D__tune_athlon__"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_DEFAULT_athlon_sse
+#define CPP_CPU_DEFAULT_SPEC "-D__tune_athlon__ -D__tune_athlon_sse__"
+#endif
+#ifndef CPP_CPU_DEFAULT_SPEC
+#define CPP_CPU_DEFAULT_SPEC "-D__tune_i386__"
+#endif
+#endif /* CPP_CPU_DEFAULT_SPEC */
+
+#ifdef TARGET_BI_ARCH
+#define NO_BUILTIN_SIZE_TYPE
+#define NO_BUILTIN_PTRDIFF_TYPE
+#endif
+
+#ifdef NO_BUILTIN_SIZE_TYPE
+#define CPP_CPU32_SIZE_TYPE_SPEC \
+ " -D__SIZE_TYPE__=unsigned\\ int -D__PTRDIFF_TYPE__=int"
+#define CPP_CPU64_SIZE_TYPE_SPEC \
+ " -D__SIZE_TYPE__=unsigned\\ long\\ int -D__PTRDIFF_TYPE__=long\\
int"
+#else
+#define CPP_CPU32_SIZE_TYPE_SPEC ""
+#define CPP_CPU64_SIZE_TYPE_SPEC ""
+#endif
+
+#define CPP_CPU32_SPEC \
+ "-Acpu=i386 -Amachine=i386 %{!ansi:%{!std=c*:%{!std=i*:-Di386}}}
-D__i386 \
+-D__i386__ %(cpp_cpu32sizet)"
+
+#define CPP_CPU64_SPEC \
+ "-Acpu=x86_64 -Amachine=x86_64 -D__x86_64 -D__x86_64__
%(cpp_cpu64sizet)"
+
+#define CPP_CPUCOMMON_SPEC "\
+%{march=i386:%{!mcpu*:-D__tune_i386__ }}\
+%{march=i486:-D__i486 -D__i486__ %{!mcpu*:-D__tune_i486__ }}\
+%{march=pentium|march=i586:-D__i586 -D__i586__ -D__pentium
-D__pentium__ \
+ %{!mcpu*:-D__tune_i586__ -D__tune_pentium__ }}\
+%{march=pentium-mmx:-D__i586 -D__i586__ -D__pentium -D__pentium__ \
+ -D__pentium__mmx__ \
+ %{!mcpu*:-D__tune_i586__ -D__tune_pentium__ -D__tune_pentium_mmx__}}\
+%{march=pentiumpro|march=i686:-D__i686 -D__i686__ \
+ -D__pentiumpro -D__pentiumpro__ \
+ %{!mcpu*:-D__tune_i686__ -D__tune_pentiumpro__ }}\
+%{march=k6:-D__k6 -D__k6__ %{!mcpu*:-D__tune_k6__ }}\
+%{march=k6-2:-D__k6 -D__k6__ -D__k6_2__ \
+ %{!mcpu*:-D__tune_k6__ -D__tune_k6_2__ }}\
+%{march=k6-3:-D__k6 -D__k6__ -D__k6_3__ \
+ %{!mcpu*:-D__tune_k6__ -D__tune_k6_3__ }}\
+%{march=athlon|march=athlon-tbird:-D__athlon -D__athlon__ \
+ %{!mcpu*:-D__tune_athlon__ }}\
+%{march=athlon-4|march=athlon-xp|march=athlon-mp:-D__athlon
-D__athlon__ \
+ -D__athlon_sse__ \
+ %{!mcpu*:-D__tune_athlon__ -D__tune_athlon_sse__ }}\
+%{march=pentium4:-D__pentium4 -D__pentium4__
%{!mcpu*:-D__tune_pentium4__ }}\
+%{m386|mcpu=i386:-D__tune_i386__ }\
+%{m486|mcpu=i486:-D__tune_i486__ }\
+%{mpentium|mcpu=pentium|mcpu=i586|mcpu=pentium-mmx:-D__tune_i586__
-D__tune_pentium__ }\
+%{mpentiumpro|mcpu=pentiumpro|mcpu=i686|cpu=pentium2|cpu=pentium3:-D__t
une_i686__ \
+-D__tune_pentiumpro__ }\
+%{mcpu=k6|mcpu=k6-2|mcpu=k6-3:-D__tune_k6__ }\
+%{mcpu=athlon|mcpu=athlon-tbird|mcpu=athlon-4|mcpu=athlon-xp|mcpu=athlo
n-mp:\
+-D__tune_athlon__ }\
+%{mcpu=athlon-4|mcpu=athlon-xp|mcpu=athlon-mp:\
+-D__tune_athlon_sse__ }\
+%{mcpu=pentium4:-D__tune_pentium4__ }\
+%{march=athlon-tbird|march=athlon-xp|march=athlon-mp|march=pentium3|mar
ch=pentium4:\
+-D__SSE__ }\
+%{march=pentium-mmx|march=k6|march=k6-2|march=k6-3\
+|march=athlon|march=athlon-tbird|march=athlon-4|march=athlon-xp\
+|march=athlon-mp|march=pentium2|march=pentium3|march=pentium4:
-D__MMX__ }\
+%{march=k6-2|march=k6-3\
+|march=athlon|march=athlon-tbird|march=athlon-4|march=athlon-xp\
+|march=athlon-mp: -D__3dNOW__ }\
+%{march=athlon|march=athlon-tbird|march=athlon-4|march=athlon-xp\
+|march=athlon-mp: -D__3dNOW_A__ }\
+%{march=pentium4: -D__SSE2__ }\
+%{!march*:%{!mcpu*:%{!m386:%{!m486:%{!mpentium*:%(cpp_cpu_default)}}}}}
"
+
+#ifndef CPP_CPU_SPEC
+#ifdef TARGET_BI_ARCH
+#ifdef TARGET_64BIT_DEFAULT
+#define CPP_CPU_SPEC "%{m32:%(cpp_cpu32)}%{!m32:%(cpp_cpu64)}
%(cpp_cpucommon)"
+#else
+#define CPP_CPU_SPEC "%{m64:%(cpp_cpu64)}%{!m64:%(cpp_cpu32)}
%(cpp_cpucommon)"
+#endif
+#else
+#ifdef TARGET_64BIT_DEFAULT
+#define CPP_CPU_SPEC "%(cpp_cpu64) %(cpp_cpucommon)"
+#else
+#define CPP_CPU_SPEC "%(cpp_cpu32) %(cpp_cpucommon)"
+#endif
+#endif
+#endif
+
+#ifndef CC1_SPEC
+#define CC1_SPEC "%(cc1_cpu) "
+#endif
+
+/* This macro defines names of additional specifications to put in the
+ specs that can be used in various specifications like CC1_SPEC. Its
+ definition is an initializer with a subgrouping for each command
option.
+
+ Each subgrouping contains a string constant, that defines the
+ specification name, and a string constant that used by the GNU CC
driver
+ program.
+
+ Do not define this macro if it does not need to do anything. */
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS
+#endif
+
+#define EXTRA_SPECS
\
+ { "cpp_cpu_default", CPP_CPU_DEFAULT_SPEC },
\
+ { "cpp_cpu", CPP_CPU_SPEC },
\
+ { "cpp_cpu32", CPP_CPU32_SPEC },
\
+ { "cpp_cpu64", CPP_CPU64_SPEC },
\
+ { "cpp_cpu32sizet", CPP_CPU32_SIZE_TYPE_SPEC },
\
+ { "cpp_cpu64sizet", CPP_CPU64_SIZE_TYPE_SPEC },
\
+ { "cpp_cpucommon", CPP_CPUCOMMON_SPEC },
\
+ { "cc1_cpu", CC1_CPU_SPEC },
\
+ SUBTARGET_EXTRA_SPECS
+#endif
+
+/* target machine storage layout */
+
+/* Define for XFmode or TFmode extended real floating point support.
+ This will automatically cause REAL_ARITHMETIC to be defined.
+
+ The XFmode is specified by i386 ABI, while TFmode may be faster
+ due to alignment and simplifications in the address calculations.
+ */
+#define LONG_DOUBLE_TYPE_SIZE (TARGET_128BIT_LONG_DOUBLE ? 128 : 96)
+#define MAX_LONG_DOUBLE_TYPE_SIZE 64
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+/* Tell real.c that this is the 80-bit Intel extended float format
+ packaged in a 128-bit or 96bit entity. */
+#define INTEL_EXTENDED_IEEE_FORMAT 1
+
+
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 32
+#define FLOAT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#define MAX_WCHAR_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_LONG_TYPE_SIZE 64
+
+#if defined (TARGET_BI_ARCH) || defined (TARGET_64BIT_DEFAULT)
+#define MAX_BITS_PER_WORD 64
+#define MAX_LONG_TYPE_SIZE 64
+#else
+#define MAX_BITS_PER_WORD 32
+#define MAX_LONG_TYPE_SIZE 32
+#endif
+
+/* Define if you don't want extended real, but do want to use the
+ software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+/* #define REAL_ARITHMETIC */
+
+/* Define this if the most significant bit is the lowest numbered in
instructions that operate on numbered bit-fields. */
+/* That is not true on the 80386. */
+
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest
numbered. */
+/* That is not true on the 80386. */
+#define BYTES_BIG_ENDIAN 0
+
+/* Define this if most significant word of a multiword number is the
lowest
+ numbered. */
+/* Not true for 80386 */
+#define WORDS_BIG_ENDIAN 0
+
+/* number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine
register.
+ Note that this is not necessarily the width of data type `int';
+ if using 16-bit ints on a 80386, this would still be 32.
+ But on a machine with 16-bit registers, this would be 16. */
+#define BITS_PER_WORD (TARGET_64BIT ? 64 : 32)
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
+#define MIN_UNITS_PER_WORD 4
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE BITS_PER_WORD
+
+/* Allocation boundary (in *bits*) for storing arguments in argument
list. */
+#define PARM_BOUNDARY BITS_PER_WORD
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY BITS_PER_WORD
+
+/* Boundary (in *bits*) on which the stack pointer prefers to be
+ aligned; the compiler cannot rely on having this alignment. */
+#define PREFERRED_STACK_BOUNDARY ix86_preferred_stack_boundary
+
+/* As of July 2001, many runtimes do not align the stack properly when
+ entering main. This causes expand_main_function to forcibly align
+ the stack, which results in aligned frames for functions called from
+ main, though it does nothing for the alignment of main itself. */
+#define FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN \
+ (ix86_preferred_stack_boundary > STACK_BOUNDARY && !TARGET_64BIT)
+
+/* Allocation boundary for the code of a function. */
+#define FUNCTION_BOUNDARY 16
+
+/* Alignment of field after `int : 0' in a structure. */
+
+#define EMPTY_FIELD_BOUNDARY BITS_PER_WORD
+
+/* Minimum size in bits of the largest boundary to which any
+ and all fundamental data types supported by the hardware
+ might need to be aligned. No data type wants to be aligned
+ rounder than this.
+
+ Pentium+ prefers DFmode values to be aligned to 64 bit boundary
+ and Pentium Pro XFmode values at 128 bit boundaries. */
+
+#define BIGGEST_ALIGNMENT 128
+
+/* Decide whether a variable of mode MODE must be 128 bit aligned. */
+#define ALIGN_MODE_128(MODE) \
+ ((MODE) == XFmode || (MODE) == TFmode || ((MODE) == TImode) \
+ || (MODE) == V4SFmode || (MODE) == V4SImode)
+
+/* The published ABIs say that doubles should be aligned on word
+ boundaries, so lower the alignment for structure fields unless
+ -malign-double is set. */
+/* BIGGEST_FIELD_ALIGNMENT is also used in libobjc, where it must be
+ constant. Use the smaller value in that context. */
+#ifndef IN_TARGET_LIBS
+#define BIGGEST_FIELD_ALIGNMENT (TARGET_64BIT ? 128 :
(TARGET_ALIGN_DOUBLE ? 64 : 32))
+#else
+#define BIGGEST_FIELD_ALIGNMENT 32
+#endif
+
+/* If defined, a C expression to compute the alignment given to a
+ constant that is being placed in memory. EXP is the constant
+ and ALIGN is the alignment that the object would ordinarily have.
+ The value of this macro is used instead of that alignment to align
+ the object.
+
+ If this macro is not defined, then ALIGN is used.
+
+ The typical use of this macro is to increase alignment for string
+ constants to be word aligned so that `strcpy' calls that copy
+ constants can be done inline. */
+
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) ix86_constant_alignment ((EXP),
(ALIGN))
+
+/* If defined, a C expression to compute the alignment for a static
+ variable. TYPE is the data type, and ALIGN is the alignment that
+ the object would ordinarily have. The value of this macro is used
+ instead of that alignment to align the object.
+
+ If this macro is not defined, then ALIGN is used.
+
+ One use of this macro is to increase alignment of medium-size
+ data to make it all fit in fewer cache lines. Another is to
+ cause character arrays to be word-aligned so that `strcpy' calls
+ that copy constants to character arrays can be done inline. */
+
+#define DATA_ALIGNMENT(TYPE, ALIGN) ix86_data_alignment ((TYPE),
(ALIGN))
+
+/* If defined, a C expression to compute the alignment for a local
+ variable. TYPE is the data type, and ALIGN is the alignment that
+ the object would ordinarily have. The value of this macro is used
+ instead of that alignment to align the object.
+
+ If this macro is not defined, then ALIGN is used.
+
+ One use of this macro is to increase alignment of medium-size
+ data to make it all fit in fewer cache lines. */
+
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) ix86_local_alignment ((TYPE),
(ALIGN))
+
+/* If defined, a C expression that gives the alignment boundary, in
+ bits, of an argument with the specified mode and type. If it is
+ not defined, `PARM_BOUNDARY' is used for all arguments. */
+
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
+ ix86_function_arg_boundary ((MODE), (TYPE))
+
+/* Set this non-zero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 0
+
+/* If bit field type is int, don't let it cross an int,
+ and give entire struct the alignment of an int. */
+/* Required on the 386 since it doesn't have bitfield insns. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* Standard register usage. */
+
+/* This processor has special stack-like registers. See reg-stack.c
+ for details. */
+
+#define STACK_REGS
+#define IS_STACK_MODE(MODE) \
+ ((MODE) == DFmode || (MODE) == SFmode || (MODE) == XFmode \
+ || (MODE) == TFmode)
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ In the 80386 we give the 8 general purpose registers the numbers
0-7.
+ We number the floating point registers 8-15.
+ Note that registers 0-7 can be accessed as a short or int,
+ while only 0-3 may be used with byte `mov' instructions.
+
+ Reg 16 does not correspond to any hardware register, but instead
+ appears in the RTL as an argument pointer prior to reload, and is
+ eliminated during reloading in favor of either the stack or frame
+ pointer. */
+
+#define FIRST_PSEUDO_REGISTER 53
+
+/* Number of hardware registers that go into the DWARF-2 unwind info.
+ If not defined, equals FIRST_PSEUDO_REGISTER. */
+
+#define DWARF_FRAME_REGISTERS 17
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+ On the 80386, the stack pointer is such, as is the arg pointer.
+
+ The value is an mask - bit 1 is set for fixed registers
+ for 32bit target, while 2 is set for fixed registers for 64bit.
+ Proper value is computed in the CONDITIONAL_REGISTER_USAGE.
+ */
+#define FIXED_REGISTERS
\
+/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \
+{ 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, \
+/*arg,flags,fpsr,dir,frame*/ \
+ 3, 3, 3, 3, 3, \
+/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+/*mmx0,mmx1,mmx2,mmx3,mmx4,mmx5,mmx6,mmx7*/ \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+/* r8, r9, r10, r11, r12, r13, r14, r15*/ \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+/*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \
+ 1, 1, 1, 1, 1, 1, 1, 1}
+
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you
like.
+
+ The value is an mask - bit 1 is set for call used
+ for 32bit target, while 2 is set for call used for 64bit.
+ Proper value is computed in the CONDITIONAL_REGISTER_USAGE.
+*/
+#define CALL_USED_REGISTERS \
+/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \
+{ 3, 3, 3, 0, 2, 2, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, \
+/*arg,flags,fpsr,dir,frame*/ \
+ 3, 3, 3, 3, 3, \
+/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \
+ 3, 3, 3, 3, 3, 3, 3, 3, \
+/*mmx0,mmx1,mmx2,mmx3,mmx4,mmx5,mmx6,mmx7*/ \
+ 3, 3, 3, 3, 3, 3, 3, 3, \
+/* r8, r9, r10, r11, r12, r13, r14, r15*/ \
+ 3, 3, 3, 3, 1, 1, 1, 1, \
+/*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \
+ 3, 3, 3, 3, 3, 3, 3, 3} \
+
+/* Order in which to allocate registers. Each register must be
+ listed once, even those in FIXED_REGISTERS. List frame pointer
+ late and fixed registers last. Note that, in general, we prefer
+ registers listed in CALL_USED_REGISTERS, keeping the others
+ available for storage of persistent values.
+
+ The ORDER_REGS_FOR_LOCAL_ALLOC actually overwrite the order,
+ so this is just empty initializer for array. */
+
+#define REG_ALLOC_ORDER \
+{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,\
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, \
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \
+ 48, 49, 50, 51, 52 }
+
+/* ORDER_REGS_FOR_LOCAL_ALLOC is a macro which permits reg_alloc_order
+ to be rearranged based on a particular function. When using sse math,
+ we want to allocate SSE before x87 registers and vice versa. */
+
+#define ORDER_REGS_FOR_LOCAL_ALLOC x86_order_regs_for_local_alloc ()
+
+
+/* Macro to conditionally modify fixed_regs/call_used_regs. */
+#define CONDITIONAL_REGISTER_USAGE
\
+do {
\
+ int i;
\
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
\
+ {
\
+ fixed_regs[i] = (fixed_regs[i] & (TARGET_64BIT ? 2 : 1)) != 0;
\
+ call_used_regs[i] = (call_used_regs[i]
\
+ & (TARGET_64BIT ? 2 : 1)) != 0;
\
+ }
\
+ if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
\
+ {
\
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
\
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
\
+ }
\
+ if (! TARGET_MMX)
\
+ {
\
+ int i;
\
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
\
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
\
+ fixed_regs[i] = call_used_regs[i] = 1;
\
+ }
\
+ if (! TARGET_SSE)
\
+ {
\
+ int i;
\
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
\
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
\
+ fixed_regs[i] = call_used_regs[i] = 1;
\
+ }
\
+ if (! TARGET_80387 && ! TARGET_FLOAT_RETURNS_IN_80387)
\
+ {
\
[truncated at 1000 lines; 2226 more skipped]