| /* Definitions of x86 tunable features. |
| Copyright (C) 2013-2025 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License and |
| a copy of the GCC Runtime Library Exception along with this program; |
| see the files COPYING3 and COPYING.RUNTIME respectively. If not, see |
| <http://www.gnu.org/licenses/>. */ |
| |
/* Tuning for a given CPU XXXX consists of:
    - adding the new CPU:
      - adding PROCESSOR_XXX to processor_type (in i386.h)
      - possibly adding XXX to the CPU attribute in i386.md
      - adding XXX to processor_alias_table (in i386.cc)
      - introducing ix86_XXX_cost in i386.cc
      - the stringop generation table can be built with the test_stringop
	script (once the rest of the tuning is complete)
    - designing a scheduler model:
      - in an XXXX.md file
      - updating ix86_issue_rate and ix86_adjust_cost in i386.md
      - possibly updating ia32_multipass_dfa_lookahead, ix86_sched_reorder
	and ix86_sched_init_global if those tricks are needed
    - setting the tuning flags below; those are split into sections and each
      section is very roughly ordered by importance.  */
| |
| /*****************************************************************************/ |
| /* Scheduling flags. */ |
| /*****************************************************************************/ |
| |
| /* X86_TUNE_SCHEDULE: Enable scheduling. */ |
| DEF_TUNE (X86_TUNE_SCHEDULE, "schedule", |
| m_PENT | m_LAKEMONT | m_PPRO | m_CORE_ALL | m_BONNELL | m_SILVERMONT |
| | m_INTEL | m_K6_GEODE | m_AMD_MULTIPLE | m_ZHAOXIN | m_GOLDMONT |
| | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM |
| | m_GENERIC) |
| |
/* X86_TUNE_PARTIAL_REG_DEPENDENCY: Enable more register renaming
   on modern chips.  Prefer stores affecting the whole integer register
   over partial stores.  For example, prefer MOVZBL or MOVQ over MOVB
   to load an 8-bit value.  */
| DEF_TUNE (X86_TUNE_PARTIAL_REG_DEPENDENCY, "partial_reg_dependency", |
| m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 |
| | m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL |
| | m_AMD_MULTIPLE | m_ZHAOXIN | m_TREMONT | m_CORE_HYBRID |
| | m_CORE_ATOM | m_GENERIC) |
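/* Illustrative example (not taken from actual GCC output): with this flag
   enabled, loading an 8-bit value is preferably done as
	movzbl	(%rsi), %eax
   rather than
	movb	(%rsi), %al
   because the former writes the whole of %eax and therefore carries no
   dependency on the previous contents of the register.  */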
| |
/* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: This knob promotes all store
   destinations to be 128bit to allow register renaming on 128bit SSE units,
   but usually results in one extra microop on 64bit SSE units.
   Experimental results show that disabling this option on P4 brings over 20%
   SPECfp regression, while enabling it on K8 brings roughly 2.4% regression
   that can be partly masked by careful scheduling of moves.  */
| DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY, "sse_partial_reg_dependency", |
| m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_AMDFAM10 |
| | m_BDVER | m_ZNVER | m_ZHAOXIN | m_TREMONT | m_CORE_HYBRID |
| | m_CORE_ATOM | m_GENERIC) |
| |
| /* X86_TUNE_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY: This knob avoids |
| partial write to the destination in scalar SSE conversion from FP |
| to FP. */ |
| DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY, |
| "sse_partial_reg_fp_converts_dependency", |
| m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_AMDFAM10 |
| | m_BDVER | m_ZNVER | m_ZHAOXIN | m_CORE_HYBRID | m_CORE_ATOM |
| | m_GENERIC) |
| |
| /* X86_TUNE_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY: This knob avoids partial |
| write to the destination in scalar SSE conversion from integer to FP. */ |
| DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY, |
| "sse_partial_reg_converts_dependency", |
| m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_AMDFAM10 |
| | m_BDVER | m_ZNVER | m_ZHAOXIN | m_CORE_HYBRID | m_CORE_ATOM |
| | m_GENERIC) |
| |
/* X86_TUNE_DEST_FALSE_DEP_FOR_GLC: This knob inserts a zero idiom before
   several insns to break the false dependency on the destination register
   for the GLC micro-architecture.  */
| DEF_TUNE (X86_TUNE_DEST_FALSE_DEP_FOR_GLC, |
| "dest_false_dep_for_glc", m_SAPPHIRERAPIDS | m_ALDERLAKE) |
| |
/* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
   are resolved on SSE register parts instead of whole registers, so we may
   maintain just the lower part of scalar values in the proper format,
   leaving the upper part undefined.  */
| DEF_TUNE (X86_TUNE_SSE_SPLIT_REGS, "sse_split_regs", m_ATHLON_K8) |
| |
/* X86_TUNE_PARTIAL_FLAG_REG_STALL: This flag disables the use of flags
   set by instructions affecting just some flags (in particular shifts).
   This is because Core2 resolves dependencies on the whole flags register
   and such sequences introduce a false dependency on the previous
   instruction setting the full flags.

   The flag does not affect generation of INC and DEC; that is controlled
   by X86_TUNE_USE_INCDEC.  */
| |
| DEF_TUNE (X86_TUNE_PARTIAL_FLAG_REG_STALL, "partial_flag_reg_stall", |
| m_CORE2) |
| |
| /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid |
| partial dependencies. */ |
| DEF_TUNE (X86_TUNE_MOVX, "movx", |
| m_PPRO | m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE |
| | m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_INTEL |
| | m_GOLDMONT_PLUS | m_GEODE | m_AMD_MULTIPLE | m_ZHAOXIN |
| | m_CORE_AVX2 | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC) |
| |
| /* X86_TUNE_MEMORY_MISMATCH_STALL: Avoid partial stores that are followed by |
| full sized loads. */ |
| DEF_TUNE (X86_TUNE_MEMORY_MISMATCH_STALL, "memory_mismatch_stall", |
| m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_INTEL |
| | m_GOLDMONT | m_GOLDMONT_PLUS | m_AMD_MULTIPLE | m_ZHAOXIN |
| | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC) |
| |
/* X86_TUNE_FUSE_CMP_AND_BRANCH_32: Fuse compare with a subsequent
   conditional jump instruction for 32-bit targets.  */
| DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_32, "fuse_cmp_and_branch_32", |
| m_CORE_ALL | m_BDVER | m_ZNVER | m_ZHAOXIN | m_GENERIC) |
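/* A sketch of the fusion this enables (illustrative only): a pair such as
	cmpl	%esi, %edi
	jne	.L3
   kept adjacent can be macro-fused by the listed cores into a single
   operation, so the compare is not separated from the conditional jump.  */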
| |
| /* X86_TUNE_FUSE_CMP_AND_BRANCH_64: Fuse compare with a subsequent |
| conditional jump instruction for TARGET_64BIT. */ |
| DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_64, "fuse_cmp_and_branch_64", |
| m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_BDVER |
| | m_ZNVER | m_ZHAOXIN | m_GENERIC) |
| |
/* X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS: Fuse compare with a
   subsequent conditional jump instruction when the conditional jump
   checks the sign flag (SF) or overflow flag (OF).  */
| DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS, "fuse_cmp_and_branch_soflags", |
| m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_BDVER |
| | m_ZNVER | m_ZHAOXIN | m_GENERIC) |
| |
/* X86_TUNE_FUSE_ALU_AND_BRANCH: Fuse an ALU instruction with a subsequent
   conditional jump instruction when the ALU instruction produces the CC
   flags consumed by the conditional jump instruction.

   TODO: znver5 supports fusing with SUB, ADD, INC, DEC, OR and AND.
   There are also limitations on the immediates and displacements
   supported.  */
| DEF_TUNE (X86_TUNE_FUSE_ALU_AND_BRANCH, "fuse_alu_and_branch", |
| m_SANDYBRIDGE | m_CORE_AVX2 | m_ZHAOXIN | m_GENERIC | m_ZNVER3 | m_ZNVER4 | m_ZNVER5) |
| |
/* X86_TUNE_FUSE_MOV_AND_ALU: Fuse a mov with a subsequent ALU instruction
   when the mov is a reg-reg mov and its destination is used by the ALU.
   The ALU must be one of
   ADD, ADC, AND, XOR, OR, SUB, SBB, INC, DEC, NOT, SAL, SHL, SHR, SAR.  */
| DEF_TUNE (X86_TUNE_FUSE_MOV_AND_ALU, "fuse_mov_and_alu", |
| m_ZNVER5 | m_GRANITERAPIDS | m_GRANITERAPIDS_D) |
| |
/* X86_TUNE_FUSE_ALU_AND_BRANCH_MEM: Fuse an ALU instruction with a
   subsequent conditional jump instruction when the ALU instruction has a
   memory operand.
   TODO: Not supported by TIGERLAKE and COOPERLAKE, so m_CORE_AVX2 is
   wrong.  */
| DEF_TUNE (X86_TUNE_FUSE_ALU_AND_BRANCH_MEM, "fuse_alu_and_branch_mem", |
| m_SANDYBRIDGE | m_CORE_AVX2 | m_ZHAOXIN | m_GENERIC | m_ZNVER3 | m_ZNVER4 | m_ZNVER5) |
| |
/* X86_TUNE_FUSE_ALU_AND_BRANCH_MEM_IMM: Fuse an ALU instruction with a
   subsequent conditional jump instruction when the ALU instruction has both
   an immediate and a displacement.  */
| DEF_TUNE (X86_TUNE_FUSE_ALU_AND_BRANCH_MEM_IMM, "fuse_alu_and_branch_mem_imm", |
| m_GENERIC | m_ZNVER4 | m_ZNVER5) |
| |
/* X86_TUNE_FUSE_ALU_AND_BRANCH_RIP_RELATIVE: Fuse an ALU instruction with
   a subsequent conditional jump instruction when the ALU instruction uses
   an IP-relative address.  */
| DEF_TUNE (X86_TUNE_FUSE_ALU_AND_BRANCH_RIP_RELATIVE, |
| "fuse_alu_and_branch_rip_relative", 0) |
| |
| /*****************************************************************************/ |
| /* Function prologue, epilogue and function calling sequences. */ |
| /*****************************************************************************/ |
| |
/* X86_TUNE_ACCUMULATE_OUTGOING_ARGS: Allocate stack space for outgoing
   arguments in the prologue/epilogue instead of separately for each call
   by push/pop instructions.
   This increases code size by about 5% in 32bit mode, less so in 64bit mode
   because parameters are passed in registers.  It is a considerable win
   for targets without a stack engine, where the lack of one prevents
   multiple push operations from happening in parallel.  */
| |
| DEF_TUNE (X86_TUNE_ACCUMULATE_OUTGOING_ARGS, "accumulate_outgoing_args", |
| m_PPRO | m_P4_NOCONA | m_BONNELL | m_SILVERMONT | m_INTEL |
| | m_GOLDMONT | m_GOLDMONT_PLUS | m_ATHLON_K8 | m_ZHAOXIN) |
| |
/* X86_TUNE_PROLOGUE_USING_MOVE: Do not use push/pop in prologues that are
   considered on the critical path.  */
| DEF_TUNE (X86_TUNE_PROLOGUE_USING_MOVE, "prologue_using_move", |
| m_PPRO | m_ATHLON_K8) |
| |
/* X86_TUNE_EPILOGUE_USING_MOVE: Do not use push/pop in epilogues that are
   considered on the critical path.  */
| DEF_TUNE (X86_TUNE_EPILOGUE_USING_MOVE, "epilogue_using_move", |
| m_PPRO | m_ATHLON_K8) |
| |
| /* X86_TUNE_USE_LEAVE: Use "leave" instruction in epilogues where it fits. */ |
| DEF_TUNE (X86_TUNE_USE_LEAVE, "use_leave", |
| m_386 | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE | m_ZHAOXIN |
| | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC) |
| |
/* X86_TUNE_PUSH_MEMORY: Enable generation of "push mem" instructions.
   Some chips, like the 486 and Pentium, work faster with separate load
   and push instructions.  */
| DEF_TUNE (X86_TUNE_PUSH_MEMORY, "push_memory", |
| m_386 | m_P4_NOCONA | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE |
| | m_ZHAOXIN | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC) |
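/* Illustrative example: with this flag GCC may emit
	pushl	x
   directly instead of the two-instruction sequence
	movl	x, %eax
	pushl	%eax
   that the 486 and Pentium prefer (x stands for an arbitrary memory
   operand).  */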
| |
| /* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred |
| over esp subtraction. */ |
| DEF_TUNE (X86_TUNE_SINGLE_PUSH, "single_push", m_386 | m_486 | m_PENT |
| | m_LAKEMONT | m_K6_GEODE) |
| |
/* X86_TUNE_DOUBLE_PUSH: Enable if double push insn is preferred
   over esp subtraction.  */
| DEF_TUNE (X86_TUNE_DOUBLE_PUSH, "double_push", m_PENT | m_LAKEMONT |
| | m_K6_GEODE) |
| |
| /* X86_TUNE_SINGLE_POP: Enable if single pop insn is preferred |
| over esp addition. */ |
| DEF_TUNE (X86_TUNE_SINGLE_POP, "single_pop", m_386 | m_486 | m_PENT |
| | m_LAKEMONT | m_PPRO) |
| |
| /* X86_TUNE_DOUBLE_POP: Enable if double pop insn is preferred |
| over esp addition. */ |
| DEF_TUNE (X86_TUNE_DOUBLE_POP, "double_pop", m_PENT | m_LAKEMONT) |
| |
| /*****************************************************************************/ |
| /* Branch predictor tuning and Front-end tuning */ |
| /*****************************************************************************/ |
| |
/* X86_TUNE_PAD_SHORT_FUNCTION: Make every function at least 4
   instructions long.  */
| DEF_TUNE (X86_TUNE_PAD_SHORT_FUNCTION, "pad_short_function", m_BONNELL) |
| |
/* X86_TUNE_PAD_RETURNS: Place a NOP before every RET that is the destination
   of a conditional jump or is directly preceded by another jump instruction.
   This is important for K8-AMDFAM10 because the branch prediction
   architecture expects at most one jump per 2-byte window.  Failing to
   pad returns leads to a misaligned return stack.  */
| DEF_TUNE (X86_TUNE_PAD_RETURNS, "pad_returns", |
| m_ATHLON_K8 | m_AMDFAM10) |
| |
| /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more |
| than 4 branch instructions in the 16 byte window. */ |
| DEF_TUNE (X86_TUNE_FOUR_JUMP_LIMIT, "four_jump_limit", |
| m_PPRO | m_P4_NOCONA | m_BONNELL | m_SILVERMONT | m_GOLDMONT |
| | m_GOLDMONT_PLUS | m_INTEL | m_ATHLON_K8 | m_AMDFAM10) |
| |
| /* X86_TUNE_ALIGN_TIGHT_LOOPS: if false, tight loops are not aligned. */ |
| DEF_TUNE (X86_TUNE_ALIGN_TIGHT_LOOPS, "align_tight_loops", |
| ~(m_ZHAOXIN | m_CASCADELAKE | m_SKYLAKE_AVX512)) |
| |
| /*****************************************************************************/ |
| /* Integer instruction selection tuning */ |
| /*****************************************************************************/ |
| |
| /* X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL: Enable software prefetching |
| at -O3. For the moment, the prefetching seems badly tuned for Intel |
| chips. */ |
| DEF_TUNE (X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL, "software_prefetching_beneficial", |
| m_K6_GEODE | m_ATHLON_K8 | m_AMDFAM10 | m_BDVER | m_BTVER) |
| |
| /* X86_TUNE_LCP_STALL: Avoid an expensive length-changing prefix stall |
| on 16-bit immediate moves into memory on Core2 and Corei7. */ |
| DEF_TUNE (X86_TUNE_LCP_STALL, "lcp_stall", m_CORE_ALL | m_ZHAOXIN | m_GENERIC) |
| |
| /* X86_TUNE_READ_MODIFY: Enable use of read-modify instructions such |
| as "add mem, reg". */ |
| DEF_TUNE (X86_TUNE_READ_MODIFY, "read_modify", ~(m_PENT | m_LAKEMONT | m_PPRO)) |
| |
/* X86_TUNE_USE_INCDEC: Enable use of inc/dec instructions.

   Core2 and Nehalem have a 7 cycle stall for partial flag register stalls.
   Sandy Bridge and Ivy Bridge generate an extra uop.  On Haswell this extra
   uop is output only when the values really need to be merged, which is not
   done by GCC generated code.  */
| DEF_TUNE (X86_TUNE_USE_INCDEC, "use_incdec", |
| ~(m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE |
| | m_BONNELL | m_SILVERMONT | m_INTEL | m_GOLDMONT |
| | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM |
| | m_ZHAOXIN | m_GENERIC)) |
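/* Illustrative example: where this flag is disabled GCC emits
	addl	$1, %eax
   instead of
	incl	%eax
   because inc/dec leave CF untouched and thus update the flags register
   only partially, which the chips excluded above handle poorly.  */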
| |
/* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
   for DFmode copies.  */
| DEF_TUNE (X86_TUNE_INTEGER_DFMODE_MOVES, "integer_dfmode_moves", |
| ~(m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT |
| | m_INTEL | m_GEODE | m_AMD_MULTIPLE | m_ZHAOXIN | m_GOLDMONT |
| | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM |
| | m_GENERIC)) |
| |
| /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag |
| will impact LEA instruction selection. */ |
| DEF_TUNE (X86_TUNE_OPT_AGU, "opt_agu", m_BONNELL | m_SILVERMONT | m_GOLDMONT |
| | m_GOLDMONT_PLUS | m_INTEL | m_ZHAOXIN) |
| |
| /* X86_TUNE_AVOID_LEA_FOR_ADDR: Avoid lea for address computation. */ |
| DEF_TUNE (X86_TUNE_AVOID_LEA_FOR_ADDR, "avoid_lea_for_addr", |
| m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS) |
| |
| /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is |
| vector path on AMD machines. |
| FIXME: Do we need to enable this for core? */ |
| DEF_TUNE (X86_TUNE_SLOW_IMUL_IMM32_MEM, "slow_imul_imm32_mem", |
| m_K8 | m_AMDFAM10) |
| |
| /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD |
| machines. |
| FIXME: Do we need to enable this for core? */ |
| DEF_TUNE (X86_TUNE_SLOW_IMUL_IMM8, "slow_imul_imm8", |
| m_K8 | m_AMDFAM10) |
| |
| /* X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE: Try to avoid memory operands for |
| a conditional move. */ |
| DEF_TUNE (X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE, "avoid_mem_opnd_for_cmove", |
| m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL) |
| |
| /* X86_TUNE_SINGLE_STRINGOP: Enable use of single string operations, such |
| as MOVS and STOS (without a REP prefix) to move/set sequences of bytes. */ |
| DEF_TUNE (X86_TUNE_SINGLE_STRINGOP, "single_stringop", m_386 | m_P4_NOCONA) |
| |
| /* X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB: Enable use of REP MOVSB/STOSB to |
| move/set sequences of bytes with known size. */ |
| DEF_TUNE (X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB, |
| "prefer_known_rep_movsb_stosb", |
| m_SKYLAKE | m_CORE_HYBRID | m_CORE_ATOM | m_TREMONT | m_CORE_AVX512 |
| | m_ZHAOXIN) |
| |
/* X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES: Enable generation of
   compact prologues and epilogues by issuing misaligned moves.  This
   requires the target to handle misaligned moves and partial memory stalls
   reasonably well.
   FIXME: This may actually be a win on more targets than listed here.  */
| DEF_TUNE (X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES, |
| "misaligned_move_string_pro_epilogues", |
| m_386 | m_486 | m_CORE_ALL | m_AMD_MULTIPLE | m_ZHAOXIN | m_TREMONT |
| | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC) |
| |
| /* X86_TUNE_USE_SAHF: Controls use of SAHF. */ |
| DEF_TUNE (X86_TUNE_USE_SAHF, "use_sahf", |
| m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT |
| | m_INTEL | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER |
| | m_ZNVER | m_ZHAOXIN | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT |
| | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC) |
| |
/* X86_TUNE_USE_CLTD: Controls use of CLTD and CQTO instructions.  */
| DEF_TUNE (X86_TUNE_USE_CLTD, "use_cltd", |
| ~(m_PENT | m_LAKEMONT | m_BONNELL | m_SILVERMONT | m_INTEL |
| | m_K6 | m_GOLDMONT | m_GOLDMONT_PLUS)) |
| |
| /* X86_TUNE_USE_BT: Enable use of BT (bit test) instructions. */ |
| DEF_TUNE (X86_TUNE_USE_BT, "use_bt", |
| m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_INTEL | m_LAKEMONT |
| | m_AMD_MULTIPLE | m_ZHAOXIN | m_GOLDMONT | m_GOLDMONT_PLUS |
| | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC) |
| |
| /* X86_TUNE_AVOID_FALSE_DEP_FOR_BMI: Avoid false dependency |
| for bit-manipulation instructions. */ |
| DEF_TUNE (X86_TUNE_AVOID_FALSE_DEP_FOR_BMI, "avoid_false_dep_for_bmi", |
| m_SANDYBRIDGE | m_HASWELL | m_SKYLAKE | m_SKYLAKE_AVX512 |
| | m_CANNONLAKE | m_CASCADELAKE | m_COOPERLAKE |
| | m_ZHAOXIN | m_GENERIC) |
| |
| /* X86_TUNE_AVOID_FALSE_DEP_FOR_TZCNT: Avoid false dependency |
| for tzcnt instruction (also included in X86_TUNE_AVOID_FALSE_DEP_FOR_BMI). */ |
| DEF_TUNE (X86_TUNE_AVOID_FALSE_DEP_FOR_TZCNT, "avoid_false_dep_for_tzcnt", |
| m_ZNVER5) |
| |
| /* X86_TUNE_AVOID_FALSE_DEP_FOR_BLS: Avoid false dependency |
| for blsi, blsr and blsmsk instructions. */ |
| DEF_TUNE (X86_TUNE_AVOID_FALSE_DEP_FOR_BLS, "avoid_false_dep_for_bls", |
| m_ZNVER5) |
| |
/* X86_TUNE_ADJUST_UNROLL: This enables adjusting the unroll factor based
   on hardware capabilities.  Bdver3 hardware has a loop buffer which makes
   unrolling small loops less important.  For such architectures we adjust
   the unroll factor so that the unrolled loop fits the loop buffer.  */
| DEF_TUNE (X86_TUNE_ADJUST_UNROLL, "adjust_unroll_factor", m_BDVER3 | m_BDVER4) |
| |
/* X86_TUNE_ONE_IF_CONV_INSN: Restrict the number of cmov insns in
   an if-converted sequence to one.  */
| DEF_TUNE (X86_TUNE_ONE_IF_CONV_INSN, "one_if_conv_insn", |
| m_SILVERMONT | m_HASWELL | m_SKYLAKE | m_GOLDMONT | m_GOLDMONT_PLUS |
| | m_TREMONT | m_ZHAOXIN) |
| |
| /* X86_TUNE_AVOID_MFENCE: Use lock prefixed instructions instead of mfence. */ |
| DEF_TUNE (X86_TUNE_AVOID_MFENCE, "avoid_mfence", |
| m_CORE_ALL | m_BDVER | m_ZNVER | m_ZHAOXIN | m_TREMONT | m_CORE_HYBRID |
| | m_CORE_ATOM | m_GENERIC) |
| |
/* X86_TUNE_EXPAND_ABS: This enables a new abs pattern by
   generating instructions for abs (x) = (((signed) x >> (W-1)) ^ x) -
   ((signed) x >> (W-1)) instead of cmove or SSE max/abs instructions.  */
| DEF_TUNE (X86_TUNE_EXPAND_ABS, "expand_abs", |
| m_CORE_ALL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_ZHAOXIN) |
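/* A sketch of the expanded sequence for a 32-bit abs (x) (illustrative;
   the register choice is arbitrary):
	movl	%edi, %eax
	sarl	$31, %eax	# t = x >> 31
	xorl	%eax, %edi	# x ^= t
	subl	%eax, %edi	# x -= t
   avoiding both cmove and SSE max/abs instructions.  */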
| |
| /*****************************************************************************/ |
| /* 387 instruction selection tuning */ |
| /*****************************************************************************/ |
| |
/* X86_TUNE_USE_HIMODE_FIOP: Enables use of x87 instructions with a 16bit
   integer operand.
   FIXME: Why is this disabled for modern chips?  */
| DEF_TUNE (X86_TUNE_USE_HIMODE_FIOP, "use_himode_fiop", |
| m_386 | m_486 | m_K6_GEODE) |
| |
| /* X86_TUNE_USE_SIMODE_FIOP: Enables use of x87 instructions with 32bit |
| integer operand. */ |
| DEF_TUNE (X86_TUNE_USE_SIMODE_FIOP, "use_simode_fiop", |
| ~(m_PENT | m_LAKEMONT | m_PPRO | m_CORE_ALL | m_BONNELL |
| | m_SILVERMONT | m_INTEL | m_AMD_MULTIPLE | m_ZHAOXIN | m_GOLDMONT |
| | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM |
| | m_GENERIC)) |
| |
/* X86_TUNE_USE_FFREEP: Use the ffreep instruction instead of fstp.  */
| DEF_TUNE (X86_TUNE_USE_FFREEP, "use_ffreep", m_AMD_MULTIPLE | m_ZHAOXIN) |
| |
| /* X86_TUNE_EXT_80387_CONSTANTS: Use fancy 80387 constants, such as PI. */ |
| DEF_TUNE (X86_TUNE_EXT_80387_CONSTANTS, "ext_80387_constants", |
| m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT |
| | m_INTEL | m_K6_GEODE | m_ATHLON_K8 | m_ZHAOXIN | m_GOLDMONT |
| | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM |
| | m_GENERIC) |
| |
| /*****************************************************************************/ |
| /* SSE instruction selection tuning */ |
| /*****************************************************************************/ |
| |
| /* X86_TUNE_GENERAL_REGS_SSE_SPILL: Try to spill general regs to SSE |
| regs instead of memory. */ |
| DEF_TUNE (X86_TUNE_GENERAL_REGS_SSE_SPILL, "general_regs_sse_spill", |
| m_CORE_ALL) |
| |
| /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL: Use movups for misaligned loads instead |
| of a sequence loading registers by parts. */ |
| DEF_TUNE (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL, "sse_unaligned_load_optimal", |
| m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_SILVERMONT | m_INTEL |
| | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID |
| | m_CORE_ATOM | m_AMDFAM10 | m_BDVER | m_BTVER | m_ZNVER | m_ZHAOXIN |
| | m_GENERIC) |
| |
/* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL: Use movups for misaligned stores
   instead of a sequence storing registers by parts.  */
| DEF_TUNE (X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL, "sse_unaligned_store_optimal", |
| m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_SILVERMONT |
| | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID |
| | m_CORE_ATOM | m_BDVER | m_ZNVER | m_ZHAOXIN | m_GENERIC) |
| |
| /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL: Use packed single |
| precision 128bit instructions instead of double where possible. */ |
| DEF_TUNE (X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL, "sse_packed_single_insn_optimal", |
| m_BDVER | m_ZNVER) |
| |
/* X86_TUNE_SSE_TYPELESS_STORES: Always use movaps/movups for 128bit
   stores.  */
| DEF_TUNE (X86_TUNE_SSE_TYPELESS_STORES, "sse_typeless_stores", |
| m_AMD_MULTIPLE | m_ZHAOXIN | m_CORE_ALL | m_TREMONT | m_CORE_HYBRID |
| | m_CORE_ATOM | m_GENERIC) |
| |
| /* X86_TUNE_SSE_LOAD0_BY_PXOR: Always use pxor to load0 as opposed to |
| xorps/xorpd and other variants. */ |
| DEF_TUNE (X86_TUNE_SSE_LOAD0_BY_PXOR, "sse_load0_by_pxor", |
| m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BDVER | m_BTVER | m_ZNVER |
| | m_ZHAOXIN | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC) |
| |
/* X86_TUNE_INTER_UNIT_MOVES_TO_VEC: Enable moves from integer
   to SSE registers.  If disabled, the moves will be done by storing
   the value to memory and reloading.
   Enable this flag for generic - the only relevant architecture preferring
   no inter-unit moves is Bulldozer.  While this causes a small regression on
   SPECfp scores (sub 0.3%), disabling inter-unit moves noticeably penalizes
   hand-written vectorized code which uses e.g. _mm_set_epi16.  */
| DEF_TUNE (X86_TUNE_INTER_UNIT_MOVES_TO_VEC, "inter_unit_moves_to_vec", |
| ~(m_ATHLON_K8 | m_AMDFAM10 | m_BDVER | m_BTVER)) |
| |
/* X86_TUNE_INTER_UNIT_MOVES_FROM_VEC: Enable moves from SSE
   to integer registers.  If disabled, the moves will be done by storing
   the value to memory and reloading.  */
| DEF_TUNE (X86_TUNE_INTER_UNIT_MOVES_FROM_VEC, "inter_unit_moves_from_vec", |
| ~m_ATHLON_K8) |
| |
/* X86_TUNE_INTER_UNIT_CONVERSIONS: Enable float<->integer conversions
   to use both SSE and integer registers at the same time.  */
| DEF_TUNE (X86_TUNE_INTER_UNIT_CONVERSIONS, "inter_unit_conversions", |
| ~(m_AMDFAM10 | m_BDVER)) |
| |
| /* X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS: Try to split memory operand for |
| fp converts to destination register. */ |
| DEF_TUNE (X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS, "split_mem_opnd_for_fp_converts", |
| m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL) |
| |
| /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion |
| from FP to FP. This form of instructions avoids partial write to the |
| destination. */ |
| DEF_TUNE (X86_TUNE_USE_VECTOR_FP_CONVERTS, "use_vector_fp_converts", |
| m_AMDFAM10) |
| |
| /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion |
| from integer to FP. */ |
| DEF_TUNE (X86_TUNE_USE_VECTOR_CONVERTS, "use_vector_converts", m_AMDFAM10) |
| |
/* X86_TUNE_SLOW_PSHUFB: Indicates tunings with a slow pshufb instruction.  */
| DEF_TUNE (X86_TUNE_SLOW_PSHUFB, "slow_pshufb", |
| m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL) |
| |
| /* X86_TUNE_AVOID_4BYTE_PREFIXES: Avoid instructions requiring 4+ bytes of prefixes. */ |
| DEF_TUNE (X86_TUNE_AVOID_4BYTE_PREFIXES, "avoid_4byte_prefixes", |
| m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_CORE_HYBRID |
| | m_CORE_ATOM | m_INTEL) |
| |
| /* X86_TUNE_USE_GATHER_2PARTS: Use gather instructions for vectors with 2 |
| elements. */ |
| DEF_TUNE (X86_TUNE_USE_GATHER_2PARTS, "use_gather_2parts", |
| ~(m_ZNVER | m_CORE_HYBRID |
| | m_YONGFENG | m_SHIJIDADAO | m_CORE_ATOM | m_GENERIC | m_GDS)) |
| |
/* X86_TUNE_USE_SCATTER_2PARTS: Use scatter instructions for vectors with 2
   elements.  */
| DEF_TUNE (X86_TUNE_USE_SCATTER_2PARTS, "use_scatter_2parts", |
| ~(m_ZNVER4 | m_ZNVER5)) |
| |
| /* X86_TUNE_USE_GATHER_4PARTS: Use gather instructions for vectors with 4 |
| elements. */ |
| DEF_TUNE (X86_TUNE_USE_GATHER_4PARTS, "use_gather_4parts", |
| ~(m_ZNVER | m_CORE_HYBRID |
| | m_YONGFENG | m_SHIJIDADAO | m_CORE_ATOM | m_GENERIC | m_GDS)) |
| |
/* X86_TUNE_USE_SCATTER_4PARTS: Use scatter instructions for vectors with 4
   elements.  */
| DEF_TUNE (X86_TUNE_USE_SCATTER_4PARTS, "use_scatter_4parts", |
| ~(m_ZNVER4 | m_ZNVER5)) |
| |
/* X86_TUNE_USE_GATHER_8PARTS: Use gather instructions for vectors with 8
   or more elements.  */
| DEF_TUNE (X86_TUNE_USE_GATHER_8PARTS, "use_gather_8parts", |
| ~(m_ZNVER | m_CORE_HYBRID | m_CORE_ATOM |
| | m_YONGFENG | m_SHIJIDADAO | m_GENERIC | m_GDS)) |
| |
/* X86_TUNE_USE_SCATTER_8PARTS: Use scatter instructions for vectors with 8
   or more elements.  */
| DEF_TUNE (X86_TUNE_USE_SCATTER_8PARTS, "use_scatter_8parts", |
| ~(m_ZNVER4 | m_ZNVER5)) |
| |
| /* X86_TUNE_AVOID_128FMA_CHAINS: Avoid creating loops with tight 128bit or |
| smaller FMA chain. */ |
| DEF_TUNE (X86_TUNE_AVOID_128FMA_CHAINS, "avoid_fma_chains", m_ZNVER |
| | m_YONGFENG | m_SHIJIDADAO | m_GENERIC) |
| |
| /* X86_TUNE_AVOID_256FMA_CHAINS: Avoid creating loops with tight 256bit or |
| smaller FMA chain. */ |
| DEF_TUNE (X86_TUNE_AVOID_256FMA_CHAINS, "avoid_fma256_chains", |
| m_ZNVER2 | m_ZNVER3 | m_ZNVER4 | m_ZNVER5 | m_CORE_HYBRID |
| | m_SAPPHIRERAPIDS | m_GRANITERAPIDS | m_GRANITERAPIDS_D |
| | m_DIAMONDRAPIDS | m_CORE_ATOM | m_GENERIC) |
| |
| /* X86_TUNE_AVOID_512FMA_CHAINS: Avoid creating loops with tight 512bit or |
| smaller FMA chain. */ |
| DEF_TUNE (X86_TUNE_AVOID_512FMA_CHAINS, "avoid_fma512_chains", m_ZNVER5) |
| |
/* X86_TUNE_V2DF_REDUCTION_PREFER_HADDPD: Prefer haddpd
   for v2df vector reduction.  */
| DEF_TUNE (X86_TUNE_V2DF_REDUCTION_PREFER_HADDPD, |
| "v2df_reduction_prefer_haddpd", m_NONE) |
| |
/* X86_TUNE_SSE_MOVCC_USE_BLENDV: Prefer blendv instructions to the
   3-instruction sequence (op1 & mask) | (op2 & ~mask)
   for a vector conditional move.
   For Crestmont, 4-operand vex blendv instructions come from MSROM,
   which is slow.  */
| DEF_TUNE (X86_TUNE_SSE_MOVCC_USE_BLENDV, |
| "sse_movcc_use_blendv", ~m_CORE_ATOM) |
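/* An illustrative expansion (mask assumed to be in %xmm0, op1 in %xmm1,
   op2 in %xmm2): the blendv form
	blendvps %xmm0, %xmm1, %xmm2	# result in %xmm2
   replaces the three-instruction fallback
	andps	%xmm0, %xmm1	# op1 & mask
	andnps	%xmm2, %xmm0	# op2 & ~mask
	orps	%xmm1, %xmm0	# result in %xmm0
   On Crestmont-based cores the 4-operand vex blendv is microcoded (MSROM)
   and slower than the fallback, hence the m_CORE_ATOM exclusion.  */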
| |
/* X86_TUNE_SSE_REDUCTION_PREFER_PSHUF: Prefer pshuf to reduce V16QI,
   V8HI, V4SI, V4SF and V2DI modes when lshr is costlier.  */
| DEF_TUNE (X86_TUNE_SSE_REDUCTION_PREFER_PSHUF, |
| "sse_reduction_prefer_pshuf", m_ZNVER4 | m_ZNVER5) |
| |
| /*****************************************************************************/ |
/* AVX instruction selection tuning (some of the SSE flags affect AVX, too) */
| /*****************************************************************************/ |
| |
| /* X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL: if false, unaligned loads are |
| split. */ |
| DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL, "256_unaligned_load_optimal", |
| ~(m_NEHALEM | m_SANDYBRIDGE)) |
| |
| /* X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL: if false, unaligned stores are |
| split. */ |
| DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL, "256_unaligned_store_optimal", |
| ~(m_NEHALEM | m_SANDYBRIDGE | m_BDVER | m_ZNVER1)) |
| |
| /* X86_TUNE_AVX256_SPLIT_REGS: if true, AVX256 ops are split into two AVX128 ops. */ |
DEF_TUNE (X86_TUNE_AVX256_SPLIT_REGS, "avx256_split_regs", m_BDVER | m_BTVER2
	  | m_ZNVER1 | m_CORE_ATOM)
| |
| /* X86_TUNE_AVX128_OPTIMAL: Enable 128-bit AVX instruction generation for |
| the auto-vectorizer. */ |
| DEF_TUNE (X86_TUNE_AVX128_OPTIMAL, "avx128_optimal", m_BDVER | m_BTVER2 |
| | m_ZNVER1) |
| |
| /* X86_TUNE_AVX256_OPTIMAL: Use 256-bit AVX instructions instead of 512-bit AVX |
| instructions in the auto-vectorizer. */ |
| DEF_TUNE (X86_TUNE_AVX256_OPTIMAL, "avx256_optimal", m_CORE_AVX512) |
| |
| /* X86_TUNE_AVX256_AVOID_VEC_PERM: Avoid using 256-bit cross-lane |
| vector permutation instructions in the auto-vectorizer. */ |
| DEF_TUNE (X86_TUNE_AVX256_AVOID_VEC_PERM, |
| "avx256_avoid_vec_perm", m_CORE_ATOM) |
| |
/* X86_TUNE_AVX512_SPLIT_REGS: if true, AVX512 ops are split into two
   AVX256 ops.  */
| DEF_TUNE (X86_TUNE_AVX512_SPLIT_REGS, "avx512_split_regs", m_ZNVER4) |
| |
/* It's better to align MOVE_MAX with prefer_vector_width to reduce the
   risk of STLF stalls (small store followed by big load).  */
| /* X86_TUNE_AVX256_MOVE_BY_PIECES: Optimize move_by_pieces with 256-bit |
| AVX instructions. */ |
| DEF_TUNE (X86_TUNE_AVX256_MOVE_BY_PIECES, "avx256_move_by_pieces", |
| m_CORE_HYBRID | m_CORE_AVX2 | m_ZNVER1 | m_ZNVER2 | m_ZNVER3) |
| |
| /* X86_TUNE_AVX512_MOVE_BY_PIECES: Optimize move_by_pieces with 512-bit |
| AVX instructions. */ |
| DEF_TUNE (X86_TUNE_AVX512_MOVE_BY_PIECES, "avx512_move_by_pieces", |
| m_ZNVER4 | m_ZNVER5) |
| |
| /* X86_TUNE_AVX512_TWO_EPILOGUES: Use two vector epilogues for 512-bit |
| vectorized loops. */ |
| DEF_TUNE (X86_TUNE_AVX512_TWO_EPILOGUES, "avx512_two_epilogues", |
| m_ZNVER4 | m_ZNVER5) |
| |
/* X86_TUNE_AVX512_MASKED_EPILOGUES: Use two masked vector epilogues
   when they fit.  */
| DEF_TUNE (X86_TUNE_AVX512_MASKED_EPILOGUES, "avx512_masked_epilogues", |
| m_ZNVER4 | m_ZNVER5) |
| |
| /*****************************************************************************/ |
| /*****************************************************************************/ |
/* Historical relics: tuning flags that help specific old CPU designs. */
| /*****************************************************************************/ |
| |
| /* X86_TUNE_DOUBLE_WITH_ADD: Use add instead of sal to double value in |
| an integer register. */ |
| DEF_TUNE (X86_TUNE_DOUBLE_WITH_ADD, "double_with_add", ~m_386) |
| |
/* X86_TUNE_ALWAYS_FANCY_MATH_387: Controls use of fancy 387 operations,
   such as fsqrt, fprem, fsin, fcos, fsincos etc.
   Should be enabled for all targets that always have a coprocessor.  */
| DEF_TUNE (X86_TUNE_ALWAYS_FANCY_MATH_387, "always_fancy_math_387", |
| ~(m_386 | m_486 | m_LAKEMONT)) |
| |
| /* X86_TUNE_UNROLL_STRLEN: Produce (quite lame) unrolled sequence for |
| inline strlen. This affects only -minline-all-stringops mode. By |
| default we always dispatch to a library since our internal strlen |
| is bad. */ |
| DEF_TUNE (X86_TUNE_UNROLL_STRLEN, "unroll_strlen", ~m_386) |
| |
| /* X86_TUNE_SHIFT1: Enables use of short encoding of "sal reg" instead of |
| longer "sal $1, reg". */ |
| DEF_TUNE (X86_TUNE_SHIFT1, "shift1", ~m_486) |
| |
/* X86_TUNE_ZERO_EXTEND_WITH_AND: Use AND instruction instead
   of movzbl/movzwl.  */
| DEF_TUNE (X86_TUNE_ZERO_EXTEND_WITH_AND, "zero_extend_with_and", |
| m_486 | m_PENT) |
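/* Illustrative example: assuming the 8-bit value already sits in %al,
   these chips prefer zero-extending it with
	andl	$255, %eax
   rather than with
	movzbl	%al, %eax  */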
| |
| /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have same latency for HImode |
| and SImode multiply, but 386 and 486 do HImode multiply faster. */ |
| DEF_TUNE (X86_TUNE_PROMOTE_HIMODE_IMUL, "promote_himode_imul", |
| ~(m_386 | m_486)) |
| |
/* X86_TUNE_FAST_PREFIX: Enable demoting some 32bit or 64bit arithmetic
   into 16bit/8bit when the resulting sequence is shorter.  For example,
   demoting "and $-65536, reg" to a 16bit store of 0.  */
| DEF_TUNE (X86_TUNE_FAST_PREFIX, "fast_prefix", |
| ~(m_386 | m_486 | m_PENT | m_LAKEMONT)) |
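/* Illustrative example of such a demotion:
	andl	$-65536, %eax
   becomes the shorter
	movw	$0, %ax
   at the cost of an operand-size prefix, which is not fast to decode on
   the chips excluded above.  */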
| |
| /* X86_TUNE_READ_MODIFY_WRITE: Enable use of read modify write instructions |
| such as "add $1, mem". */ |
| DEF_TUNE (X86_TUNE_READ_MODIFY_WRITE, "read_modify_write", |
| ~(m_PENT | m_LAKEMONT)) |
| |
/* X86_TUNE_MOVE_M1_VIA_OR: On Pentiums, it is faster to load -1 via OR
   than via a MOV.  */
| DEF_TUNE (X86_TUNE_MOVE_M1_VIA_OR, "move_m1_via_or", m_PENT | m_LAKEMONT) |
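/* Illustrative size comparison: "orl $-1, %eax" encodes in 3 bytes
   (83 c8 ff) while "movl $-1, %eax" needs 5 bytes (b8 ff ff ff ff); the
   OR form is what the listed chips prefer, at the cost of a dependency on
   the previous value of %eax.  */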
| |
| /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is, |
| but one byte longer. */ |
| DEF_TUNE (X86_TUNE_NOT_UNPAIRABLE, "not_unpairable", m_PENT | m_LAKEMONT) |
| |
/* X86_TUNE_PARTIAL_REG_STALL: Pentium Pro, unlike later chips, handled
   use of partial registers by renaming.  This improved performance of 16bit
   code where upper halves of registers are not used.  It also leads to
   a penalty whenever a 16bit store is followed by a 32bit use.  This flag
   disables production of such sequences in common cases.
   See also X86_TUNE_HIMODE_MATH.

   In the current implementation the partial register stalls are not
   eliminated very well - they can be introduced via subregs synthesized
   by combine and can happen in caller/callee saving sequences.  */
| DEF_TUNE (X86_TUNE_PARTIAL_REG_STALL, "partial_reg_stall", m_PPRO) |
| |
/* X86_TUNE_PARTIAL_MEMORY_READ_STALL: Reading a (possibly unaligned) part of
   a memory location after a large write to the same address causes a
   store-to-load forwarding stall.  */
| DEF_TUNE (X86_TUNE_PARTIAL_MEMORY_READ_STALL, "partial_memory_read_stall", |
| m_386 | m_486 | m_PENT | m_LAKEMONT | m_PPRO | m_P4_NOCONA | m_CORE2 |
| | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT |
| | m_K6_GEODE | m_ATHLON_K8 | m_AMDFAM10) |
| |
| /* X86_TUNE_PROMOTE_QIMODE: When it is cheap, turn 8bit arithmetic to |
| corresponding 32bit arithmetic. */ |
| DEF_TUNE (X86_TUNE_PROMOTE_QIMODE, "promote_qimode", |
| ~m_PPRO) |
| |
/* X86_TUNE_PROMOTE_HI_REGS: Same, but for 16bit arithmetic.  Again we avoid
   partial register stalls on PentiumPro targets.  */
| DEF_TUNE (X86_TUNE_PROMOTE_HI_REGS, "promote_hi_regs", m_PPRO) |
| |
| /* X86_TUNE_HIMODE_MATH: Enable use of 16bit arithmetic. |
| On PPro this flag is meant to avoid partial register stalls. */ |
| DEF_TUNE (X86_TUNE_HIMODE_MATH, "himode_math", ~m_PPRO) |
| |
| /* X86_TUNE_SPLIT_LONG_MOVES: Avoid instructions moving immediates |
| directly to memory. */ |
| DEF_TUNE (X86_TUNE_SPLIT_LONG_MOVES, "split_long_moves", m_PPRO) |
| |
| /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */ |
| DEF_TUNE (X86_TUNE_USE_XCHGB, "use_xchgb", m_PENT4) |
| |
| /* X86_TUNE_USE_MOV0: Use "mov $0, reg" instead of "xor reg, reg" to clear |
| integer register. */ |
| DEF_TUNE (X86_TUNE_USE_MOV0, "use_mov0", m_K6) |
| |
| /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory |
| operand that cannot be represented using a modRM byte. The XOR |
| replacement is long decoded, so this split helps here as well. */ |
| DEF_TUNE (X86_TUNE_NOT_VECTORMODE, "not_vectormode", m_K6) |
| |
| /* X86_TUNE_AVOID_VECTOR_DECODE: Enable splitters that avoid vector decoded |
| forms of instructions on K8 targets. */ |
| DEF_TUNE (X86_TUNE_AVOID_VECTOR_DECODE, "avoid_vector_decode", |
| m_K8) |
| |
/* X86_TUNE_BRANCH_PREDICTION_HINTS_TAKEN: Starting with the Redwood Cove
   microarchitecture, if the predictor has no stored information about a
   branch and the branch has the Intel SSE2 branch taken hint
   (i.e., instruction prefix 3EH), the decoder flips the branch's prediction
   from not-taken to taken.  It then flushes the pipeline in front of it and
   steers this pipeline to fetch the taken path of the branch.  */
| DEF_TUNE (X86_TUNE_BRANCH_PREDICTION_HINTS_TAKEN, "branch_prediction_hints_taken", m_NONE) |
| |
| /*****************************************************************************/ |
| /* This never worked well before. */ |
| /*****************************************************************************/ |
| |
/* X86_TUNE_BRANCH_PREDICTION_HINTS_NOT_TAKEN: Branch hints were put in P4
   based on simulation results.  But after P4 was made, no performance
   benefit was observed with branch hints.  They also increase the code
   size.  As a result, icc never generates branch hints.  */
| DEF_TUNE (X86_TUNE_BRANCH_PREDICTION_HINTS_NOT_TAKEN, "branch_prediction_hints_not_taken", m_NONE) |
| |
| /* X86_TUNE_QIMODE_MATH: Enable use of 8bit arithmetic. */ |
| DEF_TUNE (X86_TUNE_QIMODE_MATH, "qimode_math", m_ALL) |
| |
| /* X86_TUNE_PROMOTE_QI_REGS: This enables generic code that promotes all 8bit |
| arithmetic to 32bit via PROMOTE_MODE macro. This code generation scheme |
| is usually used for RISC targets. */ |
| DEF_TUNE (X86_TUNE_PROMOTE_QI_REGS, "promote_qi_regs", m_NONE) |
| |
/* X86_TUNE_SLOW_STC: This disables use of stc, clc and cmc carry flag
   modifications on architectures where these operations are slow.  */
| DEF_TUNE (X86_TUNE_SLOW_STC, "slow_stc", m_PENT4) |
| |
| /* X86_TUNE_USE_RCR: Controls use of rcr 1 instruction instead of shrd. */ |
| DEF_TUNE (X86_TUNE_USE_RCR, "use_rcr", m_AMD_MULTIPLE) |