[Target] Use TableGen named argument syntax. NFC. #133418
Conversation
Convert named argument comments like `/*foo=*/99` into proper named argument syntax like `foo = 99`. This can only be done for trailing arguments, since TableGen does not allow a positional argument to follow a named argument.

The patch was semi-automated with:

```sh
sed -Ei 's|/\* *(\w+) *= *\*/ *|\1 = |g' $(find lib -name "*.td")
```

plus a bunch of manual fixups.
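As a minimal hypothetical sketch of the conversion (the `Widget` class and its arguments are invented for illustration; only the syntax mirrors the patch):

```tablegen
// A class whose trailing arguments have defaults.
class Widget<string name, bit isWide = 0, bit isFast = 0>;

// Before: trailing argument values documented with comments, which the
// parser never checks against the actual parameter names.
def W0 : Widget<"w0", /*isWide=*/1, /*isFast=*/0>;

// After: named argument syntax, which the TableGen parser does check.
def W1 : Widget<"w1", isWide = 1, isFast = 0>;

// Only trailing arguments qualify: a positional argument may not follow a
// named one, so Widget<isWide = 1, "w2"> would be a parse error.
```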
@llvm/pr-subscribers-backend-amdgpu @llvm/pr-subscribers-backend-x86 @llvm/pr-subscribers-backend-risc-v @llvm/pr-subscribers-backend-arm

Author: Jay Foad (jayfoad)

Changes

Convert named argument comments like `/*foo=*/99` into proper named argument syntax like `foo = 99`.

The patch was semi-automated with:

```sh
sed -Ei 's|/\* *(\w+) *= *\*/ *|\1 = |g' $(find lib -name "*.td")
```

plus a bunch of manual fixups.

Patch is 46.37 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/133418.diff

17 Files Affected:
diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
index d2aa86f388db2..fbc179eeff2ab 100644
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -88,9 +88,9 @@ def SDT_AArch64RDSVL : SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisInt<1>]>;
def AArch64rdsvl : SDNode<"AArch64ISD::RDSVL", SDT_AArch64RDSVL>;
let Predicates = [HasSMEandIsNonStreamingSafe] in {
-def RDSVLI_XI : sve_int_read_vl_a<0b0, 0b11111, "rdsvl", /*streaming_sve=*/0b1>;
-def ADDSPL_XXI : sve_int_arith_vl<0b1, "addspl", /*streaming_sve=*/0b1>;
-def ADDSVL_XXI : sve_int_arith_vl<0b0, "addsvl", /*streaming_sve=*/0b1>;
+def RDSVLI_XI : sve_int_read_vl_a<0b0, 0b11111, "rdsvl", streaming_sve = 0b1>;
+def ADDSPL_XXI : sve_int_arith_vl<0b1, "addspl", streaming_sve = 0b1>;
+def ADDSVL_XXI : sve_int_arith_vl<0b0, "addsvl", streaming_sve = 0b1>;
def : Pat<(AArch64rdsvl (i32 simm6_32b:$imm)), (RDSVLI_XI simm6_32b:$imm)>;
}
diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index 4f6a413ba5e5c..495607660f840 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -869,8 +869,8 @@ multiclass sme_mem_ld_v_ss<string mnemonic, bit is_col> {
}
multiclass sme_mem_ld_ss<string mnemonic> {
- defm _H : sme_mem_ld_v_ss<mnemonic, /*is_col=*/0b0>;
- defm _V : sme_mem_ld_v_ss<mnemonic, /*is_col=*/0b1>;
+ defm _H : sme_mem_ld_v_ss<mnemonic, is_col = 0b0>;
+ defm _V : sme_mem_ld_v_ss<mnemonic, is_col = 0b1>;
}
//===----------------------------------------------------------------------===//
@@ -999,8 +999,8 @@ multiclass sme_mem_st_v_ss<string mnemonic, bit is_col> {
}
multiclass sme_mem_st_ss<string mnemonic> {
- defm _H : sme_mem_st_v_ss<mnemonic, /*is_col=*/0b0>;
- defm _V : sme_mem_st_v_ss<mnemonic, /*is_col=*/0b1>;
+ defm _H : sme_mem_st_v_ss<mnemonic, is_col = 0b0>;
+ defm _V : sme_mem_st_v_ss<mnemonic, is_col = 0b1>;
}
//===----------------------------------------------------------------------===//
@@ -1256,8 +1256,8 @@ multiclass sme_vector_v_to_tile<string mnemonic, bit is_col> {
}
multiclass sme_vector_to_tile<string mnemonic> {
- defm _H : sme_vector_v_to_tile<mnemonic, /*is_col=*/0b0>;
- defm _V : sme_vector_v_to_tile<mnemonic, /*is_col=*/0b1>;
+ defm _H : sme_vector_v_to_tile<mnemonic, is_col = 0b0>;
+ defm _V : sme_vector_v_to_tile<mnemonic, is_col = 0b1>;
}
class sme_tile_to_vector_base<bit Q, bit V, bits<2> sz, dag outs, dag ins,
@@ -1423,8 +1423,8 @@ multiclass sme_tile_to_vector_v<string mnemonic, bit is_col> {
}
multiclass sme_tile_to_vector<string mnemonic> {
- defm _H : sme_tile_to_vector_v<mnemonic, /*is_col=*/0b0>;
- defm _V : sme_tile_to_vector_v<mnemonic, /*is_col=*/0b1>;
+ defm _H : sme_tile_to_vector_v<mnemonic, is_col = 0b0>;
+ defm _V : sme_tile_to_vector_v<mnemonic, is_col = 0b1>;
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index f4edfe1387731..e98bc9cbf047f 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -947,10 +947,10 @@ defm BUFFER_LOAD_DWORDX2 : MUBUF_Pseudo_Loads <
"buffer_load_dwordx2", v2i32
>;
defm BUFFER_LOAD_DWORDX3 : MUBUF_Pseudo_Loads_Lds <
- "buffer_load_dwordx3", v3i32, /*LDSPred=*/HasGFX950Insts
+ "buffer_load_dwordx3", v3i32, LDSPred = HasGFX950Insts
>;
defm BUFFER_LOAD_DWORDX4 : MUBUF_Pseudo_Loads_Lds <
- "buffer_load_dwordx4", v4i32, /*LDSPred=*/HasGFX950Insts
+ "buffer_load_dwordx4", v4i32, LDSPred = HasGFX950Insts
>;
defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i32, atomic_load_8_global>;
diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td
index bc1db52eeeb2f..3c22c4f7e5f31 100644
--- a/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -1308,7 +1308,7 @@ multiclass DS_Real_gfx12<bits<8> op, string name = !tolower(NAME), bit needAlias
let DecoderNamespace = "GFX12" in
def _gfx12 :
Base_DS_Real_gfx6_gfx7_gfx10_gfx11_gfx12<op, ps, SIEncodingFamily.GFX12,
- name, /*hasGDS=*/false>;
+ name, hasGDS = false>;
if !and(needAlias, !ne(ps.Mnemonic, name)) then
def : AMDGPUMnemonicAlias<ps.Mnemonic, name>;
} // End AssemblerPredicate
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 23a7f508dcda2..2b9ebe22cf473 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -1214,7 +1214,7 @@ def FORMAT : CustomOperand<i8>;
let PrintInHex = 1 in
def DMask : NamedIntOperand<"dmask">;
-def Dim : CustomOperand<i8, /*optional=*/1>;
+def Dim : CustomOperand<i8, optional = 1>;
def dst_sel : SDWAOperand<"dst_sel", "SDWADstSel">;
def src0_sel : SDWAOperand<"src0_sel", "SDWASrc0Sel">;
@@ -2061,7 +2061,7 @@ class getInsVOP3OpSel <RegisterOperand Src0RC, RegisterOperand Src1RC,
dag ret = getInsVOP3Base<Src0RC, Src1RC,
Src2RC, NumSrcArgs,
HasClamp, 1/*HasModifiers*/, 1/*HasSrc2Mods*/, HasOMod,
- Src0Mod, Src1Mod, Src2Mod, /*HasOpSel=*/1>.ret;
+ Src0Mod, Src1Mod, Src2Mod, HasOpSel = 1>.ret;
}
class getInsDPPBase <RegisterOperand OldRC, RegisterOperand Src0RC, RegisterOperand Src1RC,
@@ -2798,7 +2798,7 @@ def VOP_F16_F16_F16 : VOPProfile <[f16, f16, f16, untyped]>;
def VOP_F16_F16_I16 : VOPProfile <[f16, f16, i16, untyped]>;
def VOP_F16_F16_I32 : VOPProfile <[f16, f16, i32, untyped]>;
def VOP_I16_I16_I16 : VOPProfile <[i16, i16, i16, untyped]>;
-def VOP_I16_I16_I16_ARITH : VOPProfile <[i16, i16, i16, untyped], /*EnableClamp=*/1>;
+def VOP_I16_I16_I16_ARITH : VOPProfile <[i16, i16, i16, untyped], _EnableClamp = 1>;
def VOP_I16_I16_I16_I16 : VOPProfile <[i16, i16, i16, i16, untyped]>;
def VOP_F16_F16_F16_F16 : VOPProfile <[f16, f16, f16, f16, untyped]>;
@@ -2846,7 +2846,7 @@ def VOP_F64_F64_I32 : VOPProfile <[f64, f64, i32, untyped]>;
def VOP_I32_F32_F32 : VOPProfile <[i32, f32, f32, untyped]>;
def VOP_I32_F32_I32 : VOPProfile <[i32, f32, i32, untyped]>;
def VOP_I32_I32_I32 : VOPProfile <[i32, i32, i32, untyped]>;
-def VOP_I32_I32_I32_ARITH : VOPProfile <[i32, i32, i32, untyped], /*EnableClamp=*/1>;
+def VOP_I32_I32_I32_ARITH : VOPProfile <[i32, i32, i32, untyped], _EnableClamp = 1>;
def VOP_V2F16_F32_F32 : VOPProfile <[v2f16, f32, f32, untyped]>;
def VOP_F32_F16_F16_F16 : VOPProfile <[f32, f16, f16, f16]>;
def VOP_V2BF16_F32_F32 : VOPProfile <[v2bf16, f32, f32, untyped]>;
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index 35c7b393a8ca4..1a68d450a2297 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -158,8 +158,8 @@ def VCC : RegisterWithSubRegs<"vcc", [VCC_LO, VCC_HI]> {
let HWEncoding = VCC_LO.HWEncoding;
}
-defm EXEC_LO : SIRegLoHi16<"exec_lo", 126, /*ArtificialHigh=*/1, /*isVGPR=*/0,
- /*isAGPR=*/0, /*DwarfEncodings=*/[1, 1]>;
+defm EXEC_LO : SIRegLoHi16<"exec_lo", 126, ArtificialHigh = 1, isVGPR = 0,
+ isAGPR = 0, DwarfEncodings = [1, 1]>;
defm EXEC_HI : SIRegLoHi16<"exec_hi", 127>;
def EXEC : RegisterWithSubRegs<"exec", [EXEC_LO, EXEC_HI]>, DwarfRegNum<[17, 1]> {
@@ -299,8 +299,8 @@ def FLAT_SCR : FlatReg<FLAT_SCR_LO, FLAT_SCR_HI, 0>;
// SGPR registers
foreach Index = 0...105 in {
defm SGPR#Index :
- SIRegLoHi16 <"s"#Index, Index, /*ArtificialHigh=*/1,
- /*isVGPR=*/0, /*isAGPR=*/0, /*DwarfEncodings=*/
+ SIRegLoHi16 <"s"#Index, Index, ArtificialHigh = 1,
+ isVGPR = 0, isAGPR = 0, DwarfEncodings =
[!if(!le(Index, 63), !add(Index, 32), !add(Index, 1024)),
!if(!le(Index, 63), !add(Index, 32), !add(Index, 1024))]>;
}
@@ -308,16 +308,16 @@ foreach Index = 0...105 in {
// VGPR registers
foreach Index = 0...255 in {
defm VGPR#Index :
- SIRegLoHi16 <"v"#Index, Index, /*ArtificialHigh=*/ 0,
- /*isVGPR=*/ 1, /*isAGPR=*/ 0, /*DwarfEncodings=*/
+ SIRegLoHi16 <"v"#Index, Index, ArtificialHigh = 0,
+ isVGPR = 1, isAGPR = 0, DwarfEncodings =
[!add(Index, 2560), !add(Index, 1536)]>;
}
// AccVGPR registers
foreach Index = 0...255 in {
defm AGPR#Index :
- SIRegLoHi16 <"a"#Index, Index, /*ArtificialHigh=*/ 1,
- /*isVGPR=*/ 0, /*isAGPR=*/ 1, /*DwarfEncodings=*/
+ SIRegLoHi16 <"a"#Index, Index, ArtificialHigh = 1,
+ isVGPR = 0, isAGPR = 1, DwarfEncodings =
[!add(Index, 3072), !add(Index, 2048)]>;
}
diff --git a/llvm/lib/Target/AMDGPU/SMInstructions.td b/llvm/lib/Target/AMDGPU/SMInstructions.td
index 37dcc10086257..5a820b6f87484 100644
--- a/llvm/lib/Target/AMDGPU/SMInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SMInstructions.td
@@ -932,7 +932,7 @@ multiclass SMRD_Pattern <string Instr, ValueType vt, bit immci = true> {
// XNACK is enabled and the load wasn't naturally aligned. The constrained sload variant.
if !gt(vt.Size, 32) then {
let OtherPredicates = [HasXNACKEnabled], AddedComplexity = 101 in
- defm: SMRD_Patterns <Instr, vt, smrd_load, /*immci=*/false, /*suffix=*/"_ec">;
+ defm: SMRD_Patterns <Instr, vt, smrd_load, immci = false, suffix = "_ec">;
}
// XNACK is disabled.
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index def06c1e9a0d7..3b719d3b40131 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -767,7 +767,7 @@ def VOP_SWAP_I16 : VOPProfile_True16<VOP_I16_I16> {
}
let SubtargetPredicate = isGFX11Plus in {
- def V_SWAP_B16 : VOP1_Pseudo<"v_swap_b16", VOP_SWAP_I16, [], /* VOP1Only= */true> {
+ def V_SWAP_B16 : VOP1_Pseudo<"v_swap_b16", VOP_SWAP_I16, [], VOP1Only = true> {
let Constraints = "$vdst = $src1, $vdst1 = $src0";
let DisableEncoding = "$vdst1, $src1";
let SchedRW = [Write64Bit, Write64Bit];
@@ -775,7 +775,7 @@ let SubtargetPredicate = isGFX11Plus in {
}
// Restrict src0 to be VGPR
def V_PERMLANE64_B32 : VOP1_Pseudo<"v_permlane64_b32", VOP_MOVRELS,
- [], /*VOP1Only=*/ 1>;
+ [], VOP1Only = 1>;
defm V_MOV_B16 : VOP1Inst_t16<"v_mov_b16", VOP_I16_I16>;
defm V_NOT_B16 : VOP1Inst_t16<"v_not_b16", VOP_I16_I16>;
defm V_CVT_I32_I16 : VOP1Inst_t16<"v_cvt_i32_i16", VOP_I32_I16>;
diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
index 1bac8656192a7..44b096ca620c0 100644
--- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -604,7 +604,7 @@ def VOP_DOT_ACC_I32_I32 : VOP_DOT_ACC<i32, i32> {
}
// Write out to vcc or arbitrary SGPR.
-def VOP2b_I32_I1_I32_I32 : VOPProfile<[i32, i32, i32, untyped], /*EnableClamp=*/1> {
+def VOP2b_I32_I1_I32_I32 : VOPProfile<[i32, i32, i32, untyped], _EnableClamp = 1> {
let Asm32 = "$vdst, vcc, $src0, $src1";
let AsmVOP3Base = "$vdst, $sdst, $src0, $src1$clamp";
let AsmSDWA = "$vdst, vcc, $src0_modifiers, $src1_modifiers$clamp $dst_sel $dst_unused $src0_sel $src1_sel";
@@ -630,7 +630,7 @@ def VOP2b_I32_I1_I32_I32 : VOPProfile<[i32, i32, i32, untyped], /*EnableClamp=*/
// Write out to vcc or arbitrary SGPR and read in from vcc or
// arbitrary SGPR.
-def VOP2b_I32_I1_I32_I32_I1 : VOPProfile<[i32, i32, i32, i1], /*EnableClamp=*/1> {
+def VOP2b_I32_I1_I32_I32_I1 : VOPProfile<[i32, i32, i32, i1], _EnableClamp = 1> {
let HasSrc2Mods = 0;
let Asm32 = "$vdst, vcc, $src0, $src1, vcc";
let AsmSDWA = "$vdst, vcc, $src0_modifiers, $src1_modifiers, vcc$clamp $dst_sel $dst_unused $src0_sel $src1_sel";
diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index d8088b8c638fd..48a59c7efbf4a 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -890,7 +890,7 @@ class MAIInst<string OpName, VOPProfile P, SDPatternOperator node, bit Scaled =
//
// FIXME: Usual syntax for op_sel is quite hostile here.
class ScaledMAIInst<string OpName, MAIInst BaseInst, SDPatternOperator node> :
- MAIInst<OpName, BaseInst.Pfl, node, /*Scaled=*/true> {
+ MAIInst<OpName, BaseInst.Pfl, node, Scaled = true> {
// Append operands from V_MFMA_LD_SCALE_B32, but we need to rename them.
let InOperandList = !con(BaseInst.InOperandList,
(ins VSrc_b32:$scale_src0,
@@ -2046,11 +2046,11 @@ multiclass VOP3PX_Real_ScaledMFMA<bits<7> op> {
DecoderNamespace = "GFX940",
AsmString = Name # PS_ACD.AsmOperands, Constraints = "" in {
def _gfx940_acd : VOP3P_Real<PS_ACD, SIEncodingFamily.GFX940>,
- VOP3PXe <op, PS_ACD.Pfl, /*acc_cd=*/1>,
+ VOP3PXe <op, PS_ACD.Pfl, acc_cd = 1>,
MFMA_F8F6F4_WithSizeTable_Helper<PS_ACD, F8F8Name#"_gfx940_acd">;
def _gfx940_vcd : VOP3P_Real<PS_VCD, SIEncodingFamily.GFX940>,
- VOP3PXe <op, PS_VCD.Pfl, /*acc_cd=*/0>,
+ VOP3PXe <op, PS_VCD.Pfl, acc_cd = 0>,
MFMA_F8F6F4_WithSizeTable_Helper<PS_VCD, F8F8Name#"_gfx940_vcd">;
}
}
diff --git a/llvm/lib/Target/ARM/ARMInstrCDE.td b/llvm/lib/Target/ARM/ARMInstrCDE.td
index 54e27a6be5583..add8b0d4651b7 100644
--- a/llvm/lib/Target/ARM/ARMInstrCDE.td
+++ b/llvm/lib/Target/ARM/ARMInstrCDE.td
@@ -52,7 +52,7 @@ def imm_13b : BitWidthImm<13>;
class CDE_Instr<bit acc, dag oops, dag iops, string asm, string cstr>
: Thumb2XI<oops, !con((ins p_imm:$coproc), iops),
AddrModeNone, /*sz=*/4, NoItinerary,
- asm, cstr, /*pattern=*/[]>,
+ asm, cstr, pattern = []>,
Sched<[]> {
bits<3> coproc;
diff --git a/llvm/lib/Target/M68k/M68kRegisterInfo.td b/llvm/lib/Target/M68k/M68kRegisterInfo.td
index 4942636ffd529..17f822e278904 100644
--- a/llvm/lib/Target/M68k/M68kRegisterInfo.td
+++ b/llvm/lib/Target/M68k/M68kRegisterInfo.td
@@ -70,8 +70,8 @@ defm SP : MxAddressRegister<7, "sp", ["usp", "ssp", "isp", "a7"]>;
// Floating Point Registers
class MxFPRegister<int INDEX, string REG_NAME, list<string> ALTNAMES = []>
- : MxReg<REG_NAME, INDEX, /*SUBREGS=*/[], /*SUBIDX=*/[],
- /*DWREGS=*/[!add(18,INDEX)], ALTNAMES>;
+ : MxReg<REG_NAME, INDEX, SUBREGS = [], SUBIDX = [],
+ DWREGS = [!add(18,INDEX)], ALTNAMES>;
foreach i = {0-7} in
def FP#i : MxFPRegister<i, "fp"#i>;
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 1786503a6dd4e..4c1b596aef8c1 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -819,8 +819,8 @@ defm SUB_i1 : ADD_SUB_i1<sub>;
// int16, int32, and int64 signed addition. Since nvptx is 2's complement, we
// also use these for unsigned arithmetic.
-defm ADD : I3<"add.s", add, /*commutative=*/ true>;
-defm SUB : I3<"sub.s", sub, /*commutative=*/ false>;
+defm ADD : I3<"add.s", add, commutative = true>;
+defm SUB : I3<"sub.s", sub, commutative = false>;
def ADD16x2 : I16x2<"add.s", add>;
@@ -832,18 +832,18 @@ defm SUBCC : ADD_SUB_INT_CARRY<"sub.cc", subc>;
defm ADDCCC : ADD_SUB_INT_CARRY<"addc.cc", adde>;
defm SUBCCC : ADD_SUB_INT_CARRY<"subc.cc", sube>;
-defm MULT : I3<"mul.lo.s", mul, /*commutative=*/ true>;
+defm MULT : I3<"mul.lo.s", mul, commutative = true>;
-defm MULTHS : I3<"mul.hi.s", mulhs, /*commutative=*/ true>;
-defm MULTHU : I3<"mul.hi.u", mulhu, /*commutative=*/ true>;
+defm MULTHS : I3<"mul.hi.s", mulhs, commutative = true>;
+defm MULTHU : I3<"mul.hi.u", mulhu, commutative = true>;
-defm SDIV : I3<"div.s", sdiv, /*commutative=*/ false>;
-defm UDIV : I3<"div.u", udiv, /*commutative=*/ false>;
+defm SDIV : I3<"div.s", sdiv, commutative = false>;
+defm UDIV : I3<"div.u", udiv, commutative = false>;
// The ri versions of rem.s and rem.u won't be selected; DAGCombiner::visitSREM
// will lower it.
-defm SREM : I3<"rem.s", srem, /*commutative=*/ false>;
-defm UREM : I3<"rem.u", urem, /*commutative=*/ false>;
+defm SREM : I3<"rem.s", srem, commutative = false>;
+defm UREM : I3<"rem.u", urem, commutative = false>;
// Integer absolute value. NumBits should be one minus the bit width of RC.
// This idiom implements the algorithm at
@@ -858,10 +858,10 @@ defm ABS_32 : ABS<i32, Int32Regs, ".s32">;
defm ABS_64 : ABS<i64, Int64Regs, ".s64">;
// Integer min/max.
-defm SMAX : I3<"max.s", smax, /*commutative=*/ true>;
-defm UMAX : I3<"max.u", umax, /*commutative=*/ true>;
-defm SMIN : I3<"min.s", smin, /*commutative=*/ true>;
-defm UMIN : I3<"min.u", umin, /*commutative=*/ true>;
+defm SMAX : I3<"max.s", smax, commutative = true>;
+defm UMAX : I3<"max.u", umax, commutative = true>;
+defm SMIN : I3<"min.s", smin, commutative = true>;
+defm UMIN : I3<"min.u", umin, commutative = true>;
def SMAX16x2 : I16x2<"max.s", smax>;
def UMAX16x2 : I16x2<"max.u", umax>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index fe43a2be4aab9..0bf051ee731f1 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -1699,10 +1699,10 @@ def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
let Predicates = [HasVInstructions] in {
// Vector Slide Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
-defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110, /*slidesUp=*/true>;
+defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110, slidesUp = true>;
defm VSLIDE1UP_V : VSLD1_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
-defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111, /*slidesUp=*/false>;
+defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111, slidesUp = false>;
let ElementsDependOn = EltDepsVL in
defm VSLIDE1DOWN_V : VSLD1_MV_X<"vslide1down", 0b001111>;
} // Predicates = [HasVInstructions]
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td b/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
index 2bfd5ef811c7b..1bd2c2ed69435 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
@@ -1003,9 +1003,9 @@ foreach mx = ["M8", "M4", "M2"] in {
defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
let Latency = SiFiveP600VSlideXComplex<mx>.latency in {
- let ReleaseAtCycles = [SiFiveP600VSlideXComplex<mx, /*isUp=*/true>.cycles] in
+ let ReleaseAtCycles = [SiFiveP600VSlideXComplex<mx, isUp = true>.cycles] in
defm "" : LMULWriteResMX<"WriteVSlideUpX", [SiFiveP600VEXQ1], mx, IsWorstCase>;
- let ReleaseAtCycles = [SiFiveP600VSlideXComplex<mx, /*isUp=*/false>.cycles] in
+ let ReleaseAtCycles = [SiFiveP600VSlideXComplex<mx, isUp = false>.cycles] in
defm "" : LMULWriteResMX<"WriteVSlideDownX", [SiFiveP600VEXQ1], mx, IsWorstCase>;
}
}
diff --git a/llvm/lib/Target/X86/X86ScheduleZnver3.td b/llvm/lib/Target/X86/X86ScheduleZnver3.td
index 9e271c1ee3709..18f8bf1023c6f 100644
--- a/llvm/lib/Target/X86/X86ScheduleZnver3.td
+++ b/llvm/lib/Target/X86/X86ScheduleZnver3.td
@@ -610,7 +610,7 @@ def : InstRW<[Zn3SlowLEA16r], (instrs LEA16r)>;
// Integer multiplication
defm : Zn3WriteResIntPair<WriteIMul8, [Zn3Multiplier], 3, [3], 1>; // Integer 8-bit multiplication.
-defm : Zn3WriteResIntPair<WriteIMul16, [Zn3Multiplier], 3, [3], 3, /*LoadUOps=*/1>; // Integer 16-bit multiplication.
+defm : Zn3WriteResIntPair<WriteIMul16, [Zn3Multiplier], 3, [3], 3, LoadUOps = 1>; // Integer 16-bit multiplication.
defm : Zn3WriteResIntPair<WriteIMul16Imm, [Zn3Multiplier], 4, [4], 2>; // Integer 16-bit multiplication by immediate.
defm : Zn3WriteResIntPair<WriteIMul16Reg, [Zn3Multiplier], 3, [1], 1>; // Integer 16-bit multiplication by register.
defm : Zn3WriteResIntPair<WriteIMul32, [Zn3Multiplier], 3, [3], 2>; // Integer 32-bit multiplication.
@@ -692,8 +692,8 @@ defm : Zn3WriteResIntPair<WriteIDiv16, [Z...
[truncated]
There are more cases that could be converted, that don't use that exact syntax for the comment, like:
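One such comment-after-value style appears in the diff above (e.g. `1/*HasModifiers*/, 1/*HasSrc2Mods*/` in `SIInstrInfo.td`); the `sed` pattern only recognizes `/*name=*/` before the value, so it leaves these untouched. A hypothetical sketch with an invented class:

```tablegen
class Gadget<int id, bit flag = 0, bit fast = 0>;

// Comment-after-value style: not matched by the regex above.
def G0 : Gadget<7, 0/*flag*/, 1/*fast*/>;

// The equivalent named-argument form.
def G1 : Gadget<7, flag = 0, fast = 1>;
```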
LGTM! I am glad someone is finally making use of this feature! That is a great honor to me. 😄