Unverified Commit 778a4846 authored by Nick Desaulniers's avatar Nick Desaulniers Committed by GitHub
Browse files

[InlineAsm] Steal a bit to denote a register is foldable (#70738)


When using the inline asm constraint string "rm" (or "g"), we generally
would like the compiler to choose "r", but it is permitted to choose "m"
if there's register pressure. This is distinct from "r", for which the
register is not permitted to be spilled to the stack.

The decision of which to use must be made at some point.  Currently, the
instruction selection frameworks (ISELs) make the choice, and the
register allocators had better be able to handle the result.

Steal a bit from Storage when using register operands to disambiguate
between the two cases.  Add helpers/getters/setters, and print in MIR
when such a register is foldable.

The getter will later be used by the register allocation frameworks (and
asserted by the ISELs) while the setters will be used by the instruction
selection frameworks.

Link: https://github.com/llvm/llvm-project/issues/20571
parent 17798ad7
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -1364,6 +1364,10 @@ public:
    return getOpcode() == TargetOpcode::INLINEASM ||
           getOpcode() == TargetOpcode::INLINEASM_BR;
  }
  /// Returns true if the register operand can be folded with a load or store
  /// into a frame index. Does so by checking the InlineAsm::Flag immediate
  /// operand at OpId - 1.
  bool mayFoldInlineAsmRegOp(unsigned OpId) const;

  bool isStackAligningInlineAsm() const;
  InlineAsm::AsmDialect getInlineAsmDialect() const;
+30 −5
Original line number Diff line number Diff line
@@ -291,18 +291,23 @@ public:
  //     Bits 30-16 - A ConstraintCode:: value indicating the original
  //                  constraint code. (MemConstraintCode)
  //   Else:
  //     Bits 30-16 - The register class ID to use for the operand. (RegClass)
  //     Bits 29-16 - The register class ID to use for the operand. (RegClass)
  //     Bit  30    - If the register is permitted to be spilled.
  //                  (RegMayBeFolded)
  //                  Defaults to false "r", may be set for constraints like
  //                  "rm" (or "g").
  //
  //   As such, MatchedOperandNo, MemConstraintCode, and RegClass are views of
  //   the same slice of bits, but are mutually exclusive depending on the
  //   fields IsMatched then KindField.
  //   As such, MatchedOperandNo, MemConstraintCode, and
  //   (RegClass+RegMayBeFolded) are views of the same slice of bits, but are
  //   mutually exclusive depending on the fields IsMatched then KindField.
  class Flag {
    uint32_t Storage;
    using KindField = Bitfield::Element<Kind, 0, 3, Kind::Func>;
    using NumOperands = Bitfield::Element<unsigned, 3, 13>;
    using MatchedOperandNo = Bitfield::Element<unsigned, 16, 15>;
    using MemConstraintCode = Bitfield::Element<ConstraintCode, 16, 15, ConstraintCode::Max>;
    using RegClass = Bitfield::Element<unsigned, 16, 15>;
    using RegClass = Bitfield::Element<unsigned, 16, 14>;
    using RegMayBeFolded = Bitfield::Element<bool, 30, 1>;
    using IsMatched = Bitfield::Element<bool, 31, 1>;


@@ -413,6 +418,26 @@ public:
             "Flag is not a memory or function constraint!");
      Bitfield::set<MemConstraintCode>(Storage, ConstraintCode::Unknown);
    }

    /// Set a bit to denote that while this operand is some kind of register
    /// (use, def, ...), a memory flag did appear in the original constraint
    /// list.  This is set by the instruction selection framework, and consumed
    /// by the register allocator. While the register allocator is generally
    /// responsible for spilling registers, we need to be able to distinguish
    /// between registers that the register allocator has permission to fold
    /// ("rm") vs ones it does not ("r"). This is because the inline asm may use
    /// instructions which don't support memory addressing modes for that
    /// operand.
    void setRegMayBeFolded(bool B) {
      // The foldable bit is only meaningful for register-kind flags; other
      // kinds reuse these Storage bits for different fields.
      assert((isRegUseKind() || isRegDefKind() || isRegDefEarlyClobberKind()) &&
             "Must be reg");
      Bitfield::set<RegMayBeFolded>(Storage, B);
    }
    /// Whether the register allocator is permitted to fold this register
    /// operand into a stack access (set via setRegMayBeFolded()).
    bool getRegMayBeFolded() const {
      // Only valid on register-kind flags; other kinds overlay these bits.
      assert((isRegUseKind() || isRegDefKind() || isRegDefEarlyClobberKind()) &&
             "Must be reg");
      return Bitfield::get<RegMayBeFolded>(Storage);
    }
  };

  static std::vector<StringRef> getExtraInfoNames(unsigned ExtraInfo) {
+23 −0
Original line number Diff line number Diff line
@@ -1792,6 +1792,12 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
      if (F.isUseOperandTiedToDef(TiedTo))
        OS << " tiedto:$" << TiedTo;

      if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
           F.isRegUseKind()) &&
          F.getRegMayBeFolded()) {
        OS << " foldable";
      }

      OS << ']';

      // Compute the index of the next operand descriptor.
@@ -2526,3 +2532,20 @@ void MachineInstr::insert(mop_iterator InsertBefore,
    tieOperands(Tie1, Tie2);
  }
}

bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const {
  assert(OpId && "expected non-zero operand id");
  assert(isInlineAsm() && "should only be used on inline asm");

  if (!getOperand(OpId).isReg())
    return false;

  const MachineOperand &MD = getOperand(OpId - 1);
  if (!MD.isImm())
    return false;

  InlineAsm::Flag F(MD.getImm());
  if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
    return F.getRegMayBeFolded();
  return false;
}
+4 −0
Original line number Diff line number Diff line
@@ -1639,6 +1639,10 @@ std::string TargetInstrInfo::createMIROperandComment(
  if (F.isUseOperandTiedToDef(TiedTo))
    OS << " tiedto:$" << TiedTo;

  if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
      F.getRegMayBeFolded())
    OS << " foldable";

  return OS.str();
}