Commit 42204c94 authored by Bill Wendling's avatar Bill Wendling
Browse files

Revert "[TargetInstrInfo] enable foldMemoryOperand for InlineAsm (#70743)"

This reverts commit 99ee2db1.

It's causing internal compiler errors (ICEs) in the ARM tests. See the comment here:

https://github.com/llvm/llvm-project/commit/99ee2db198d86f685bcb07a1495a7115ffc31d7e
parent 23c47eba
Loading
Loading
Loading
Loading
+0 −10
Original line number Diff line number Diff line
@@ -2188,16 +2188,6 @@ public:
  // Get the call frame size just before MI.
  unsigned getCallFrameSizeAt(MachineInstr &MI) const;

  /// Fills in the necessary MachineOperands to refer to a frame index.
  /// The best way to understand this is to print `asm(""::"m"(x));` after
  /// finalize-isel. Example:
  /// INLINEASM ... 262190 /* mem:m */, %stack.0.x.addr, 1, $noreg, 0, $noreg
  /// we would add placeholders for:                     ^  ^       ^  ^
  /// The number and meaning of the placeholder operands that accompany a
  /// frame-index memory operand are target-specific, so the generic default
  /// cannot produce them: a target must override this to fill \p Ops, and
  /// the default implementation aborts if it is ever reached.
  virtual void
  getFrameIndexOperands(SmallVectorImpl<MachineOperand> &Ops) const {
    llvm_unreachable("unknown number of operands necessary");
  }

private:
  mutable std::unique_ptr<MIRFormatter> Formatter;
  unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
+0 −62
Original line number Diff line number Diff line
@@ -565,64 +565,6 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
  return NewMI;
}

// Rewrite the register operand at index OpNo of the INLINEASM instruction MI
// into a memory reference to frame index FI: the register operand becomes a
// frame-index operand followed by target-supplied addressing placeholders,
// the preceding InlineAsm flag word is changed to a Mem-kind flag, and the
// instruction's may-load/may-store extra-info bits are updated.
static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
                                    const TargetInstrInfo &TII) {
  MachineOperand &MO = MI->getOperand(OpNo);
  // Query how the virtual register is used (read/written) before MO is
  // mutated below; the answer drives the Extra_MayLoad/MayStore update.
  const VirtRegInfo &RI = AnalyzeVirtRegInBundle(*MI, MO.getReg());

  // If the machine operand is tied, untie it first.
  if (MO.isTied()) {
    unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
    MI->untieRegOperand(OpNo);
    // Intentional recursion! The operand tied to OpNo must be folded to the
    // same frame index. NOTE(review): the recursive call inserts extra
    // operands after TiedTo (see MI->insert below), which shifts the indices
    // of every later operand — this appears to assume TiedTo's position
    // relative to OpNo keeps MO/OpNo valid; confirm, since references into
    // the operand list may also be invalidated by the insertion.
    foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
  }

  // Change the operand from a register to a frame index.
  MO.ChangeToFrameIndex(FI, MO.getTargetFlags());

  // Ask the target for the placeholder operands that must accompany a
  // frame-index memory operand and splice them in right after it.
  SmallVector<MachineOperand, 4> NewOps;
  TII.getFrameIndexOperands(NewOps);
  assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
  MI->insert(MI->operands_begin() + OpNo + 1, NewOps);

  // Change the previous operand to a MemKind InlineAsm::Flag. The second param
  // is the per-target number of operands that represent the memory operand
  // excluding this one (MD). This includes MO.
  InlineAsm::Flag F(InlineAsm::Kind::Mem, NewOps.size() + 1);
  F.setMemConstraint(InlineAsm::ConstraintCode::m);
  // The flag word immediately precedes the operand it describes.
  MachineOperand &MD = MI->getOperand(OpNo - 1);
  MD.setImm(F);

  // Update mayload/maystore metadata.
  MachineOperand &ExtraMO = MI->getOperand(InlineAsm::MIOp_ExtraInfo);
  if (RI.Reads)
    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
  if (RI.Writes)
    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
}

// Try to fold frame index FI into a single register operand of the
// INLINEASM instruction MI. The fold is performed on a duplicate of MI so
// the original instruction is left untouched; the caller decides what to do
// with the new instruction. Returns nullptr if the fold is not possible.
static MachineInstr *foldInlineAsmMemOperand(MachineInstr &MI,
                                             ArrayRef<unsigned> Ops, int FI,
                                             const TargetInstrInfo &TII) {
  assert(MI.isInlineAsm() && "wrong opcode");

  // Only a single operand can be folded at a time.
  if (Ops.size() > 1)
    return nullptr;

  const unsigned OpIdx = Ops[0];
  assert(OpIdx != 0 && "should never be first operand");
  assert(MI.getOperand(OpIdx).isReg() &&
         "shouldn't be folding non-reg operands");

  // The operand must be one the inline-asm machinery marked as foldable.
  if (!MI.mayFoldInlineAsmRegOp(OpIdx))
    return nullptr;

  // Clone the instruction and rewrite the operand on the clone.
  MachineInstr &Clone = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);
  foldInlineAsmMemOperand(&Clone, OpIdx, FI, TII);
  return &Clone;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
@@ -670,8 +612,6 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else if (MI.isInlineAsm()) {
    NewMI = foldInlineAsmMemOperand(MI, Ops, FI, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
@@ -743,8 +683,6 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
    NewMI = foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);