//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
// This pass also tries to promote constant offsets to the immediate by
// adjusting the base. It tries to use a base from the nearby instructions
// that allows it to have a 13-bit constant offset, and then promotes that
// offset to the immediate.
// E.g.
//  s_movk_i32 s0, 0x1800
//  v_add_co_u32_e32 v0, vcc, s0, v2
//  v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
//
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[0:1], off
// =>
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[5:6], off offset:2048
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly
//   run before scheduling. It currently misses stores of constants because
//   loading the constant into the data register is placed between the stores,
//   although this is arguably a scheduling problem.
//
// - Recomputing live intervals seems inefficient. This currently only matches
//   one pair, recomputes live intervals, and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset fields, but are close enough together, we can add to the base
//   pointer and use the new reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {
enum InstClassEnum {
  UNKNOWN,
  DS_READ,
  DS_WRITE,
  S_BUFFER_LOAD_IMM,
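  // The buffer classes below take the value of their base MUBUF opcode so
  // that the class can be passed directly to AMDGPU::getMUBUFOpcode() when
  // selecting the merged opcode (see getNewOpcode()).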
  BUFFER_LOAD_OFFEN = AMDGPU::BUFFER_LOAD_DWORD_OFFEN,
  BUFFER_LOAD_OFFSET = AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
  BUFFER_STORE_OFFEN = AMDGPU::BUFFER_STORE_DWORD_OFFEN,
  BUFFER_STORE_OFFSET = AMDGPU::BUFFER_STORE_DWORD_OFFSET,
  BUFFER_LOAD_OFFEN_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact,
  BUFFER_LOAD_OFFSET_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact,
  BUFFER_STORE_OFFEN_exact = AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact,
  BUFFER_STORE_OFFSET_exact = AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact,
};

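// Bitmask of the address operands an instruction class may carry. Returned
// by getRegs() and used to decide which operands must match when pairing
// instructions.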
enum RegisterEnum {
  SBASE = 0x1,
  SRSRC = 0x2,
  SOFFSET = 0x4,
  VADDR = 0x8,
  ADDR = 0x10,
};

class SILoadStoreOptimizer : public MachineFunctionPass {
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned Width0;
    unsigned Width1;
    unsigned BaseOff;
    InstClassEnum InstClass;
    bool GLC0;
    bool GLC1;
    bool SLC0;
    bool SLC1;
    bool DLC0;
    bool DLC1;
    bool UseST64;
    SmallVector<MachineInstr *, 8> InstsToMove;
  };

  struct BaseRegisters {
    unsigned LoReg = 0;
    unsigned HiReg = 0;

    unsigned LoSubReg = 0;
    unsigned HiSubReg = 0;
  };

  struct MemAddress {
    BaseRegisters Base;
    int64_t Offset = 0;
  };

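  // Caches the base registers and constant offset computed by
  // processBaseWithConstOffset() for each memory instruction, so each
  // address is only analyzed once.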
  using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;

private:
  const GCNSubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  bool OptimizeAgain;

  static bool offsetsCanBeCombined(CombineInfo &CI);
  static bool widthsFit(const GCNSubtarget &STM, const CombineInfo &CI);
  static unsigned getNewOpcode(const CombineInfo &CI);
  static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI);
  const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI);
  unsigned getOpcodeWidth(const MachineInstr &MI);
  InstClassEnum getInstClass(unsigned Opc);
  unsigned getRegs(unsigned Opc);

  bool findMatchingInst(CombineInfo &CI);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

  void updateBaseAndOffset(MachineInstr &I, unsigned NewBase,
                           int32_t NewOffset);
  unsigned computeBase(MachineInstr &MI, const MemAddress &Addr);
  MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI);
  Optional<int32_t> extractConstOffset(const MachineOperand &Op);
  void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr);
  /// Promotes a constant offset to the immediate by adjusting the base. It
  /// tries to use a base from the nearby instructions that allows it to have
  /// a 13-bit constant offset, which then gets promoted to the immediate.
  bool promoteConstantOffsetToImm(MachineInstr &MI,
                                  MemInfoMap &Visited,
                                  SmallPtrSet<MachineInstr *, 4> &AnchorList);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
                    false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr *> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsUsesToList(const MachineInstr &MI,
                              DenseSet<unsigned> &RegDefs,
                              DenseSet<unsigned> &PhysRegUses) {
  for (const MachineOperand &Op : MI.operands()) {
    if (Op.isReg()) {
      if (Op.isDef())
        RegDefs.insert(Op.getReg());
      else if (Op.readsReg() &&
               TargetRegisterInfo::isPhysicalRegister(Op.getReg()))
        PhysRegUses.insert(Op.getReg());
    }
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
                                  DenseSet<unsigned> &PhysRegUses,
                                  SmallVectorImpl<MachineInstr *> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.
    //
    // Similarly, if there is a def which is read by an instruction that is to
    // be moved for merging, then we need to move the def-instruction as well.
    // This can only happen for physical registers such as M0; virtual
    // registers are in SSA form.
    if (Use.isReg() &&
        ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && TargetRegisterInfo::isPhysicalRegister(Use.getReg()) &&
          PhysRegUses.count(Use.getReg())))) {
      Insts.push_back(&MI);
      addDefsUsesToList(MI, RegDefs, PhysRegUses);
      return true;
    }
  }

  return false;
}

static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                                    ArrayRef<MachineInstr *> InstsToMove,
                                    AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))
      return false;
  }
  return true;
}

bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
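  // These merge only when the two accesses are exactly adjacent and their
  // cache-policy bits (glc/dlc, and slc for buffer ops) match.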
  if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
    return (EltOffset0 + CI.Width0 == EltOffset1 ||
            EltOffset1 + CI.Width1 == EltOffset0) &&
           CI.GLC0 == CI.GLC1 && CI.DLC0 == CI.DLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
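  // For example, with EltSize == 4 and byte offsets 0 and 32768, the element
  // offsets 0 and 8192 do not fit in 8 bits, but both are multiples of 64 and
  // encode as 0 and 128 in the ST64 form.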
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
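  // For example, element offsets 1024 and 1040 are both too large for 8 bits,
  // but they differ by only 16: rebasing at the smaller offset leaves 0 and
  // 16, which fit.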
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
                                     const CombineInfo &CI) {
  const unsigned Width = (CI.Width0 + CI.Width1);
  switch (CI.InstClass) {
  default:
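    // A combined width of 3 dwords (e.g. a dword plus a dwordx2 buffer
    // access) is only legal on subtargets with dwordx3 loads and stores.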
    return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return false;
    case 2:
    case 4:
      return true;
    }
  }
}

unsigned SILoadStoreOptimizer::getOpcodeWidth(const MachineInstr &MI) {
  const unsigned Opc = MI.getOpcode();

  if (TII->isMUBUF(MI)) {
    return AMDGPU::getMUBUFDwords(Opc);
  }

  switch (Opc) {
  default:
    return 0;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
    return 1;
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
    return 2;
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return 4;
  }
}

InstClassEnum SILoadStoreOptimizer::getInstClass(unsigned Opc) {
  if (TII->isMUBUF(Opc)) {
    const int baseOpcode = AMDGPU::getMUBUFBaseOpcode(Opc);

    // If we couldn't identify the opcode, bail out.
    if (baseOpcode == -1) {
      return UNKNOWN;
    }

    switch (baseOpcode) {
    default:
      return UNKNOWN;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
      return BUFFER_LOAD_OFFEN;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
      return BUFFER_LOAD_OFFSET;
    case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
      return BUFFER_STORE_OFFEN;
    case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
      return BUFFER_STORE_OFFSET;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
      return BUFFER_LOAD_OFFEN_exact;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
      return BUFFER_LOAD_OFFSET_exact;
    case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
      return BUFFER_STORE_OFFEN_exact;
    case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
      return BUFFER_STORE_OFFSET_exact;
    }
  }

  switch (Opc) {
  default:
    return UNKNOWN;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return S_BUFFER_LOAD_IMM;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
    return DS_READ;
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return DS_WRITE;
  }
}

unsigned SILoadStoreOptimizer::getRegs(unsigned Opc) {
  if (TII->isMUBUF(Opc)) {
    unsigned result = 0;

    if (AMDGPU::getMUBUFHasVAddr(Opc)) {
      result |= VADDR;
    }

    if (AMDGPU::getMUBUFHasSrsrc(Opc)) {
      result |= SRSRC;
    }

    if (AMDGPU::getMUBUFHasSoffset(Opc)) {
      result |= SOFFSET;
    }

    return result;
  }

  switch (Opc) {
  default:
    return 0;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return SBASE;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return ADDR;
  }
}

bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  const unsigned Opc = CI.I->getOpcode();
  const InstClassEnum InstClass = getInstClass(Opc);

  if (InstClass == UNKNOWN) {
    return false;
  }

  const unsigned Regs = getRegs(Opc);

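  // Collect every address operand (base pointer, resource descriptor,
  // soffset, ...) that this instruction class carries; a candidate pair must
  // match on all of them.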
  unsigned AddrOpName[5] = {0};
  int AddrIdx[5];
  const MachineOperand *AddrReg[5];
  unsigned NumAddresses = 0;

  if (Regs & ADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
  }

  if (Regs & SBASE) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
  }

  if (Regs & SRSRC) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
  }

  if (Regs & SOFFSET) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
  }

  if (Regs & VADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);

    // We only ever merge operations with the same base address register, so
    // don't bother scanning forward if there are no other uses.
    if (AddrReg[i]->isReg() &&
        (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
         MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
      return false;
  }

  ++MBBI;

  DenseSet<unsigned> RegDefsToMove;
  DenseSet<unsigned> PhysRegUsesToMove;
  addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);

  for (; MBBI != E; ++MBBI) {
    const bool IsDS = (InstClass == DS_READ) || (InstClass == DS_WRITE);

    if ((getInstClass(MBBI->getOpcode()) != InstClass) ||
        (IsDS && (MBBI->getOpcode() != Opc))) {
      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2.  Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction, so any uses of I will
      // need to be moved down as well.
      addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                            CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                              CI.InstsToMove))
      continue;

    bool Match = true;
    for (unsigned i = 0; i < NumAddresses; i++) {
      const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);

      if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
        if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
            AddrReg[i]->getImm() != AddrRegNext.getImm()) {
          Match = false;
          break;
        }
        continue;
      }

      // Check same base pointer. Be careful of subregisters, which can occur
      // with vectors of pointers.
      if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
          AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
        Match = false;
        break;
      }
    }

    if (Match) {
      int OffsetIdx =
          AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
      CI.Width0 = getOpcodeWidth(*CI.I);
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
      CI.Width1 = getOpcodeWidth(*MBBI);
      CI.Paired = MBBI;

      if ((CI.InstClass == DS_READ) || (CI.InstClass == DS_WRITE)) {
        CI.Offset0 &= 0xffff;
        CI.Offset1 &= 0xffff;
      } else {
        CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
        CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
        if (CI.InstClass != S_BUFFER_LOAD_IMM) {
          CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
          CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
        }
        CI.DLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::dlc)->getImm();
        CI.DLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::dlc)->getImm();
      }

      // Check that both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (widthsFit(*STM, CI) && offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction. Check if we can move I across MBBI and
    // if we can move all of I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
      break;
  }
  return false;
}

unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
                        : AMDGPU::DS_READ2ST64_B64_gfx9;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc =
      CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC =
      (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg)
        .addImm(0); // clamp bit
    BaseSubReg = 0;
  }

  MachineInstrBuilder Read2 =
      BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
                        : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                          : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
                        : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg(), with these. We want to be sure
  // we preserve the subregister index and any register flags set on them.
  const MachineOperand *AddrReg =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1 =
      TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc =
      CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg)
        .addImm(0); // clamp bit
    BaseSubReg = 0;
  }

  MachineInstrBuilder Write2 =
      BuildMI(*MBB, CI.Paired, DL, Write2Desc)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .add(*Data0)                               // data0
          .add(*Data1)                               // data1
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeSBufferLoadImmPair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.DLC0)      // dlc
      .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferLoadPair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  // Copy to the new source register.
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  const unsigned Regs = getRegs(Opcode);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .addImm(CI.DLC0)      // dlc
      .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI) {
  const unsigned Width = CI.Width0 + CI.Width1;

  switch (CI.InstClass) {
  default:
    return AMDGPU::getMUBUFOpcode(CI.InstClass, Width);
  case UNKNOWN:
    llvm_unreachable("Unknown instruction class");
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return 0;
    case 2:
      return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
    case 4:
      return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
    }
  }
}

std::pair<unsigned, unsigned>
SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI) {
  if (CI.Offset0 > CI.Offset1) {
    switch (CI.Width0) {
    default:
      return std::make_pair(0, 0);
    case 1:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1, AMDGPU::sub0);
      case 2:
        return std::make_pair(AMDGPU::sub2, AMDGPU::sub0_sub1);
      case 3:
        return std::make_pair(AMDGPU::sub3, AMDGPU::sub0_sub1_sub2);
      }
    case 2:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1_sub2, AMDGPU::sub0);
      case 2:
        return std::make_pair(AMDGPU::sub2_sub3, AMDGPU::sub0_sub1);
      }
    case 3:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1_sub2_sub3, AMDGPU::sub0);
      }
    }
  } else {
    switch (CI.Width0) {
    default:
      return std::make_pair(0, 0);
    case 1:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1);
      case 2:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2);
      case 3:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2_sub3);
      }
    case 2:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2);
      case 2:
        return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2_sub3);
      }
    case 3:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0_sub1_sub2, AMDGPU::sub3);
      }
    }
  }
}

const TargetRegisterClass *
SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI) {
  if (CI.InstClass == S_BUFFER_LOAD_IMM) {
    switch (CI.Width0 + CI.Width1) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::SReg_64_XEXECRegClass;
    case 4:
      return &AMDGPU::SReg_128RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16:
      return &AMDGPU::SReg_512RegClass;
    }
  } else {
    switch (CI.Width0 + CI.Width1) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    }
  }
}

MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferStorePair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI);

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
  unsigned SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  const unsigned Regs = getRegs(Opcode);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
      .addImm(CI.GLC0)                          // glc
      .addImm(CI.SLC0)                          // slc
      .addImm(0)                                // tfe
      .addImm(CI.DLC0)                          // dlc
      .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  moveInstsAfter(MIB, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

MachineOperand
SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) {
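  // Use the value directly if it is a legal inline constant; otherwise
  // materialize it into an SGPR with S_MOV_B32.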
  APInt V(32, Val, true);
  if (TII->isInlineConstant(V))
    return MachineOperand::CreateImm(Val);

  unsigned Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  MachineInstr *Mov =
  BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
          TII->get(AMDGPU::S_MOV_B32), Reg)
    .addImm(Val);
  (void)Mov;
  LLVM_DEBUG(dbgs() << "    "; Mov->dump());
  return MachineOperand::CreateReg(Reg, false);
}

// Compute the base address using Addr and return the final register.
unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
                                           const MemAddress &Addr) {
  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  DebugLoc DL = MI.getDebugLoc();

  assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
          Addr.Base.LoSubReg) &&
         "Expected 32-bit Base-Register-Low!!");

  assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
          Addr.Base.HiSubReg) &&
         "Expected 32-bit Base-Register-Hi!!");

  LLVM_DEBUG(dbgs() << "  Re-Computed Anchor-Base:\n");
  MachineOperand OffsetLo = createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
  MachineOperand OffsetHi =
    createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);

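  // Lower the 64-bit add as a 32-bit add-with-carry pair: V_ADD_I32_e64
  // defines the carry that V_ADDC_U32_e64 consumes for the high half.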
  const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
  unsigned CarryReg = MRI->createVirtualRegister(CarryRC);
  unsigned DeadCarryReg = MRI->createVirtualRegister(CarryRC);

  unsigned DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *LoHalf =
    BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0)
      .addReg(CarryReg, RegState::Define)
      .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
      .add(OffsetLo)
      .addImm(0); // clamp bit
  (void)LoHalf;
  LLVM_DEBUG(dbgs() << "    "; LoHalf->dump(););

  MachineInstr *HiHalf =
  BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
    .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
    .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
    .add(OffsetHi)
    .addReg(CarryReg, RegState::Kill)
    .addImm(0); // clamp bit
  (void)HiHalf;
  LLVM_DEBUG(dbgs() << "    "; HiHalf->dump(););

  unsigned FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
  MachineInstr *FullBase =
    BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
      .addReg(DestSub0)
      .addImm(AMDGPU::sub0)
      .addReg(DestSub1)
      .addImm(AMDGPU::sub1);
  (void)FullBase;
  LLVM_DEBUG(dbgs() << "    "; FullBase->dump(); dbgs() << "\n";);

  return FullDestReg;
}

// Update base and offset with the NewBase and NewOffset in MI.
void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
                                               unsigned NewBase,
                                               int32_t NewOffset) {
  TII->getNamedOperand(MI, AMDGPU::OpName::vaddr)->setReg(NewBase);
  TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
}

Optional<int32_t>
SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) {
  if (Op.isImm())
    return Op.getImm();

  if (!Op.isReg())
    return None;

  MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
      !Def->getOperand(1).isImm())
    return None;

  return Def->getOperand(1).getImm();
}

// Analyze Base and extract:
//  - the 32-bit base registers and subregisters
//  - the 64-bit constant offset
// Expecting the base computation as:
//   %OFFSET0:sgpr_32 = S_MOV_B32 8000
//   %LO:vgpr_32, %c:sreg_64_xexec =
//       V_ADD_I32_e64 %BASE_LO:vgpr_32, %103:sgpr_32,
//   %HI:vgpr_32, = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
//   %Base:vreg_64 =
//       REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
                                                      MemAddress &Addr) {
  if (!Base.isReg())
    return;

  MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE
      || Def->getNumOperands() != 5)
    return;

  MachineOperand BaseLo = Def->getOperand(1);
  MachineOperand BaseHi = Def->getOperand(3);
  if (!BaseLo.isReg() || !BaseHi.isReg())
    return;

  MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
  MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());

  if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_I32_e64 ||
      !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
    return;

  const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
  const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);

  auto Offset0P = extractConstOffset(*Src0);
  if (Offset0P)
    BaseLo = *Src1;
  else {
    if (!(Offset0P = extractConstOffset(*Src1)))
      return;
    BaseLo = *Src0;
  }

  Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
  Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);

  if (Src0->isImm())
    std::swap(Src0, Src1);

  if (!Src1->isImm())
    return;

  uint64_t Offset1 = Src1->getImm();
  BaseHi = *Src0;

  Addr.Base.LoReg = BaseLo.getReg();
  Addr.Base.HiReg = BaseHi.getReg();
  Addr.Base.LoSubReg = BaseLo.getSubReg();
  Addr.Base.HiSubReg = BaseHi.getSubReg();
  Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
}

bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
    MachineInstr &MI,
    MemInfoMap &Visited,
    SmallPtrSet<MachineInstr *, 4> &AnchorList) {

  // TODO: Support flat and scratch.
  if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0 ||
      TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != nullptr)
    return false;

  // TODO: Support stores.
  if (!MI.mayLoad())
    return false;

  if (AnchorList.count(&MI))
    return false;

  LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());

  if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
    LLVM_DEBUG(dbgs() << "  Const-offset is already promoted.\n";);
    return false;
  }

  // Step 1: Find the base registers and a 64-bit constant offset.
  MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  MemAddress MAddr;
  if (Visited.find(&MI) == Visited.end()) {
    processBaseWithConstOffset(Base, MAddr);
    Visited[&MI] = MAddr;
  } else
    MAddr = Visited[&MI];

  if (MAddr.Offset == 0) {
    LLVM_DEBUG(dbgs() << "  Failed to extract constant-offset or there are no"
                         " constant offsets that can be promoted.\n";);
    return false;
  }

  LLVM_DEBUG(dbgs() << "  BASE: {" << MAddr.Base.HiReg << ", "
             << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset << "\n\n";);

  // Step 2: Traverse through MI's basic block and find an anchor (an
  // instruction with the same base registers) whose offset has the highest
  // 13-bit distance from MI's offset.
  // E.g. (64-bit loads)
  // bb:
  //   addr1 = &a + 4096;   load1 = load(addr1,  0)
  //   addr2 = &a + 6144;   load2 = load(addr2,  0)
  //   addr3 = &a + 8192;   load3 = load(addr3,  0)
  //   addr4 = &a + 10240;  load4 = load(addr4,  0)
  //   addr5 = &a + 12288;  load5 = load(addr5,  0)
  //
  // Starting from the first load, the optimization will try to find a new base
  // from which (&a + 4096) has a 13-bit distance. Both &a + 6144 and &a + 8192
  // have a 13-bit distance from &a + 4096. The heuristic picks &a + 8192 as
  // the new base (anchor) because the maximum distance can presumably
  // accommodate more intermediate bases.
  //
  // Step 3: Move (&a + 8192) above load1. Compute and promote offsets from
  // (&a + 8192) for load1, load2, and load4.
  //   addr = &a + 8192
  //   load1 = load(addr,       -4096)
  //   load2 = load(addr,       -2048)
  //   load3 = load(addr,       0)
  //   load4 = load(addr,       2048)
  //   addr5 = &a + 12288;  load5 = load(addr5,  0)
  //
  MachineInstr *AnchorInst = nullptr;
  MemAddress AnchorAddr;
  uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
  SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;

  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  ++MBBI;
  const SITargetLowering *TLI =
    static_cast<const SITargetLowering *>(STM->getTargetLowering());

  for (; MBBI != E; ++MBBI) {
    MachineInstr &MINext = *MBBI;
    // TODO: Support finding an anchor (with the same base) from store
    // addresses or any other load addresses where the opcodes are different.
    if (MINext.getOpcode() != MI.getOpcode() ||
        TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
      continue;

    const MachineOperand &BaseNext =
      *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
    MemAddress MAddrNext;
    if (Visited.find(&MINext) == Visited.end()) {
      processBaseWithConstOffset(BaseNext, MAddrNext);
      Visited[&MINext] = MAddrNext;
    } else
      MAddrNext = Visited[&MINext];

    if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
        MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
        MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
        MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
      continue;

    InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));

    int64_t Dist = MAddr.Offset - MAddrNext.Offset;
    TargetLoweringBase::AddrMode AM;
    AM.HasBaseReg = true;
    AM.BaseOffs = Dist;
    if (TLI->isLegalGlobalAddressingMode(AM) &&
        (uint32_t)std::abs(Dist) > MaxDist) {
      MaxDist = std::abs(Dist);

      AnchorAddr = MAddrNext;
      AnchorInst = &MINext;
    }
  }

  if (AnchorInst) {
    LLVM_DEBUG(dbgs() << "  Anchor-Inst(with max-distance from Offset): ";
               AnchorInst->dump());
    LLVM_DEBUG(dbgs() << "  Anchor-Offset from BASE: "
               <<  AnchorAddr.Offset << "\n\n");

    // Instead of moving up, just re-compute the anchor instruction's base
    // address.
    unsigned Base = computeBase(MI, AnchorAddr);

    updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
    LLVM_DEBUG(dbgs() << "  After promotion: "; MI.dump(););

    for (auto P : InstsWCommonBase) {
      TargetLoweringBase::AddrMode AM;
      AM.HasBaseReg = true;
      AM.BaseOffs = P.second - AnchorAddr.Offset;

      if (TLI->isLegalGlobalAddressingMode(AM)) {
        LLVM_DEBUG(dbgs() << "  Promote Offset(" << P.second;
                   dbgs() << ")"; P.first->dump());
        updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
        LLVM_DEBUG(dbgs() << "     After promotion: "; P.first->dump());
      }
    }
    AnchorList.insert(AnchorInst);
    return true;
  }

  return false;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  // Caches the base address and constant offset computed for each visited
  // memory instruction.
  MemInfoMap Visited;
  // Contains the list of instructions for which constant offsets are being
  // promoted to the IMM.
  SmallPtrSet<MachineInstr *, 4> AnchorList;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
      Modified = true;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    const unsigned Opc = MI.getOpcode();

    CombineInfo CI;
    CI.I = I;
    CI.InstClass = getInstClass(Opc);

    switch (CI.InstClass) {
    default:
      break;
    case DS_READ:
      CI.EltSize =
          (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
                                                                          : 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }
      continue;
    case DS_WRITE:
      CI.EltSize =
          (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
                                                                            : 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }
      continue;
    case S_BUFFER_LOAD_IMM:
      CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeSBufferLoadImmPair(CI);
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 16;
      } else {
        ++I;
      }
      continue;
    case BUFFER_LOAD_OFFEN:
    case BUFFER_LOAD_OFFSET:
    case BUFFER_LOAD_OFFEN_exact:
    case BUFFER_LOAD_OFFSET_exact:
      CI.EltSize = 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferLoadPair(CI);
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
      } else {
        ++I;
      }
      continue;
    case BUFFER_STORE_OFFEN:
    case BUFFER_STORE_OFFSET:
    case BUFFER_STORE_OFFEN_exact:
    case BUFFER_STORE_OFFSET_exact:
      CI.EltSize = 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferStorePair(CI);
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
      } else {
        ++I;
      }
      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<GCNSubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF) {
    do {
      OptimizeAgain = false;
      Modified |= optimizeBlock(MBB);
    } while (OptimizeAgain);
  }

  return Modified;
}