Changes in directory llvm/lib/Target/X86:

X86RegisterInfo.cpp updated: 1.192 -> 1.193
X86RegisterInfo.h updated: 1.43 -> 1.44
X86RegisterInfo.td updated: 1.39 -> 1.40
---
Log message:

hasFP() is now a virtual method of MRegisterInfo.
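
For context, the new shape looks roughly like this -- a minimal sketch, not the
actual LLVM headers; whether the base-class method is pure virtual or carries a
default body is not visible in this diff (pure virtual is assumed here):

  class MachineFunction;

  class MRegisterInfo {
  public:
    virtual ~MRegisterInfo() {}
    // hasFP - Return true if the specified function should have a dedicated
    // frame pointer register; each target overrides this with its own rules.
    virtual bool hasFP(const MachineFunction &MF) const = 0;
  };

  struct X86RegisterInfo : public MRegisterInfo {
    bool hasFP(const MachineFunction &MF) const;  // body shown in diff below
  };

  // Call sites now go through the register-info object instead of a
  // file-static helper, e.g.:
  //   const MRegisterInfo *RI = MF.getTarget().getRegisterInfo();
  //   if (RI->hasFP(MF)) ...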

---
Diffs of the changes:  (+23 -13)

 X86RegisterInfo.cpp |    4 ++--
 X86RegisterInfo.h   |    2 ++
 X86RegisterInfo.td  |   30 +++++++++++++++++++-----------
 3 files changed, 23 insertions(+), 13 deletions(-)


Index: llvm/lib/Target/X86/X86RegisterInfo.cpp
diff -u llvm/lib/Target/X86/X86RegisterInfo.cpp:1.192 llvm/lib/Target/X86/X86RegisterInfo.cpp:1.193
--- llvm/lib/Target/X86/X86RegisterInfo.cpp:1.192       Sat Jan 20 04:17:53 2007
+++ llvm/lib/Target/X86/X86RegisterInfo.cpp     Mon Jan 22 18:57:47 2007
@@ -891,7 +891,7 @@
 // pointer register.  This is true if the function has variable sized allocas or
 // if frame pointer elimination is disabled.
 //
-static bool hasFP(const MachineFunction &MF) {
+bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
   return (NoFramePointerElim || 
           MF.getFrameInfo()->hasVarSizedObjects() ||
           MF.getInfo<X86FunctionInfo>()->getForceFramePointer());
@@ -998,7 +998,7 @@
   
   // Get the number of bytes to allocate from the FrameInfo
   unsigned NumBytes = MFI->getStackSize();
-  if (MFI->hasCalls() || MF.getFrameInfo()->hasVarSizedObjects()) {
+  if (MFI->hasCalls() || MFI->hasVarSizedObjects()) {
     // When we have no frame pointer, we reserve argument space for call sites
     // in the function immediately on entry to the current function.  This
     // eliminates the need for add/sub ESP brackets around call sites.


Index: llvm/lib/Target/X86/X86RegisterInfo.h
diff -u llvm/lib/Target/X86/X86RegisterInfo.h:1.43 llvm/lib/Target/X86/X86RegisterInfo.h:1.44
--- llvm/lib/Target/X86/X86RegisterInfo.h:1.43  Tue Jan  2 15:33:40 2007
+++ llvm/lib/Target/X86/X86RegisterInfo.h       Mon Jan 22 18:57:47 2007
@@ -78,6 +78,8 @@
   /// length of this list match the getCalleeSavedRegs() list.
   const TargetRegisterClass* const* getCalleeSavedRegClasses() const;
 
+  bool hasFP(const MachineFunction &MF) const;
+
   void eliminateCallFramePseudoInstr(MachineFunction &MF,
                                      MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI) const;


Index: llvm/lib/Target/X86/X86RegisterInfo.td
diff -u llvm/lib/Target/X86/X86RegisterInfo.td:1.39 llvm/lib/Target/X86/X86RegisterInfo.td:1.40
--- llvm/lib/Target/X86/X86RegisterInfo.td:1.39 Fri Sep  8 01:48:29 2006
+++ llvm/lib/Target/X86/X86RegisterInfo.td      Mon Jan 22 18:57:47 2007
@@ -197,10 +197,11 @@
     GR8Class::iterator
     GR8Class::allocation_order_begin(const MachineFunction &MF) const {
       const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
       const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
       if (!Subtarget.is64Bit())
         return X86_GR8_AO_32;
-      else if (hasFP(MF))
+      else if (RI->hasFP(MF))
         return X86_GR8_AO_64_fp;
       else
         return X86_GR8_AO_64;
@@ -209,10 +210,11 @@
     GR8Class::iterator
     GR8Class::allocation_order_end(const MachineFunction &MF) const {
       const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
       const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
       if (!Subtarget.is64Bit())
         return X86_GR8_AO_32 + (sizeof(X86_GR8_AO_32) / sizeof(unsigned));
-      else if (hasFP(MF))
+      else if (RI->hasFP(MF))
        return X86_GR8_AO_64_fp + (sizeof(X86_GR8_AO_64_fp) / sizeof(unsigned));
       else
         return X86_GR8_AO_64 + (sizeof(X86_GR8_AO_64) / sizeof(unsigned));
@@ -248,14 +250,15 @@
     GR16Class::iterator
     GR16Class::allocation_order_begin(const MachineFunction &MF) const {
       const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
       const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
       if (Subtarget.is64Bit()) {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
           return X86_GR16_AO_64_fp;
         else
           return X86_GR16_AO_64;
       } else {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
           return X86_GR16_AO_32_fp;
         else
           return X86_GR16_AO_32;
@@ -265,14 +268,15 @@
     GR16Class::iterator
     GR16Class::allocation_order_end(const MachineFunction &MF) const {
       const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
       const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
       if (Subtarget.is64Bit()) {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
          return X86_GR16_AO_64_fp+(sizeof(X86_GR16_AO_64_fp)/sizeof(unsigned));
         else
           return X86_GR16_AO_64 + (sizeof(X86_GR16_AO_64) / sizeof(unsigned));
       } else {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
          return X86_GR16_AO_32_fp+(sizeof(X86_GR16_AO_32_fp)/sizeof(unsigned));
         else
           return X86_GR16_AO_32 + (sizeof(X86_GR16_AO_32) / sizeof(unsigned));
@@ -309,14 +313,15 @@
     GR32Class::iterator
     GR32Class::allocation_order_begin(const MachineFunction &MF) const {
       const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
       const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
       if (Subtarget.is64Bit()) {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
           return X86_GR32_AO_64_fp;
         else
           return X86_GR32_AO_64;
       } else {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
           return X86_GR32_AO_32_fp;
         else
           return X86_GR32_AO_32;
@@ -326,14 +331,15 @@
     GR32Class::iterator
     GR32Class::allocation_order_end(const MachineFunction &MF) const {
       const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
       const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
       if (Subtarget.is64Bit()) {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
          return X86_GR32_AO_64_fp+(sizeof(X86_GR32_AO_64_fp)/sizeof(unsigned));
         else
           return X86_GR32_AO_64 + (sizeof(X86_GR32_AO_64) / sizeof(unsigned));
       } else {
-        if (hasFP(MF))
+        if (RI->hasFP(MF))
          return X86_GR32_AO_32_fp+(sizeof(X86_GR32_AO_32_fp)/sizeof(unsigned));
         else
           return X86_GR32_AO_32 + (sizeof(X86_GR32_AO_32) / sizeof(unsigned));
@@ -352,7 +358,9 @@
   let MethodBodies = [{
     GR64Class::iterator
     GR64Class::allocation_order_end(const MachineFunction &MF) const {
-      if (hasFP(MF))     // Does the function dedicate RBP to being a frame ptr?
+      const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
+      if (RI->hasFP(MF)) // Does the function dedicate RBP to being a frame ptr?
         return end()-2;  // If so, don't allocate RSP or RBP
       else
         return end()-1;  // If not, just don't allocate RSP
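
For readers unfamiliar with the trick in the GR64 hunk above: returning
end()-2 vs. end()-1 only works because RBP and RSP are the last two entries
in the class's register order. A standalone sketch of the pattern follows
(illustrative names and types, not the LLVM/TableGen API):

  #include <vector>

  enum Reg { RAX, RBX, RCX, RDX, RSI, RDI, RBP, RSP };

  struct GR64Like {
    // The register order deliberately puts the stack/frame registers last.
    std::vector<Reg> Order{RAX, RBX, RCX, RDX, RSI, RDI, RBP, RSP};

    std::vector<Reg>::const_iterator allocation_order_begin() const {
      return Order.begin();
    }
    // Never hand out RSP; when the function keeps a dedicated frame pointer,
    // withhold RBP as well by stopping two registers early.
    std::vector<Reg>::const_iterator allocation_order_end(bool HasFP) const {
      return HasFP ? Order.end() - 2 : Order.end() - 1;
    }
  };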


