Index: test/CodeGen/ARM/a15.ll
===================================================================
--- test/CodeGen/ARM/a15.ll	(revision 0)
+++ test/CodeGen/ARM/a15.ll	(revision 0)
@@ -0,0 +1,6 @@
+; RUN: llc < %s -mcpu=cortex-a15 | FileCheck %s
+
+; CHECK: a
+define i32 @a(i32 %x) {
+  ret i32 %x
+}
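Note (not part of the patch): lit substitutes %s with the path of this file, so the RUN line above is roughly equivalent to running llc < test/CodeGen/ARM/a15.ll -mcpu=cortex-a15 | FileCheck test/CodeGen/ARM/a15.ll by hand. The single CHECK line only requires the string "a" to appear somewhere in the emitted assembly, so this is essentially a smoke test that llc accepts -mcpu=cortex-a15.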
Index: lib/Target/ARM/ARMHazardRecognizer.cpp
===================================================================
--- lib/Target/ARM/ARMHazardRecognizer.cpp	(revision 163113)
+++ lib/Target/ARM/ARMHazardRecognizer.cpp	(working copy)
@@ -47,7 +47,9 @@
       // Skip over one non-VFP / NEON instruction.
       if (!LastMI->isBarrier() &&
           // On A9, AGU and NEON/FPU are muxed.
-          !(STI.isCortexA9() && (LastMI->mayLoad() || LastMI->mayStore())) &&
+          // FIXME: It has not been determined if this is optimal on A15.
+          !((STI.isCortexA9() || STI.isCortexA15()) && (LastMI->mayLoad() ||
+           LastMI->mayStore())) &&
           (LastMCID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) {
         MachineBasicBlock::iterator I = LastMI;
         if (I != LastMI->getParent()->begin()) {
Index: lib/Target/ARM/ARMTargetMachine.cpp
===================================================================
--- lib/Target/ARM/ARMTargetMachine.cpp	(revision 163113)
+++ lib/Target/ARM/ARMTargetMachine.cpp	(working copy)
@@ -150,7 +150,9 @@
   // FIXME: temporarily disabling load / store optimization pass for Thumb1.
   if (getOptLevel() != CodeGenOpt::None && !getARMSubtarget().isThumb1Only())
     addPass(createARMLoadStoreOptimizationPass(true));
-  if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9())
+  if (getOptLevel() != CodeGenOpt::None &&
+      // FIXME: It has not been determined if this is optimal on A15.
+      (getARMSubtarget().isCortexA9() || getARMSubtarget().isCortexA15()))
     addPass(createMLxExpansionPass());
   return true;
 }
Index: lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- lib/Target/ARM/ARMISelLowering.cpp	(revision 163113)
+++ lib/Target/ARM/ARMISelLowering.cpp	(working copy)
@@ -821,7 +821,8 @@
   benefitFromCodePlacementOpt = true;
 
   // Prefer likely predicted branches to selects on out-of-order cores.
-  predictableSelectIsExpensive = Subtarget->isCortexA9();
+  predictableSelectIsExpensive = Subtarget->isCortexA9() ||
+                                 Subtarget->isCortexA15();
 
   setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
 }
Index: lib/Target/ARM/ARMSubtarget.h
===================================================================
--- lib/Target/ARM/ARMSubtarget.h	(revision 163113)
+++ lib/Target/ARM/ARMSubtarget.h	(working copy)
@@ -30,7 +30,7 @@
 class ARMSubtarget : public ARMGenSubtargetInfo {
 protected:
   enum ARMProcFamilyEnum {
-    Others, CortexA8, CortexA9
+    Others, CortexA8, CortexA9, CortexA15
   };
 
   /// ARMProcFamily - ARM processor family: Cortex-A8, Cortex-A9, and others.
@@ -199,6 +199,7 @@
 
   bool isCortexA8() const { return ARMProcFamily == CortexA8; }
   bool isCortexA9() const { return ARMProcFamily == CortexA9; }
+  bool isCortexA15() const { return ARMProcFamily == CortexA15; }
   bool isCortexM3() const { return CPUString == "cortex-m3"; }
 
   bool hasARMOps() const { return !NoARM; }
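Note (not part of the patch): the new isCortexA15() predicate is queried the same way as the existing isCortexA8()/isCortexA9() hooks. A minimal sketch of a hypothetical caller, assuming a MachineFunction Fn and the getSubtarget<ARMSubtarget>() access pattern used in MLxExpansionPass.cpp below:

  // Hypothetical example only; not added by this patch.
  const ARMSubtarget *STI = &Fn.getTarget().getSubtarget<ARMSubtarget>();
  if (STI->isCortexA15()) {
    // A15-specific tuning would go here once it has been benchmarked.
  }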
Index: lib/Target/ARM/MLxExpansionPass.cpp
===================================================================
--- lib/Target/ARM/MLxExpansionPass.cpp	(revision 163113)
+++ lib/Target/ARM/MLxExpansionPass.cpp	(working copy)
@@ -51,7 +51,7 @@
     const TargetRegisterInfo *TRI;
     MachineRegisterInfo *MRI;
 
-    bool isA9;
+    bool isLikeA9;
     unsigned MIIdx;
     MachineInstr* LastMIs[4];
     SmallPtrSet<MachineInstr*, 4> IgnoreStall;
@@ -179,8 +179,8 @@
   // preserves the in-order retirement of the instructions.
   // Look at the next few instructions, if *most* of them can cause hazards,
   // then the scheduler can't *fix* this, we'd better break up the VMLA.
-  unsigned Limit1 = isA9 ? 1 : 4;
-  unsigned Limit2 = isA9 ? 1 : 4;
+  unsigned Limit1 = isLikeA9 ? 1 : 4;
+  unsigned Limit2 = isLikeA9 ? 1 : 4;
   for (unsigned i = 1; i <= 4; ++i) {
     int Idx = ((int)MIIdx - i + 4) % 4;
     MachineInstr *NextMI = LastMIs[Idx];
@@ -316,7 +316,9 @@
   TRI = Fn.getTarget().getRegisterInfo();
   MRI = &Fn.getRegInfo();
   const ARMSubtarget *STI = &Fn.getTarget().getSubtarget<ARMSubtarget>();
-  isA9 = STI->isCortexA9();
+  // FIXME: It has not been determined if the transformations done here are
+  // beneficial on A15.
+  isLikeA9 = STI->isCortexA9() || STI->isCortexA15();
 
   bool Modified = false;
   for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
Index: lib/Target/ARM/ARMISelDAGToDAG.cpp
===================================================================
--- lib/Target/ARM/ARMISelDAGToDAG.cpp	(revision 163113)
+++ lib/Target/ARM/ARMISelDAGToDAG.cpp	(working copy)
@@ -335,8 +335,9 @@
 
   if (!CheckVMLxHazard)
     return true;
-
-  if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9())
+  // FIXME: It has not been determined if this is profitable on A15.
+  if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9() &&
+      !Subtarget->isCortexA15())
     return true;
 
   if (!N->hasOneUse())
@@ -374,7 +375,8 @@
 bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
                                             ARM_AM::ShiftOpc ShOpcVal,
                                             unsigned ShAmt) {
-  if (!Subtarget->isCortexA9())
+  // FIXME: It has not been determined that this is the best action for A15.
+  if (!Subtarget->isCortexA9() && !Subtarget->isCortexA15())
     return true;
   if (Shift.hasOneUse())
     return true;
@@ -486,7 +488,9 @@
 bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
                                       SDValue &Opc) {
   if (N.getOpcode() == ISD::MUL &&
-      (!Subtarget->isCortexA9() || N.hasOneUse())) {
+      // FIXME: It has not been determined if this is profitable on A15.
+      ((!Subtarget->isCortexA9() && !Subtarget->isCortexA15()) ||
+       N.hasOneUse())) {
     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
       // X * [3,5,9] -> X + X * [2,4,8] etc.
       int RHSC = (int)RHS->getZExtValue();
@@ -550,7 +554,9 @@
 
   // Try matching (R shl C) + (R).
   if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
-      !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
+      // FIXME: It has not been determined if this is profitable on A15.
+      !(Subtarget->isCortexA9() || Subtarget->isCortexA15() ||
+        N.getOperand(0).hasOneUse())) {
     ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
     if (ShOpcVal != ARM_AM::no_shift) {
       // Check to see if the RHS of the shift is a constant, if not, we can't
@@ -584,7 +590,9 @@
                                                      SDValue &Offset,
                                                      SDValue &Opc) {
   if (N.getOpcode() == ISD::MUL &&
-      (!Subtarget->isCortexA9() || N.hasOneUse())) {
+      // FIXME: It has not been determined if this is profitable on A15.
+      ((!Subtarget->isCortexA9() && !Subtarget->isCortexA15()) ||
+       N.hasOneUse())) {
     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
       // X * [3,5,9] -> X + X * [2,4,8] etc.
       int RHSC = (int)RHS->getZExtValue();
@@ -650,7 +658,9 @@
     }
   }
 
-  if (Subtarget->isCortexA9() && !N.hasOneUse()) {
+  // FIXME: It has not been determined if this is profitable on A15.
+  if ((Subtarget->isCortexA9() || Subtarget->isCortexA15()) &&
+      !N.hasOneUse()) {
     // Compute R +/- (R << N) and reuse it.
     Base = N;
     Offset = CurDAG->getRegister(0, MVT::i32);
@@ -688,7 +698,9 @@
 
   // Try matching (R shl C) + (R).
   if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
-      !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
+      // FIXME: It has not been determined if this is profitable on A15.
+      !(Subtarget->isCortexA9() || Subtarget->isCortexA15() ||
+        N.getOperand(0).hasOneUse())) {
     ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
     if (ShOpcVal != ARM_AM::no_shift) {
       // Check to see if the RHS of the shift is a constant, if not, we can't
Index: lib/Target/ARM/ARMBaseRegisterInfo.cpp
===================================================================
--- lib/Target/ARM/ARMBaseRegisterInfo.cpp	(revision 163113)
+++ lib/Target/ARM/ARMBaseRegisterInfo.cpp	(working copy)
@@ -476,7 +476,8 @@
 bool
 ARMBaseRegisterInfo::avoidWriteAfterWrite(const TargetRegisterClass *RC) const {
   // CortexA9 has a Write-after-write hazard for NEON registers.
-  if (!STI.isCortexA9())
+  // FIXME: It is not clear that this is true for A15.
+  if (!STI.isCortexA9() && !STI.isCortexA15())
     return false;
 
   switch (RC->getID()) {
Index: lib/Target/ARM/ARMBaseInstrInfo.cpp
===================================================================
--- lib/Target/ARM/ARMBaseInstrInfo.cpp	(revision 163113)
+++ lib/Target/ARM/ARMBaseInstrInfo.cpp	(working copy)
@@ -2449,7 +2449,8 @@
       if (NumRegs % 2)
         ++A8UOps;
       return A8UOps;
-    } else if (Subtarget.isCortexA9()) {
+    } else if (Subtarget.isCortexA9() || Subtarget.isCortexA15()) {
+      // FIXME: It has not been determined that this is correct on A15.
       int A9UOps = (NumRegs / 2);
       // If there are odd number of registers or if it's not 64-bit aligned,
       // then it takes an extra AGU (Address Generation Unit) cycle.
@@ -2482,7 +2483,8 @@
     DefCycle = RegNo / 2 + 1;
     if (RegNo % 2)
       ++DefCycle;
-  } else if (Subtarget.isCortexA9()) {
+  } else if (Subtarget.isCortexA9() || Subtarget.isCortexA15()) {
+    // FIXME: It has not been determined that this is correct on A15.
     DefCycle = RegNo;
     bool isSLoad = false;
 
@@ -2526,7 +2528,8 @@
       DefCycle = 1;
     // Result latency is issue cycle + 2: E2.
     DefCycle += 2;
-  } else if (Subtarget.isCortexA9()) {
+  } else if (Subtarget.isCortexA9() || Subtarget.isCortexA15()) {
+    // FIXME: It has not been determined that this is correct on A15.
     DefCycle = (RegNo / 2);
     // If there are odd number of registers or if it's not 64-bit aligned,
     // then it takes an extra AGU (Address Generation Unit) cycle.
@@ -2557,7 +2560,8 @@
     UseCycle = RegNo / 2 + 1;
     if (RegNo % 2)
       ++UseCycle;
-  } else if (Subtarget.isCortexA9()) {
+  } else if (Subtarget.isCortexA9() || Subtarget.isCortexA15()) {
+    // FIXME: It has not been determined that this is correct on A15.
     UseCycle = RegNo;
     bool isSStore = false;
 
@@ -2598,7 +2602,8 @@
       UseCycle = 2;
     // Read in E3.
     UseCycle += 2;
-  } else if (Subtarget.isCortexA9()) {
+  } else if (Subtarget.isCortexA9() || Subtarget.isCortexA15()) {
+    // FIXME: It has not been determined that this is correct on A15.
     UseCycle = (RegNo / 2);
     // If there are odd number of registers or if it's not 64-bit aligned,
     // then it takes an extra AGU (Address Generation Unit) cycle.
@@ -2783,7 +2788,9 @@
                             const MachineInstr *DefMI,
                             const MCInstrDesc *DefMCID, unsigned DefAlign) {
   int Adjust = 0;
-  if (Subtarget.isCortexA8() || Subtarget.isCortexA9()) {
+  if (Subtarget.isCortexA8() || Subtarget.isCortexA9() ||
+      Subtarget.isCortexA15()) {
+    // FIXME: It has not been determined that this is correct on A15.
     // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
     // variants are one cycle cheaper.
     switch (DefMCID->getOpcode()) {
@@ -2810,7 +2817,8 @@
     }
   }
 
-  if (DefAlign < 8 && Subtarget.isCortexA9()) {
+  if (DefAlign < 8 && (Subtarget.isCortexA9() || Subtarget.isCortexA15())) {
+    // FIXME: It has not been determined that this is correct on A15.
     switch (DefMCID->getOpcode()) {
     default: break;
     case ARM::VLD1q8:
@@ -2968,7 +2976,8 @@
   if (Reg == ARM::CPSR) {
     if (DefMI->getOpcode() == ARM::FMSTAT) {
       // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
-      return Subtarget.isCortexA9() ? 1 : 20;
+      // FIXME: It has not been determined that this is correct on A15.
+      return (Subtarget.isCortexA9() || Subtarget.isCortexA15()) ? 1 : 20;
     }
 
     // CPSR set and branch can be paired in the same cycle.
@@ -3034,7 +3043,8 @@
 
   if (!UseNode->isMachineOpcode()) {
     int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
-    if (Subtarget.isCortexA9())
+    // FIXME: It has not been determined that this is correct on A15.
+    if (Subtarget.isCortexA9() || Subtarget.isCortexA15())
       return Latency <= 2 ? 1 : Latency - 1;
     else
       return Latency <= 3 ? 1 : Latency - 2;
@@ -3051,7 +3061,9 @@
                                   UseMCID, UseIdx, UseAlign);
 
   if (Latency > 1 &&
-      (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
+      (Subtarget.isCortexA8() || Subtarget.isCortexA9() ||
+       // FIXME: It has not been determined that this is correct on A15.
+       Subtarget.isCortexA15())) {
     // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
     // variants are one cycle cheaper.
     switch (DefMCID.getOpcode()) {
@@ -3080,7 +3092,8 @@
     }
   }
 
-  if (DefAlign < 8 && Subtarget.isCortexA9())
+  // FIXME: It has not been determined that this is correct on A15.
+  if (DefAlign < 8 && (Subtarget.isCortexA9() || Subtarget.isCortexA15()))
     switch (DefMCID.getOpcode()) {
     default: break;
     case ARM::VLD1q8:
@@ -3373,9 +3386,10 @@
   if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI))
     return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON));
 
-  // Cortex-A9 is particularly picky about mixing the two and wants these
-  // converted.
-  if (Subtarget.isCortexA9() && !isPredicated(MI) &&
+  // Cortex-A9 and Cortex-A15 are particularly picky about mixing the two and
+  // want these converted.
+  if ((Subtarget.isCortexA9() || Subtarget.isCortexA15()) &&
+      !isPredicated(MI) &&
       (MI->getOpcode() == ARM::VMOVRS ||
        MI->getOpcode() == ARM::VMOVSR ||
        MI->getOpcode() == ARM::VMOVS))
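Note (not part of the patch): the A9 cost model that these hunks now also apply to A15 charges NumRegs / 2 micro-ops for multi-register load/store instructions, and the surrounding code (not fully shown in the hunks) adds one more when the register count is odd or the access is not 64-bit aligned. For example, an aligned 4-register load-multiple is modelled as 4 / 2 = 2 micro-ops, while a 5-register one is modelled as 5 / 2 + 1 = 3; the def/use-cycle hunks follow the same NumRegs / 2 pattern. The FIXMEs record that this model has not been validated on A15.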
Index: lib/Target/ARM/ARM.td
===================================================================
--- lib/Target/ARM/ARM.td	(revision 163113)
+++ lib/Target/ARM/ARM.td	(working copy)
@@ -139,6 +139,12 @@
                                    [FeatureVMLxForwarding,
                                     FeatureT2XtPk, FeatureFP16,
                                     FeatureAvoidPartialCPSR]>;
+// FIXME: It has not been determined if A15 has these features.
+def ProcA15      : SubtargetFeature<"a15", "ARMProcFamily", "CortexA15",
+                                   "Cortex-A15 ARM processors",
+                                   [FeatureVMLxForwarding,
+                                    FeatureT2XtPk, FeatureFP16,
+                                    FeatureAvoidPartialCPSR]>;
 
 class ProcNoItin<string Name, list<SubtargetFeature> Features>
  : Processor<Name, NoItineraries, Features>;
@@ -214,6 +220,10 @@
                                     [ProcA9, HasV7Ops, FeatureNEON, FeatureDB,
                                      FeatureDSPThumb2, FeatureMP,
                                      FeatureHasRAS]>;
+// FIXME: A15 currently has the same ProcessorModel as A9.
+def : ProcessorModel<"cortex-a15",   CortexA9Model,
+                                    [ProcA15, HasV7Ops, FeatureNEON, FeatureDB,
+                                     FeatureDSPThumb2, FeatureHasRAS]>;
 
 // V7M Processors.
 def : ProcNoItin<"cortex-m3",       [HasV7Ops,
