LLVM API Documentation
//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

TargetLowering::TargetLowering(TargetMachine &tm)
  : TM(tm), TD(TM.getTargetData()) {
  assert(ISD::BUILTIN_OP_END <= 156 &&
         "Fixed size array in TargetLowering is not large enough!");
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));

  IsLittleEndian = TD.isLittleEndian();
  ShiftAmountTy = SetCCResultTy = PointerTy = getValueType(TD.getIntPtrType());
  ShiftAmtHandling = Undefined;
  memset(RegClassForVT, 0, MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
  memset(TargetDAGCombineArray, 0,
         sizeof(TargetDAGCombineArray)/sizeof(TargetDAGCombineArray[0]));
  maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
  allowUnalignedMemoryAccesses = false;
  UseUnderscoreSetJmpLongJmp = false;
  IntDivIsCheap = false;
  Pow2DivIsCheap = false;
  StackPointerRegisterToSaveRestore = 0;
  SchedPreferenceInfo = SchedulingForLatency;
}

TargetLowering::~TargetLowering() {}

/// setValueTypeAction - Set the action for a particular value type.  This
/// assumes an action has not already been set for this value type.
static void SetValueTypeAction(MVT::ValueType VT,
                               TargetLowering::LegalizeAction Action,
                               TargetLowering &TLI,
                               MVT::ValueType *TransformToType,
                               TargetLowering::ValueTypeActionImpl &ValueTypeActions) {
  ValueTypeActions.setTypeAction(VT, Action);
  if (Action == TargetLowering::Promote) {
    MVT::ValueType PromoteTo;
    if (VT == MVT::f32)
      PromoteTo = MVT::f64;
    else {
      unsigned LargerReg = VT+1;
      while (!TLI.isTypeLegal((MVT::ValueType)LargerReg)) {
        ++LargerReg;
        assert(MVT::isInteger((MVT::ValueType)LargerReg) &&
               "Nothing to promote to??");
      }
      PromoteTo = (MVT::ValueType)LargerReg;
    }

    assert(MVT::isInteger(VT) == MVT::isInteger(PromoteTo) &&
           MVT::isFloatingPoint(VT) == MVT::isFloatingPoint(PromoteTo) &&
           "Can only promote from int->int or fp->fp!");
    assert(VT < PromoteTo && "Must promote to a larger type!");
    TransformToType[VT] = PromoteTo;
  } else if (Action == TargetLowering::Expand) {
    assert((VT == MVT::Vector || MVT::isInteger(VT)) && VT > MVT::i8 &&
           "Cannot expand this type: target must support SOME integer reg!");
    // Expand to the next smaller integer type!
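    // (e.g. an illegal MVT::i64 is marked to expand to MVT::i32; if MVT::i32 is
    //  itself illegal, it gets its own Expand entry when that type is processed.)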
    TransformToType[VT] = (MVT::ValueType)(VT-1);
  }
}


/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLowering::computeRegisterProperties() {
  assert(MVT::LAST_VALUETYPE <= 32 &&
         "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to one.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i)
    NumElementsForVT[i] = 1;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::i128;
  for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  unsigned ExpandedReg = LargestIntReg; ++LargestIntReg;
  for (++ExpandedReg; MVT::isInteger((MVT::ValueType)ExpandedReg); ++ExpandedReg)
    NumElementsForVT[ExpandedReg] = 2*NumElementsForVT[ExpandedReg-1];

  // Inspect all of the possible ValueTypes, deciding how to process them.
  for (unsigned IntReg = MVT::i1; IntReg <= MVT::i128; ++IntReg)
    // If we are expanding this type, expand it!
    if (getNumElements((MVT::ValueType)IntReg) != 1)
      SetValueTypeAction((MVT::ValueType)IntReg, Expand, *this, TransformToType,
                         ValueTypeActions);
    else if (!isTypeLegal((MVT::ValueType)IntReg))
      // Otherwise, if we don't have native support, we must promote to a
      // larger type.
      SetValueTypeAction((MVT::ValueType)IntReg, Promote, *this,
                         TransformToType, ValueTypeActions);
    else
      TransformToType[(MVT::ValueType)IntReg] = (MVT::ValueType)IntReg;

  // If the target does not have native support for F32, promote it to F64.
  if (!isTypeLegal(MVT::f32))
    SetValueTypeAction(MVT::f32, Promote, *this,
                       TransformToType, ValueTypeActions);
  else
    TransformToType[MVT::f32] = MVT::f32;

  // Set MVT::Vector to always be Expanded
  SetValueTypeAction(MVT::Vector, Expand, *this, TransformToType,
                     ValueTypeActions);

  // Loop over all of the legal vector value types, specifying an identity type
  // transformation.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
    if (isTypeLegal((MVT::ValueType)i))
      TransformToType[i] = (MVT::ValueType)i;
  }

  assert(isTypeLegal(MVT::f64) && "Target does not support FP?");
  TransformToType[MVT::f64] = MVT::f64;
}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return NULL;
}

/// getPackedTypeBreakdown - Packed types are broken down into some number of
/// legal scalar types.  For example, <8 x float> maps to 2 MVT::v4f32 values
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
///
/// This method returns the number and type of the resultant breakdown.
///
unsigned TargetLowering::getPackedTypeBreakdown(const PackedType *PTy,
                                                MVT::ValueType &PTyElementVT,
                                                MVT::ValueType &PTyLegalElementVT) const {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = PTy->getNumElements();
  MVT::ValueType EltTy = getValueType(PTy->getElementType());

  unsigned NumVectorRegs = 1;

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !isTypeLegal(getVectorType(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  MVT::ValueType VT;
  if (NumElts == 1) {
    VT = EltTy;
  } else {
    VT = getVectorType(EltTy, NumElts);
  }
  PTyElementVT = VT;

  MVT::ValueType DestVT = getTypeToTransformTo(VT);
  PTyLegalElementVT = DestVT;
  if (DestVT < VT) {
    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(MVT::getSizeInBits(VT)/MVT::getSizeInBits(DestVT));
  } else {
    // Otherwise, promotion or legal types use the same number of registers as
    // the vector decimated to the appropriate level.
    return NumVectorRegs;
  }

  return DestVT;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer.  If so, check to see if there
/// are any bits set in the constant that are not demanded.  If so, shrink the
/// constant and return true.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDOperand Op,
                                                               uint64_t Demanded) {
  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Op.getOpcode()) {
  default: break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if ((~Demanded & C->getValue()) != 0) {
        MVT::ValueType VT = Op.getValueType();
        SDOperand New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
                                    DAG.getConstant(Demanded & C->getValue(),
                                                    VT));
        return CombineTo(Op, New);
      }
    break;
  }
  return false;
}

/// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream.  If we can
/// use this information to simplify Op, create a new simplified DAG node and
/// return true, returning the original and new nodes in Old and New.
/// Otherwise, analyze the expression and return a mask of KnownOne and
/// KnownZero bits for the expression (used to simplify the caller).  The
/// KnownZero/One bits may only be accurate for those bits in the DemandedMask.
bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask,
                                          uint64_t &KnownZero,
                                          uint64_t &KnownOne,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth) const {
  KnownZero = KnownOne = 0;   // Don't know anything.
  // Other users may use these bits.
  if (!Op.Val->hasOneUse()) {
    if (Depth != 0) {
      // If not at the root, just compute the KnownZero/KnownOne bits to
      // simplify things downstream.
      ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedMask to all bits.
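    // (getIntVTBitMask produces a mask with every bit of the type set, e.g.
    //  0xFFFF for MVT::i16, so from this point on all bits are demanded.)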
    DemandedMask = MVT::getIntVTBitMask(Op.getValueType());
  } else if (DemandedMask == 0) {
    // Not demanding any bits from Op.
    if (Op.getOpcode() != ISD::UNDEF)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::UNDEF, Op.getValueType()));
    return false;
  } else if (Depth == 6) {        // Limit search depth.
    return false;
  }

  uint64_t KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getValue() & DemandedMask;
    KnownZero = ~KnownOne & DemandedMask;
    return false;   // Don't fall through, will infinitely loop.
  case ISD::AND:
    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS.  Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      uint64_t LHSZero, LHSOne;
      ComputeMaskedBits(Op.getOperand(0), DemandedMask,
                        LHSZero, LHSOne, Depth+1);
      // If the LHS already has zeros where RHSC does, this and is dead.
      if ((LHSZero & DemandedMask) == (~RHSC->getValue() & DemandedMask))
        return TLO.CombineTo(Op, Op.getOperand(0));
      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & DemandedMask))
        return true;
    }

    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & ~KnownZero,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((DemandedMask & ~KnownZero2 & KnownOne) == (DemandedMask & ~KnownZero2))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & ~KnownZero & KnownOne2) == (DemandedMask & ~KnownZero))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((DemandedMask & (KnownZero|KnownZero2)) == DemandedMask)
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType()));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask & ~KnownZero2))
      return true;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  case ISD::OR:
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & ~KnownOne,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if ((DemandedMask & ~KnownOne2 & KnownZero) == (DemandedMask & ~KnownOne2))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & ~KnownOne & KnownZero2) == (DemandedMask & ~KnownOne))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((DemandedMask & (~KnownZero) & KnownOne2) ==
        (DemandedMask & (~KnownZero)))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & (~KnownZero2) & KnownOne) ==
        (DemandedMask & (~KnownZero2)))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  case ISD::XOR:
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if ((DemandedMask & KnownZero) == DemandedMask)
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & KnownZero2) == DemandedMask)
      return TLO.CombineTo(Op, Op.getOperand(1));

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);

    // If all of the unknown bits are known to be zero on one side or the other
    // (but not both) turn this into an *inclusive* or.
    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (uint64_t UnknownBits = DemandedMask & ~(KnownZeroOut|KnownOneOut))
      if ((UnknownBits & (KnownZero|KnownZero2)) == UnknownBits)
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(),
                                                 Op.getOperand(0),
                                                 Op.getOperand(1)));
    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask) { // all known
      if ((KnownOne & KnownOne2) == KnownOne) {
        MVT::ValueType VT = Op.getValueType();
        SDOperand ANDC = TLO.DAG.getConstant(~KnownOne & DemandedMask, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0),
                                                 ANDC));
      }
    }

    // If the RHS is a constant, see if we can simplify it.
    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    KnownZero = KnownZeroOut;
    KnownOne  = KnownOneOut;
    break;
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult)
      KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    break;
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SHL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask >> SA->getValue(),
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      KnownZero <<= SA->getValue();
      KnownOne  <<= SA->getValue();
      KnownZero |= (1ULL << SA->getValue())-1;  // low bits known zero.
    }
    break;
  case ISD::SRL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT::ValueType VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();

      // Compute the new bits that are at the top now.
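      // (For example, with a 32-bit value shifted right by 4, HighBits below
      //  is ((1 << 4) - 1) << 28 == 0xF0000000: exactly the bits the shift
      //  fills with zero.)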
      uint64_t HighBits = (1ULL << ShAmt)-1;
      HighBits <<= MVT::getSizeInBits(VT) - ShAmt;
      uint64_t TypeMask = MVT::getIntVTBitMask(VT);

      if (SimplifyDemandedBits(Op.getOperand(0),
                               (DemandedMask << ShAmt) & TypeMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero &= TypeMask;
      KnownOne  &= TypeMask;
      KnownZero >>= ShAmt;
      KnownOne  >>= ShAmt;
      KnownZero |= HighBits;  // high bits known zero.
    }
    break;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT::ValueType VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();

      // Compute the new bits that are at the top now.
      uint64_t HighBits = (1ULL << ShAmt)-1;
      HighBits <<= MVT::getSizeInBits(VT) - ShAmt;
      uint64_t TypeMask = MVT::getIntVTBitMask(VT);

      if (SimplifyDemandedBits(Op.getOperand(0),
                               (DemandedMask << ShAmt) & TypeMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero &= TypeMask;
      KnownOne  &= TypeMask;
      KnownZero >>= SA->getValue();
      KnownOne  >>= SA->getValue();

      // Handle the sign bits.
      uint64_t SignBit = MVT::getIntVTSignBit(VT);
      SignBit >>= SA->getValue();  // Adjust to where it is now in the mask.

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if ((KnownZero & SignBit) || (HighBits & ~DemandedMask) == HighBits) {
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, VT, Op.getOperand(0),
                                                 Op.getOperand(1)));
      } else if (KnownOne & SignBit) { // New bits are known one.
        KnownOne |= HighBits;
      }
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    MVT::ValueType VT = Op.getValueType();
    MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    // Sign extension.  Compute the demanded bits in the result that are not
    // present in the input.
    uint64_t NewBits = ~MVT::getIntVTBitMask(EVT) & DemandedMask;

    // If none of the extended bits are demanded, eliminate the sextinreg.
    if (NewBits == 0)
      return TLO.CombineTo(Op, Op.getOperand(0));

    uint64_t InSignBit = MVT::getIntVTSignBit(EVT);
    int64_t InputDemandedBits = DemandedMask & MVT::getIntVTBitMask(EVT);

    // Since the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InputDemandedBits |= InSignBit;

    if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, convert this into a zero extension.
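    // (For instance, for a sign_extend_inreg from i8: if bit 7 of the operand
    //  is already known zero, sign- and zero-extension agree, so the cheaper
    //  zero-extend-in-register form is used below.)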
    if (KnownZero & InSignBit)
      return TLO.CombineTo(Op,
                           TLO.DAG.getZeroExtendInReg(Op.getOperand(0), EVT));

    if (KnownOne & InSignBit) {    // Input sign bit known set
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {                       // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne  &= ~NewBits;
    }
    break;
  }
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP: {
    MVT::ValueType VT = Op.getValueType();
    unsigned LowBits = Log2_32(MVT::getSizeInBits(VT))+1;
    KnownZero = ~((1ULL << LowBits)-1) & MVT::getIntVTBitMask(VT);
    KnownOne  = 0;
    break;
  }
  case ISD::ZEXTLOAD: {
    MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(3))->getVT();
    KnownZero |= ~MVT::getIntVTBitMask(VT) & DemandedMask;
    break;
  }
  case ISD::ZERO_EXTEND: {
    uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());

    // If none of the top bits are demanded, convert this into an any_extend.
    uint64_t NewBits = (~InMask) & DemandedMask;
    if (NewBits == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero |= NewBits;
    break;
  }
  case ISD::SIGN_EXTEND: {
    MVT::ValueType InVT = Op.getOperand(0).getValueType();
    uint64_t InMask    = MVT::getIntVTBitMask(InVT);
    uint64_t InSignBit = MVT::getIntVTSignBit(InVT);
    uint64_t NewBits   = (~InMask) & DemandedMask;

    // If none of the top bits are demanded, convert this into an any_extend.
    if (NewBits == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // Since some of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    uint64_t InDemandedBits = DemandedMask & InMask;
    InDemandedBits |= InSignBit;

    if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;

    // If the sign bit is known zero, convert this to a zero extend.
    if (KnownZero & InSignBit)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // If the sign bit is known one, the top bits match.
    if (KnownOne & InSignBit) {
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {                       // Otherwise, top bits aren't known.
      KnownOne  &= ~NewBits;
      KnownZero &= ~NewBits;
    }
    break;
  }
  case ISD::ANY_EXTEND: {
    uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    break;
  }
  case ISD::AssertZext: {
    MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    uint64_t InMask = MVT::getIntVTBitMask(VT);
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero |= ~InMask & DemandedMask;
    break;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    // Just use ComputeMaskedBits to compute output bits.
    ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
    break;
  }

  // If we know the value of all of the demanded bits, return this as a
  // constant.
  if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
    return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType()));

  return false;
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
/// this predicate to simplify operations downstream.  Mask is known to be zero
/// for bits that V cannot have.
bool TargetLowering::MaskedValueIsZero(SDOperand Op, uint64_t Mask,
                                       unsigned Depth) const {
  uint64_t KnownZero, KnownOne;
  ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}

/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bitsets.  This code only analyzes bits in Mask, in order to short-circuit
/// processing.
void TargetLowering::ComputeMaskedBits(SDOperand Op, uint64_t Mask,
                                       uint64_t &KnownZero, uint64_t &KnownOne,
                                       unsigned Depth) const {
  KnownZero = KnownOne = 0;   // Don't know anything.
  if (Depth == 6 || Mask == 0)
    return;  // Limit search depth.

  uint64_t KnownZero2, KnownOne2;

  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getValue() & Mask;
    KnownZero = ~KnownOne & Mask;
    return;
  case ISD::AND:
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    Mask &= ~KnownZero;
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    return;
  case ISD::OR:
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    Mask &= ~KnownOne;
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    return;
  case ISD::XOR: {
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    uint64_t KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case ISD::SELECT:
    ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case ISD::SELECT_CC:
    ComputeMaskedBits(Op.getOperand(3), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult)
      KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    return;
  case ISD::SHL:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      Mask >>= SA->getValue();
      ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= SA->getValue();
      KnownOne  <<= SA->getValue();
      KnownZero |= (1ULL << SA->getValue())-1;  // low bits known zero.
    }
    return;
  case ISD::SRL:
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      uint64_t HighBits = (1ULL << SA->getValue())-1;
      HighBits <<= MVT::getSizeInBits(Op.getValueType())-SA->getValue();
      Mask <<= SA->getValue();
      ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero >>= SA->getValue();
      KnownOne  >>= SA->getValue();
      KnownZero |= HighBits;  // high bits known zero.
    }
    return;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      uint64_t HighBits = (1ULL << SA->getValue())-1;
      HighBits <<= MVT::getSizeInBits(Op.getValueType())-SA->getValue();
      Mask <<= SA->getValue();
      ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero >>= SA->getValue();
      KnownOne  >>= SA->getValue();

      // Handle the sign bits.
      uint64_t SignBit = 1ULL << (MVT::getSizeInBits(Op.getValueType())-1);
      SignBit >>= SA->getValue();  // Adjust to where it is now in the mask.

      if (KnownZero & SignBit) {        // New bits are known zero.
        KnownZero |= HighBits;
      } else if (KnownOne & SignBit) {  // New bits are known one.
        KnownOne |= HighBits;
      }
    }
    return;
  case ISD::SIGN_EXTEND_INREG: {
    MVT::ValueType VT = Op.getValueType();
    MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    // Sign extension.  Compute the demanded bits in the result that are not
    // present in the input.
    uint64_t NewBits = ~MVT::getIntVTBitMask(EVT) & Mask;

    uint64_t InSignBit = MVT::getIntVTSignBit(EVT);
    int64_t InputDemandedBits = Mask & MVT::getIntVTBitMask(EVT);

    // If the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if (NewBits)
      InputDemandedBits |= InSignBit;

    ComputeMaskedBits(Op.getOperand(0), InputDemandedBits,
                      KnownZero, KnownOne, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
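    // (e.g. for a sign_extend_inreg from i8, whatever is known about bit 7 of
    //  the operand is also known about bits 8 and up of the result.)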
    if (KnownZero & InSignBit) {         // Input sign bit known clear
      KnownZero |= NewBits;
      KnownOne  &= ~NewBits;
    } else if (KnownOne & InSignBit) {   // Input sign bit known set
      KnownOne  |= NewBits;
      KnownZero &= ~NewBits;
    } else {                             // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne  &= ~NewBits;
    }
    return;
  }
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP: {
    MVT::ValueType VT = Op.getValueType();
    unsigned LowBits = Log2_32(MVT::getSizeInBits(VT))+1;
    KnownZero = ~((1ULL << LowBits)-1) & MVT::getIntVTBitMask(VT);
    KnownOne  = 0;
    return;
  }
  case ISD::ZEXTLOAD: {
    MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(3))->getVT();
    KnownZero |= ~MVT::getIntVTBitMask(VT) & Mask;
    return;
  }
  case ISD::ZERO_EXTEND: {
    uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());
    uint64_t NewBits = (~InMask) & Mask;
    ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
                      KnownOne, Depth+1);
    KnownZero |= NewBits & Mask;
    KnownOne  &= ~NewBits;
    return;
  }
  case ISD::SIGN_EXTEND: {
    MVT::ValueType InVT = Op.getOperand(0).getValueType();
    unsigned InBits    = MVT::getSizeInBits(InVT);
    uint64_t InMask    = MVT::getIntVTBitMask(InVT);
    uint64_t InSignBit = 1ULL << (InBits-1);
    uint64_t NewBits   = (~InMask) & Mask;
    uint64_t InDemandedBits = Mask & InMask;

    // If any of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if (NewBits & Mask)
      InDemandedBits |= InSignBit;

    ComputeMaskedBits(Op.getOperand(0), InDemandedBits, KnownZero,
                      KnownOne, Depth+1);
    // If the sign bit is known zero or one, the top bits match.
    if (KnownZero & InSignBit) {
      KnownZero |= NewBits;
      KnownOne  &= ~NewBits;
    } else if (KnownOne & InSignBit) {
      KnownOne  |= NewBits;
      KnownZero &= ~NewBits;
    } else {                        // Otherwise, top bits aren't known.
      KnownOne  &= ~NewBits;
      KnownZero &= ~NewBits;
    }
    return;
  }
  case ISD::ANY_EXTEND: {
    MVT::ValueType VT = Op.getOperand(0).getValueType();
    ComputeMaskedBits(Op.getOperand(0), Mask & MVT::getIntVTBitMask(VT),
                      KnownZero, KnownOne, Depth+1);
    return;
  }
  case ISD::AssertZext: {
    MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    uint64_t InMask = MVT::getIntVTBitMask(VT);
    ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
                      KnownOne, Depth+1);
    KnownZero |= (~InMask) & Mask;
    return;
  }
  case ISD::ADD: {
    // Compute the known bits of both operands.
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the low clear bits
    // common to both LHS & RHS.  For example, 8+(X<<3) is known to have the
    // low 3 bits clear.
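    // (CountTrailingZeros_64(~KnownZero) is the number of low bits known zero
    //  in an operand; only the smaller of the two counts is guaranteed zero in
    //  the sum, since higher bits may be affected by carries out of unknown
    //  bits.)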
    uint64_t KnownZeroOut = std::min(CountTrailingZeros_64(~KnownZero),
                                     CountTrailingZeros_64(~KnownZero2));

    KnownZero = (1ULL << KnownZeroOut) - 1;
    KnownOne = 0;
    return;
  }
  case ISD::SUB: {
    ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0));
    if (!CLHS) return;

    // We know that the top bits of C-X are clear if X contains fewer bits
    // than C (i.e. no wrap-around can happen).  For example, 20-X is
    // positive if we can prove that X is >= 0 and < 16.
    MVT::ValueType VT = CLHS->getValueType(0);
    if ((CLHS->getValue() & MVT::getIntVTSignBit(VT)) == 0) {  // sign bit clear
      unsigned NLZ = CountLeadingZeros_64(CLHS->getValue()+1);
      uint64_t MaskV = (1ULL << (63-NLZ))-1; // NLZ can't be 64 with no sign bit
      MaskV = ~MaskV & MVT::getIntVTBitMask(VT);
      ComputeMaskedBits(Op.getOperand(1), MaskV, KnownZero, KnownOne, Depth+1);

      // If all of the MaskV bits are known to be zero, then we know the output
      // top bits are zero, because we now know that the output is from [0-C].
      if ((KnownZero & MaskV) == MaskV) {
        unsigned NLZ2 = CountLeadingZeros_64(CLHS->getValue());
        KnownZero = ~((1ULL << (64-NLZ2))-1) & Mask;  // Top bits known zero.
        KnownOne = 0;                                 // No one bits known.
      } else {
        KnownZero = KnownOne = 0;  // Otherwise, nothing known.
      }
    }
    return;
  }
  default:
    // Allow the target to implement this method for its nodes.
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
      computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne);
    }
    return;
  }
}

/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
void TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                    uint64_t Mask,
                                                    uint64_t &KnownZero,
                                                    uint64_t &KnownOne,
                                                    unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");
  KnownZero = 0;
  KnownOne = 0;
}

SDOperand TargetLowering::
PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
  // Default implementation: no optimization.
  return SDOperand();
}

//===----------------------------------------------------------------------===//
//  Inline Assembler Implementation Methods
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
TargetLowering::getConstraintType(char ConstraintLetter) const {
  // FIXME: lots more standard ones to handle.
  switch (ConstraintLetter) {
  default: return C_Unknown;
  case 'r': return C_RegisterClass;
  case 'm':    // memory
  case 'o':    // offsetable
  case 'V':    // not offsetable
    return C_Memory;
  case 'i':    // Simple Integer or Relocatable Constant
  case 'n':    // Simple Integer
  case 's':    // Relocatable Constant
  case 'I':    // Target registers.
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P':
    return C_Other;
  }
}

bool TargetLowering::isOperandValidForConstraint(SDOperand Op,
                                                 char ConstraintLetter) {
  switch (ConstraintLetter) {
  default: return false;
  case 'i':    // Simple Integer or Relocatable Constant
  case 'n':    // Simple Integer
  case 's':    // Relocatable Constant
    return true;  // FIXME: not right.
  }
}


std::vector<unsigned> TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  return std::vector<unsigned>();
}


std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT::ValueType VT) const {
  if (Constraint[0] != '{')
    return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
  assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");

  // Remove the braces from around the name.
  std::string RegName(Constraint.begin()+1, Constraint.end()-1);

  // Figure out which register class contains this reg.
  const MRegisterInfo *RI = TM.getRegisterInfo();
  for (MRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
       E = RI->regclass_end(); RCI != E; ++RCI) {
    const TargetRegisterClass *RC = *RCI;

    // If none of the value types for this register class are valid, we
    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
    bool isLegal = false;
    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
         I != E; ++I) {
      if (isTypeLegal(*I)) {
        isLegal = true;
        break;
      }
    }

    if (!isLegal) continue;

    for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
         I != E; ++I) {
      if (StringsEqualNoCase(RegName, RI->get(*I).Name))
        return std::make_pair(*I, RC);
    }
  }

  return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressImmediate - Return true if the integer value or
/// GlobalValue can be used as the offset of the target addressing mode.
bool TargetLowering::isLegalAddressImmediate(int64_t V) const {
  return false;
}
bool TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
  return false;
}
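As a rough illustration of how a backend hooks into the analysis above, a hypothetical target could override computeMaskedBitsForTargetNode for one of its own node types. MyTargetLowering and MyISD::LOAD_HI16 below are placeholder names, not part of LLVM; only the hook's signature and the KnownZero/KnownOne convention are taken from this file. This is a sketch, not a definitive implementation.

void MyTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                      uint64_t Mask,
                                                      uint64_t &KnownZero,
                                                      uint64_t &KnownOne,
                                                      unsigned Depth) const {
  KnownZero = KnownOne = 0;   // Assume nothing is known by default.
  switch (Op.getOpcode()) {
  default: break;
  case MyISD::LOAD_HI16:
    // This hypothetical node always produces a value whose low 16 bits are
    // zero, so report that; ComputeMaskedBits and MaskedValueIsZero callers
    // can then fold away masking of those bits.
    KnownZero = 0xFFFFULL & Mask;
    break;
  }
}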