//===- DataStructure.cpp - Implement the core data structure analysis ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the core data structure functionality.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/DataStructure/DSGraphTraits.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Timer.h"
#include <iostream>
#include <algorithm>
using namespace llvm;

#define COLLAPSE_ARRAYS_AGGRESSIVELY 0

namespace {
  Statistic<> NumFolds           ("dsa", "Number of nodes completely folded");
  Statistic<> NumCallNodesMerged ("dsa", "Number of call nodes merged");
  Statistic<> NumNodeAllocated   ("dsa", "Number of nodes allocated");
  Statistic<> NumDNE             ("dsa", "Number of nodes removed by reachability");
  Statistic<> NumTrivialDNE      ("dsa", "Number of nodes trivially removed");
  Statistic<> NumTrivialGlobalDNE("dsa", "Number of globals trivially removed");
  static cl::opt<unsigned>
  DSAFieldLimit("dsa-field-limit", cl::Hidden,
                cl::desc("Number of fields to track before collapsing a node"),
                cl::init(256));
};

#if 0
#define TIME_REGION(VARNAME, DESC) \
   NamedRegionTimer VARNAME(DESC)
#else
#define TIME_REGION(VARNAME, DESC)
#endif

using namespace DS;

/// isForwarding - Return true if this NodeHandle is forwarding to another
/// one.
bool DSNodeHandle::isForwarding() const {
  return N && N->isForwarding();
}

DSNode *DSNodeHandle::HandleForwarding() const {
  assert(N->isForwarding() && "Can only be invoked if forwarding!");

  // Handle node forwarding here!
  DSNode *Next = N->ForwardNH.getNode();  // Cause recursive shrinkage
  Offset += N->ForwardNH.getOffset();

  if (--N->NumReferrers == 0) {
    // Removing the last referrer to the node, sever the forwarding link
    N->stopForwarding();
  }

  N = Next;
  N->NumReferrers++;
  if (N->Size <= Offset) {
    assert(N->Size <= 1 && "Forwarded to shrunk but not collapsed node?");
    Offset = 0;
  }
  return N;
}

//===----------------------------------------------------------------------===//
// DSScalarMap Implementation
//===----------------------------------------------------------------------===//

DSNodeHandle &DSScalarMap::AddGlobal(GlobalValue *GV) {
  assert(ValueMap.count(GV) == 0 && "GV already exists!");

  // If the node doesn't exist, check to see if it's a global that is
  // equated to another global in the program.
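  // (Illustrative note, not part of the original source: the lookup below
  // relies on the llvm::EquivalenceClasses interface.  A minimal sketch of how
  // two globals come to share one leader, using hypothetical values GV1/GV2:
  //
  //   EquivalenceClasses<GlobalValue*> EC;
  //   EC.unionSets(GV1, GV2);                     // GV1 and GV2 now equivalent
  //   GlobalValue *Leader = *EC.findLeader(EC.findValue(GV2));
  //
  // Only the leader receives a scalar-map entry; other members resolve to it.)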
  EquivalenceClasses<GlobalValue*>::iterator ECI = GlobalECs.findValue(GV);
  if (ECI != GlobalECs.end()) {
    GlobalValue *Leader = *GlobalECs.findLeader(ECI);
    if (Leader != GV) {
      GV = Leader;
      iterator I = ValueMap.find(GV);
      if (I != ValueMap.end())
        return I->second;
    }
  }

  // Okay, this is either not an equivalenced global or it is the leader, it
  // will be inserted into the scalar map now.
  GlobalSet.insert(GV);

  return ValueMap.insert(std::make_pair(GV, DSNodeHandle())).first->second;
}


//===----------------------------------------------------------------------===//
// DSNode Implementation
//===----------------------------------------------------------------------===//

DSNode::DSNode(const Type *T, DSGraph *G)
  : NumReferrers(0), Size(0), ParentGraph(G), Ty(Type::VoidTy), NodeType(0) {
  // Add the type entry if it is specified...
  if (T) mergeTypeInfo(T, 0);
  if (G) G->addNode(this);
  ++NumNodeAllocated;
}

// DSNode copy constructor... do not copy over the referrers list!
DSNode::DSNode(const DSNode &N, DSGraph *G, bool NullLinks)
  : NumReferrers(0), Size(N.Size), ParentGraph(G),
    Ty(N.Ty), Globals(N.Globals), NodeType(N.NodeType) {
  if (!NullLinks) {
    Links = N.Links;
  } else
    Links.resize(N.Links.size()); // Create the appropriate number of null links
  G->addNode(this);
  ++NumNodeAllocated;
}

/// getTargetData - Get the target data object used to construct this node.
///
const TargetData &DSNode::getTargetData() const {
  return ParentGraph->getTargetData();
}

void DSNode::assertOK() const {
  assert((Ty != Type::VoidTy ||
          Ty == Type::VoidTy && (Size == 0 ||
                                 (NodeType & DSNode::Array))) &&
         "Node not OK!");

  assert(ParentGraph && "Node has no parent?");
  const DSScalarMap &SM = ParentGraph->getScalarMap();
  for (unsigned i = 0, e = Globals.size(); i != e; ++i) {
    assert(SM.global_count(Globals[i]));
    assert(SM.find(Globals[i])->second.getNode() == this);
  }
}

/// forwardNode - Mark this node as being obsolete, and all references to it
/// should be forwarded to the specified node and offset.
///
void DSNode::forwardNode(DSNode *To, unsigned Offset) {
  assert(this != To && "Cannot forward a node to itself!");
  assert(ForwardNH.isNull() && "Already forwarding from this node!");
  if (To->Size <= 1) Offset = 0;
  assert((Offset < To->Size || (Offset == To->Size && Offset == 0)) &&
         "Forwarded offset is wrong!");
  ForwardNH.setTo(To, Offset);
  NodeType = DEAD;
  Size = 0;
  Ty = Type::VoidTy;

  // Remove this node from the parent graph's Nodes list.
  ParentGraph->unlinkNode(this);
  ParentGraph = 0;
}

// addGlobal - Add an entry for a global value to the Globals list.  This also
// marks the node with the 'G' flag if it does not already have it.
//
void DSNode::addGlobal(GlobalValue *GV) {
  // First, check to make sure this is the leader if the global is in an
  // equivalence class.
  GV = getParentGraph()->getScalarMap().getLeaderForGlobal(GV);

  // Keep the list sorted.
  std::vector<GlobalValue*>::iterator I =
    std::lower_bound(Globals.begin(), Globals.end(), GV);

  if (I == Globals.end() || *I != GV) {
    Globals.insert(I, GV);
    NodeType |= GlobalNode;
  }
}

// removeGlobal - Remove the specified global that is explicitly in the globals
// list.
void DSNode::removeGlobal(GlobalValue *GV) {
  std::vector<GlobalValue*>::iterator I =
    std::lower_bound(Globals.begin(), Globals.end(), GV);
  assert(I != Globals.end() && *I == GV && "Global not in node!");
  Globals.erase(I);
}

/// foldNodeCompletely - If we determine that this node has some funny
/// behavior happening to it that we cannot represent, we fold it down to a
/// single, completely pessimistic, node.  This node is represented as a
/// single byte with a single TypeEntry of "void".
///
void DSNode::foldNodeCompletely() {
  if (isNodeCompletelyFolded()) return;  // If this node is already folded...

  ++NumFolds;

  // If this node has a size that is <= 1, we don't need to create a forwarding
  // node.
  if (getSize() <= 1) {
    NodeType |= DSNode::Array;
    Ty = Type::VoidTy;
    Size = 1;
    assert(Links.size() <= 1 && "Size is 1, but has more links?");
    Links.resize(1);
  } else {
    // Create the node we are going to forward to.  This is required because
    // some referrers may have an offset that is > 0.  By forcing them to
    // forward, the forwarder has the opportunity to correct the offset.
    DSNode *DestNode = new DSNode(0, ParentGraph);
    DestNode->NodeType = NodeType|DSNode::Array;
    DestNode->Ty = Type::VoidTy;
    DestNode->Size = 1;
    DestNode->Globals.swap(Globals);

    // Start forwarding to the destination node...
    forwardNode(DestNode, 0);

    if (!Links.empty()) {
      DestNode->Links.reserve(1);

      DSNodeHandle NH(DestNode);
      DestNode->Links.push_back(Links[0]);

      // If we have links, merge all of our outgoing links together...
      for (unsigned i = Links.size()-1; i != 0; --i)
        NH.getNode()->Links[0].mergeWith(Links[i]);
      Links.clear();
    } else {
      DestNode->Links.resize(1);
    }
  }
}

/// isNodeCompletelyFolded - Return true if this node has been completely
/// folded down to something that can never be expanded, effectively losing
/// all of the field sensitivity that may be present in the node.
///
bool DSNode::isNodeCompletelyFolded() const {
  return getSize() == 1 && Ty == Type::VoidTy && isArray();
}

/// addFullGlobalsList - Compute the full set of global values that are
/// represented by this node.  Unlike getGlobalsList(), this requires a fair
/// amount of work to compute, so don't treat this method call as free.
void DSNode::addFullGlobalsList(std::vector<GlobalValue*> &List) const {
  if (globals_begin() == globals_end()) return;

  EquivalenceClasses<GlobalValue*> &EC = getParentGraph()->getGlobalECs();

  for (globals_iterator I = globals_begin(), E = globals_end(); I != E; ++I) {
    EquivalenceClasses<GlobalValue*>::iterator ECI = EC.findValue(*I);
    if (ECI == EC.end())
      List.push_back(*I);
    else
      List.insert(List.end(), EC.member_begin(ECI), EC.member_end());
  }
}

/// addFullFunctionList - Identical to addFullGlobalsList, but only return the
/// functions in the full list.
void DSNode::addFullFunctionList(std::vector<Function*> &List) const {
  if (globals_begin() == globals_end()) return;

  EquivalenceClasses<GlobalValue*> &EC = getParentGraph()->getGlobalECs();

  for (globals_iterator I = globals_begin(), E = globals_end(); I != E; ++I) {
    EquivalenceClasses<GlobalValue*>::iterator ECI = EC.findValue(*I);
    if (ECI == EC.end()) {
      if (Function *F = dyn_cast<Function>(*I))
        List.push_back(F);
    } else {
      for (EquivalenceClasses<GlobalValue*>::member_iterator MI =
             EC.member_begin(ECI), E = EC.member_end(); MI != E; ++MI)
        if (Function *F = dyn_cast<Function>(*MI))
          List.push_back(F);
    }
  }
}

namespace {
  /// TypeElementWalker Class - Used for implementation of physical subtyping...
  ///
  class TypeElementWalker {
    struct StackState {
      const Type *Ty;
      unsigned Offset;
      unsigned Idx;
      StackState(const Type *T, unsigned Off = 0)
        : Ty(T), Offset(Off), Idx(0) {}
    };

    std::vector<StackState> Stack;
    const TargetData &TD;
  public:
    TypeElementWalker(const Type *T, const TargetData &td) : TD(td) {
      Stack.push_back(T);
      StepToLeaf();
    }

    bool isDone() const { return Stack.empty(); }
    const Type *getCurrentType()   const { return Stack.back().Ty; }
    unsigned    getCurrentOffset() const { return Stack.back().Offset; }

    void StepToNextType() {
      PopStackAndAdvance();
      StepToLeaf();
    }

  private:
    /// PopStackAndAdvance - Pop the current element off of the stack and
    /// advance the underlying element to the next contained member.
    void PopStackAndAdvance() {
      assert(!Stack.empty() && "Cannot pop an empty stack!");
      Stack.pop_back();
      while (!Stack.empty()) {
        StackState &SS = Stack.back();
        if (const StructType *ST = dyn_cast<StructType>(SS.Ty)) {
          ++SS.Idx;
          if (SS.Idx != ST->getNumElements()) {
            const StructLayout *SL = TD.getStructLayout(ST);
            SS.Offset +=
              unsigned(SL->MemberOffsets[SS.Idx]-SL->MemberOffsets[SS.Idx-1]);
            return;
          }
          Stack.pop_back();  // At the end of the structure
        } else {
          const ArrayType *AT = cast<ArrayType>(SS.Ty);
          ++SS.Idx;
          if (SS.Idx != AT->getNumElements()) {
            SS.Offset += unsigned(TD.getTypeSize(AT->getElementType()));
            return;
          }
          Stack.pop_back();  // At the end of the array
        }
      }
    }

    /// StepToLeaf - Used by physical subtyping to move to the first leaf node
    /// on the type stack.
    void StepToLeaf() {
      if (Stack.empty()) return;
      while (!Stack.empty() && !Stack.back().Ty->isFirstClassType()) {
        StackState &SS = Stack.back();
        if (const StructType *ST = dyn_cast<StructType>(SS.Ty)) {
          if (ST->getNumElements() == 0) {
            assert(SS.Idx == 0);
            PopStackAndAdvance();
          } else {
            // Step into the structure...
            assert(SS.Idx < ST->getNumElements());
            const StructLayout *SL = TD.getStructLayout(ST);
            Stack.push_back(StackState(ST->getElementType(SS.Idx),
                            SS.Offset+unsigned(SL->MemberOffsets[SS.Idx])));
          }
        } else {
          const ArrayType *AT = cast<ArrayType>(SS.Ty);
          if (AT->getNumElements() == 0) {
            assert(SS.Idx == 0);
            PopStackAndAdvance();
          } else {
            // Step into the array...
            assert(SS.Idx < AT->getNumElements());
            Stack.push_back(StackState(AT->getElementType(),
                                       SS.Offset+SS.Idx*
                                       unsigned(TD.getTypeSize(AT->getElementType()))));
          }
        }
      }
    }
  };
} // end anonymous namespace

/// ElementTypesAreCompatible - Check to see if the specified types are
/// "physically" compatible.  If so, return true, else return false.  We only
/// have to check the fields in T1: T2 may be larger than T1.  If AllowLargerT1
/// is true, then we also allow a larger T1.
///
static bool ElementTypesAreCompatible(const Type *T1, const Type *T2,
                                      bool AllowLargerT1, const TargetData &TD){
  TypeElementWalker T1W(T1, TD), T2W(T2, TD);

  while (!T1W.isDone() && !T2W.isDone()) {
    if (T1W.getCurrentOffset() != T2W.getCurrentOffset())
      return false;

    const Type *T1 = T1W.getCurrentType();
    const Type *T2 = T2W.getCurrentType();
    if (T1 != T2 && !T1->isLosslesslyConvertibleTo(T2))
      return false;

    T1W.StepToNextType();
    T2W.StepToNextType();
  }

  return AllowLargerT1 || T1W.isDone();
}


/// mergeTypeInfo - This method merges the specified type into the current node
/// at the specified offset.  This may update the current node's type record if
/// this gives more information to the node, it may do nothing to the node if
/// this information is already known, or it may merge the node completely (and
/// return true) if the information is incompatible with what is already known.
///
/// This method returns true if the node is completely folded, otherwise false.
///
bool DSNode::mergeTypeInfo(const Type *NewTy, unsigned Offset,
                           bool FoldIfIncompatible) {
  const TargetData &TD = getTargetData();
  // Check to make sure the Size member is up-to-date.  Size can be one of the
  // following:
  //  Size = 0, Ty = Void: Nothing is known about this node.
  //  Size = 0, Ty = FnTy: FunctionPtr doesn't have a size, so we use zero
  //  Size = 1, Ty = Void, Array = 1: The node is collapsed
  //  Otherwise, sizeof(Ty) = Size
  //
  assert(((Size == 0 && Ty == Type::VoidTy && !isArray()) ||
          (Size == 0 && !Ty->isSized() && !isArray()) ||
          (Size == 1 && Ty == Type::VoidTy && isArray()) ||
          (Size == 0 && !Ty->isSized() && !isArray()) ||
          (TD.getTypeSize(Ty) == Size)) &&
         "Size member of DSNode doesn't match the type structure!");
  assert(NewTy != Type::VoidTy && "Cannot merge void type into DSNode!");

  if (Offset == 0 && NewTy == Ty)
    return false;  // This should be a common case, handle it efficiently

  // Return true immediately if the node is completely folded.
  if (isNodeCompletelyFolded()) return true;

  // If this is an array type, eliminate the outside arrays because they won't
  // be used anyway.  This greatly reduces the size of large static arrays used
  // as global variables, for example.
  //
  bool WillBeArray = false;
  while (const ArrayType *AT = dyn_cast<ArrayType>(NewTy)) {
    // FIXME: we might want to keep small arrays, but must be careful about
    // things like: [2 x [10000 x int*]]
    NewTy = AT->getElementType();
    WillBeArray = true;
  }

  // Figure out how big the new type we're merging in is...
  unsigned NewTySize = NewTy->isSized() ? (unsigned)TD.getTypeSize(NewTy) : 0;

  // Otherwise check to see if we can fold this type into the current node.  If
  // we can't, we fold the node completely, if we can, we potentially update our
  // internal state.
  //
  if (Ty == Type::VoidTy) {
    // If this is the first type that this node has seen, just accept it without
    // question....
    assert(Offset == 0 && !isArray() &&
           "Cannot have an offset into a void node!");

    // If this node would have to have an unreasonable number of fields, just
    // collapse it.  This can occur for fortran common blocks, which have stupid
    // things like { [100000000 x double], [1000000 x double] }.
    unsigned NumFields = (NewTySize+DS::PointerSize-1) >> DS::PointerShift;
    if (NumFields > DSAFieldLimit) {
      foldNodeCompletely();
      return true;
    }

    Ty = NewTy;
    NodeType &= ~Array;
    if (WillBeArray) NodeType |= Array;
    Size = NewTySize;

    // Calculate the number of outgoing links from this node.
    Links.resize(NumFields);
    return false;
  }

  // Handle node expansion case here...
  if (Offset+NewTySize > Size) {
    // It is illegal to grow this node if we have treated it as an array of
    // objects...
    if (isArray()) {
      if (FoldIfIncompatible) foldNodeCompletely();
      return true;
    }

    // If this node would have to have an unreasonable number of fields, just
    // collapse it.  This can occur for fortran common blocks, which have stupid
    // things like { [100000000 x double], [1000000 x double] }.
    unsigned NumFields = (NewTySize+Offset+DS::PointerSize-1) >> DS::PointerShift;
    if (NumFields > DSAFieldLimit) {
      foldNodeCompletely();
      return true;
    }

    if (Offset) {
      // Handle some common cases:
      //   Ty:    struct { t1, t2, t3, t4, ..., tn }
      //   NewTy: struct { offset, stuff... }
      // Try merging with NewTy: struct { t1, t2, stuff... } if the offset
      // lands exactly on a field in Ty.
      if (isa<StructType>(NewTy) && isa<StructType>(Ty)) {
        DEBUG(std::cerr << "Ty: " << *Ty << "\nNewTy: " << *NewTy << "@"
                        << Offset << "\n");
        unsigned O = 0;
        const StructType *STy = cast<StructType>(Ty);
        const StructLayout &SL = *TD.getStructLayout(STy);
        unsigned i = SL.getElementContainingOffset(Offset);
        // Either we hit it exactly or we give up.
        if (SL.MemberOffsets[i] != Offset) {
          if (FoldIfIncompatible) foldNodeCompletely();
          return true;
        }
        std::vector<const Type*> nt;
        for (unsigned x = 0; x < i; ++x)
          nt.push_back(STy->getElementType(x));
        STy = cast<StructType>(NewTy);
        nt.insert(nt.end(), STy->element_begin(), STy->element_end());
        // ... and merge.
        STy = StructType::get(nt);
        DEBUG(std::cerr << "Trying with: " << *STy << "\n");
        return mergeTypeInfo(STy, 0);
      }

      std::cerr << "UNIMP: Trying to merge a growth type into "
                << "offset != 0: Collapsing!\n";
      abort();
      if (FoldIfIncompatible) foldNodeCompletely();
      return true;

    }


    // Okay, the situation is nice and simple, we are trying to merge a type in
    // at offset 0 that is bigger than our current type.  Implement this by
    // switching to the new type and then merge in the smaller one, which should
    // hit the other code path here.  If the other code path decides it's not
    // ok, it will collapse the node as appropriate.
    //

    const Type *OldTy = Ty;
    Ty = NewTy;
    NodeType &= ~Array;
    if (WillBeArray) NodeType |= Array;
    Size = NewTySize;

    // Must grow links to be the appropriate size...
    Links.resize(NumFields);

    // Merge in the old type now... which is guaranteed to be smaller than the
    // "current" type.
    return mergeTypeInfo(OldTy, 0);
  }

  assert(Offset <= Size &&
         "Cannot merge something into a part of our type that doesn't exist!");

  // Find the section of Ty that NewTy overlaps with... first we find the
  // type that starts at offset Offset.
  //
  unsigned O = 0;
  const Type *SubType = Ty;
  while (O < Offset) {
    assert(Offset-O < TD.getTypeSize(SubType) && "Offset out of range!");

    switch (SubType->getTypeID()) {
    case Type::StructTyID: {
      const StructType *STy = cast<StructType>(SubType);
      const StructLayout &SL = *TD.getStructLayout(STy);
      unsigned i = SL.getElementContainingOffset(Offset-O);

      // The offset we are looking for must be in the i'th element...
      SubType = STy->getElementType(i);
      O += (unsigned)SL.MemberOffsets[i];
      break;
    }
    case Type::ArrayTyID: {
      SubType = cast<ArrayType>(SubType)->getElementType();
      unsigned ElSize = (unsigned)TD.getTypeSize(SubType);
      unsigned Remainder = (Offset-O) % ElSize;
      O = Offset-Remainder;
      break;
    }
    default:
      if (FoldIfIncompatible) foldNodeCompletely();
      return true;
    }
  }

  assert(O == Offset && "Could not achieve the correct offset!");

  // If we found our type exactly, early exit
  if (SubType == NewTy) return false;

  // Differing function types don't require us to merge.  They are not values
  // anyway.
  if (isa<FunctionType>(SubType) &&
      isa<FunctionType>(NewTy)) return false;

  unsigned SubTypeSize = SubType->isSized() ?
    (unsigned)TD.getTypeSize(SubType) : 0;

  // Ok, we are getting desperate now.  Check for physical subtyping, where we
  // just require each element in the node to be compatible.
  if (NewTySize <= SubTypeSize && NewTySize && NewTySize < 256 &&
      SubTypeSize && SubTypeSize < 256 &&
      ElementTypesAreCompatible(NewTy, SubType, !isArray(), TD))
    return false;

  // Okay, so we found the leader type at the offset requested.  Search the list
  // of types that starts at this offset.  If SubType is currently an array or
  // structure, the type desired may actually be the first element of the
  // composite type...
  //
  unsigned PadSize = SubTypeSize; // Size, including pad memory which is ignored
  while (SubType != NewTy) {
    const Type *NextSubType = 0;
    unsigned NextSubTypeSize = 0;
    unsigned NextPadSize = 0;
    switch (SubType->getTypeID()) {
    case Type::StructTyID: {
      const StructType *STy = cast<StructType>(SubType);
      const StructLayout &SL = *TD.getStructLayout(STy);
      if (SL.MemberOffsets.size() > 1)
        NextPadSize = (unsigned)SL.MemberOffsets[1];
      else
        NextPadSize = SubTypeSize;
      NextSubType = STy->getElementType(0);
      NextSubTypeSize = (unsigned)TD.getTypeSize(NextSubType);
      break;
    }
    case Type::ArrayTyID:
      NextSubType = cast<ArrayType>(SubType)->getElementType();
      NextSubTypeSize = (unsigned)TD.getTypeSize(NextSubType);
      NextPadSize = NextSubTypeSize;
      break;
    default: ;
      // fall out
    }

    if (NextSubType == 0)
      break;   // In the default case, break out of the loop

    if (NextPadSize < NewTySize)
      break;   // Don't allow shrinking to a smaller type than NewTySize
    SubType = NextSubType;
    SubTypeSize = NextSubTypeSize;
    PadSize = NextPadSize;
  }

  // If we found the type exactly, return it...
  if (SubType == NewTy)
    return false;

  // Check to see if we have a compatible, but different type...
  if (NewTySize == SubTypeSize) {
    // Check to see if this type is obviously convertible... int -> uint f.e.
    if (NewTy->isLosslesslyConvertibleTo(SubType))
      return false;

    // Check to see if we have a pointer & integer mismatch going on here,
    // loading a pointer as a long, for example.
    //
    if (SubType->isInteger() && isa<PointerType>(NewTy) ||
        NewTy->isInteger() && isa<PointerType>(SubType))
      return false;
  } else if (NewTySize > SubTypeSize && NewTySize <= PadSize) {
    // We are accessing the field, plus some structure padding.  Ignore the
    // structure padding.
    return false;
  }

  Module *M = 0;
  if (getParentGraph()->retnodes_begin() != getParentGraph()->retnodes_end())
    M = getParentGraph()->retnodes_begin()->first->getParent();
  DEBUG(std::cerr << "MergeTypeInfo Folding OrigTy: ";
        WriteTypeSymbolic(std::cerr, Ty, M) << "\n due to:";
        WriteTypeSymbolic(std::cerr, NewTy, M) << " @ " << Offset << "!\n"
                                               << "SubType: ";
        WriteTypeSymbolic(std::cerr, SubType, M) << "\n\n");

  if (FoldIfIncompatible) foldNodeCompletely();
  return true;
}



/// addEdgeTo - Add an edge from the current node to the specified node.  This
/// can cause merging of nodes in the graph.
///
void DSNode::addEdgeTo(unsigned Offset, const DSNodeHandle &NH) {
  if (NH.isNull()) return;       // Nothing to do

  if (isNodeCompletelyFolded())
    Offset = 0;

  DSNodeHandle &ExistingEdge = getLink(Offset);
  if (!ExistingEdge.isNull()) {
    // Merge the two nodes...
    ExistingEdge.mergeWith(NH);
  } else {                       // No merging to perform...
    setLink(Offset, NH);         // Just force a link in there...
  }
}


/// MergeSortedVectors - Efficiently merge a vector into another vector where
/// duplicates are not allowed and both are sorted.  This assumes that 'T's are
/// efficiently copyable and have sane comparison semantics.
///
static void MergeSortedVectors(std::vector<GlobalValue*> &Dest,
                               const std::vector<GlobalValue*> &Src) {
  // By far, the most common cases will be the simple ones.  In these cases,
  // avoid having to allocate a temporary vector...
  //
  if (Src.empty()) {             // Nothing to merge in...
    return;
  } else if (Dest.empty()) {     // Just copy the result in...
    Dest = Src;
  } else if (Src.size() == 1) {  // Insert a single element...
    const GlobalValue *V = Src[0];
    std::vector<GlobalValue*>::iterator I =
      std::lower_bound(Dest.begin(), Dest.end(), V);
    if (I == Dest.end() || *I != Src[0])  // If not already contained...
      Dest.insert(I, Src[0]);
  } else if (Dest.size() == 1) {
    GlobalValue *Tmp = Dest[0];           // Save value in temporary...
    Dest = Src;                           // Copy over list...
    std::vector<GlobalValue*>::iterator I =
      std::lower_bound(Dest.begin(), Dest.end(), Tmp);
    if (I == Dest.end() || *I != Tmp)     // If not already contained...
      Dest.insert(I, Tmp);

  } else {
    // Make a copy to the side of Dest...
    std::vector<GlobalValue*> Old(Dest);

    // Make space for all of the type entries now...
    Dest.resize(Dest.size()+Src.size());

    // Merge the two sorted ranges together... into Dest.
    std::merge(Old.begin(), Old.end(), Src.begin(), Src.end(), Dest.begin());

    // Now erase any duplicate entries that may have accumulated into the
    // vectors (because they were in both of the input sets)
    Dest.erase(std::unique(Dest.begin(), Dest.end()), Dest.end());
  }
}

void DSNode::mergeGlobals(const std::vector<GlobalValue*> &RHS) {
  MergeSortedVectors(Globals, RHS);
}

// MergeNodes - Helper function for DSNode::mergeWith().
// This function does the hard work of merging two nodes, CurNodeH
// and NH after filtering out trivial cases and making sure that
// CurNodeH.offset >= NH.offset.
//
// ***WARNING***
// Since merging may cause either node to go away, we must always
// use the node-handles to refer to the nodes.  These node handles are
// automatically updated during merging, so will always provide access
// to the correct node after a merge.
//
void DSNode::MergeNodes(DSNodeHandle& CurNodeH, DSNodeHandle& NH) {
  assert(CurNodeH.getOffset() >= NH.getOffset() &&
         "This should have been enforced in the caller.");
  assert(CurNodeH.getNode()->getParentGraph()==NH.getNode()->getParentGraph() &&
         "Cannot merge two nodes that are not in the same graph!");

  // Now we know that Offset >= NH.Offset, so convert it so our "Offset" (with
  // respect to NH.Offset) is now zero.  NOffset is the distance from the base
  // of our object that N starts from.
  //
  unsigned NOffset = CurNodeH.getOffset()-NH.getOffset();
  unsigned NSize = NH.getNode()->getSize();

  // If the two nodes are of different size, and the smaller node has the array
  // bit set, collapse!
  if (NSize != CurNodeH.getNode()->getSize()) {
#if COLLAPSE_ARRAYS_AGGRESSIVELY
    if (NSize < CurNodeH.getNode()->getSize()) {
      if (NH.getNode()->isArray())
        NH.getNode()->foldNodeCompletely();
    } else if (CurNodeH.getNode()->isArray()) {
      NH.getNode()->foldNodeCompletely();
    }
#endif
  }

  // Merge the type entries of the two nodes together...
  if (NH.getNode()->Ty != Type::VoidTy)
    CurNodeH.getNode()->mergeTypeInfo(NH.getNode()->Ty, NOffset);
  assert(!CurNodeH.getNode()->isDeadNode());

  // If we are merging a node with a completely folded node, then both nodes are
  // now completely folded.
  //
  if (CurNodeH.getNode()->isNodeCompletelyFolded()) {
    if (!NH.getNode()->isNodeCompletelyFolded()) {
      NH.getNode()->foldNodeCompletely();
      assert(NH.getNode() && NH.getOffset() == 0 &&
             "folding did not make offset 0?");
      NOffset = NH.getOffset();
      NSize = NH.getNode()->getSize();
      assert(NOffset == 0 && NSize == 1);
    }
  } else if (NH.getNode()->isNodeCompletelyFolded()) {
    CurNodeH.getNode()->foldNodeCompletely();
    assert(CurNodeH.getNode() && CurNodeH.getOffset() == 0 &&
           "folding did not make offset 0?");
    NSize = NH.getNode()->getSize();
    NOffset = NH.getOffset();
    assert(NOffset == 0 && NSize == 1);
  }

  DSNode *N = NH.getNode();
  if (CurNodeH.getNode() == N || N == 0) return;
  assert(!CurNodeH.getNode()->isDeadNode());

  // Merge the NodeType information.
  CurNodeH.getNode()->NodeType |= N->NodeType;

  // Start forwarding to the new node!
  N->forwardNode(CurNodeH.getNode(), NOffset);
  assert(!CurNodeH.getNode()->isDeadNode());

  // Make all of the outgoing links of N now be outgoing links of CurNodeH.
  //
  for (unsigned i = 0; i < N->getNumLinks(); ++i) {
    DSNodeHandle &Link = N->getLink(i << DS::PointerShift);
    if (Link.getNode()) {
      // Compute the offset into the current node at which to
      // merge this link.  In the common case, this is a linear
      // relation to the offset in the original node (with
      // wrapping), but if the current node gets collapsed due to
      // recursive merging, we must make sure to merge in all remaining
      // links at offset zero.
      unsigned MergeOffset = 0;
      DSNode *CN = CurNodeH.getNode();
      if (CN->Size != 1)
        MergeOffset = ((i << DS::PointerShift)+NOffset) % CN->getSize();
      CN->addEdgeTo(MergeOffset, Link);
    }
  }

  // Now that there are no outgoing edges, all of the Links are dead.
  N->Links.clear();

  // Merge the globals list...
  if (!N->Globals.empty()) {
    CurNodeH.getNode()->mergeGlobals(N->Globals);

    // Delete the globals from the old node...
    std::vector<GlobalValue*>().swap(N->Globals);
  }
}


/// mergeWith - Merge this node and the specified node, moving all links to and
/// from the argument node into the current node, deleting the node argument.
/// Offset indicates what offset the specified node is to be merged into the
/// current node.
///
/// The specified node may be a null pointer (in which case, we update it to
/// point to this node).
///
void DSNode::mergeWith(const DSNodeHandle &NH, unsigned Offset) {
  DSNode *N = NH.getNode();
  if (N == this && NH.getOffset() == Offset)
    return;  // Noop

  // If the RHS is a null node, make it point to this node!
  if (N == 0) {
    NH.mergeWith(DSNodeHandle(this, Offset));
    return;
  }

  assert(!N->isDeadNode() && !isDeadNode());
  assert(!hasNoReferrers() && "Should not try to fold a useless node!");

  if (N == this) {
    // We cannot merge two pieces of the same node together, collapse the node
    // completely.
    DEBUG(std::cerr << "Attempting to merge two chunks of"
                    << " the same node together!\n");
    foldNodeCompletely();
    return;
  }

  // If both nodes are not at offset 0, make sure that we are merging the node
  // at a later offset into the node with the zero offset.
  //
  if (Offset < NH.getOffset()) {
    N->mergeWith(DSNodeHandle(this, Offset), NH.getOffset());
    return;
  } else if (Offset == NH.getOffset() && getSize() < N->getSize()) {
    // If the offsets are the same, merge the smaller node into the bigger node
    N->mergeWith(DSNodeHandle(this, Offset), NH.getOffset());
    return;
  }

  // Ok, now we can merge the two nodes.  Use a static helper that works with
  // two node handles, since "this" may get merged away at intermediate steps.
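  // (Illustrative note, not part of the original source: a sketch of why the
  // helper must operate on handles rather than raw pointers, using
  // hypothetical nodes N1 and N2:
  //
  //   DSNodeHandle A(N1, 0);
  //   N1->mergeWith(DSNodeHandle(N2, 0), 0);  // N1 may now be forwarding/dead
  //   DSNode *Live = A.getNode();             // the handle follows forwarding
  //
  // A raw DSNode* captured before the merge could still point at a dead node.)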
  DSNodeHandle CurNodeH(this, Offset);
  DSNodeHandle NHCopy(NH);
  DSNode::MergeNodes(CurNodeH, NHCopy);
}


//===----------------------------------------------------------------------===//
// ReachabilityCloner Implementation
//===----------------------------------------------------------------------===//

DSNodeHandle ReachabilityCloner::getClonedNH(const DSNodeHandle &SrcNH) {
  if (SrcNH.isNull()) return DSNodeHandle();
  const DSNode *SN = SrcNH.getNode();

  DSNodeHandle &NH = NodeMap[SN];
  if (!NH.isNull()) {   // Node already mapped?
    DSNode *NHN = NH.getNode();
    return DSNodeHandle(NHN, NH.getOffset()+SrcNH.getOffset());
  }

  // If SrcNH has globals and the destination graph has one of the same globals,
  // merge this node with the destination node, which is much more efficient.
  if (SN->globals_begin() != SN->globals_end()) {
    DSScalarMap &DestSM = Dest.getScalarMap();
    for (DSNode::globals_iterator I = SN->globals_begin(),E = SN->globals_end();
         I != E; ++I) {
      GlobalValue *GV = *I;
      DSScalarMap::iterator GI = DestSM.find(GV);
      if (GI != DestSM.end() && !GI->second.isNull()) {
        // We found one, use merge instead!
        merge(GI->second, Src.getNodeForValue(GV));
        assert(!NH.isNull() && "Didn't merge node!");
        DSNode *NHN = NH.getNode();
        return DSNodeHandle(NHN, NH.getOffset()+SrcNH.getOffset());
      }
    }
  }

  DSNode *DN = new DSNode(*SN, &Dest, true /* Null out all links */);
  DN->maskNodeTypes(BitsToKeep);
  NH = DN;

  // Next, recursively clone all outgoing links as necessary.  Note that
  // adding these links can cause the node to collapse itself at any time, and
  // the current node may be merged with arbitrary other nodes.  For this
  // reason, we must always go through NH.
  DN = 0;
  for (unsigned i = 0, e = SN->getNumLinks(); i != e; ++i) {
    const DSNodeHandle &SrcEdge = SN->getLink(i << DS::PointerShift);
    if (!SrcEdge.isNull()) {
      const DSNodeHandle &DestEdge = getClonedNH(SrcEdge);
      // Compute the offset into the current node at which to
      // merge this link.  In the common case, this is a linear
      // relation to the offset in the original node (with
      // wrapping), but if the current node gets collapsed due to
      // recursive merging, we must make sure to merge in all remaining
      // links at offset zero.
      unsigned MergeOffset = 0;
      DSNode *CN = NH.getNode();
      if (CN->getSize() != 1)
        MergeOffset = ((i << DS::PointerShift)+NH.getOffset()) % CN->getSize();
      CN->addEdgeTo(MergeOffset, DestEdge);
    }
  }

  // If this node contains any globals, make sure they end up in the scalar
  // map with the correct offset.
  for (DSNode::globals_iterator I = SN->globals_begin(), E = SN->globals_end();
       I != E; ++I) {
    GlobalValue *GV = *I;
    const DSNodeHandle &SrcGNH = Src.getNodeForValue(GV);
    DSNodeHandle &DestGNH = NodeMap[SrcGNH.getNode()];
    assert(DestGNH.getNode() == NH.getNode() && "Global mapping inconsistent");
    Dest.getNodeForValue(GV).mergeWith(DSNodeHandle(DestGNH.getNode(),
                                  DestGNH.getOffset()+SrcGNH.getOffset()));
  }
  NH.getNode()->mergeGlobals(SN->getGlobalsList());

  return DSNodeHandle(NH.getNode(), NH.getOffset()+SrcNH.getOffset());
}

void ReachabilityCloner::merge(const DSNodeHandle &NH,
                               const DSNodeHandle &SrcNH) {
  if (SrcNH.isNull()) return;  // Noop
  if (NH.isNull()) {
    // If there is no destination node, just clone the source and assign the
    // destination node to be it.
    NH.mergeWith(getClonedNH(SrcNH));
    return;
  }

  // Okay, at this point, we know that we have both a destination and a source
  // node that need to be merged.  Check to see if the source node has already
  // been cloned.
  const DSNode *SN = SrcNH.getNode();
  DSNodeHandle &SCNH = NodeMap[SN];  // SourceClonedNodeHandle
  if (!SCNH.isNull()) {   // Node already cloned?
    DSNode *SCNHN = SCNH.getNode();
    NH.mergeWith(DSNodeHandle(SCNHN,
                              SCNH.getOffset()+SrcNH.getOffset()));
    return;  // Nothing to do!
  }

  // Okay, so the source node has not already been cloned.  Instead of creating
  // a new DSNode, only to merge it into the one we already have, try to perform
  // the merge in-place.  The only case we cannot handle here is when the offset
  // into the existing node is less than the offset into the virtual node we are
  // merging in.  In this case, we have to extend the existing node, which
  // requires an allocation anyway.
  DSNode *DN = NH.getNode();   // Make sure the Offset is up-to-date
  if (NH.getOffset() >= SrcNH.getOffset()) {
    if (!DN->isNodeCompletelyFolded()) {
      // Make sure the destination node is folded if the source node is folded.
      if (SN->isNodeCompletelyFolded()) {
        DN->foldNodeCompletely();
        DN = NH.getNode();
      } else if (SN->getSize() != DN->getSize()) {
        // If the two nodes are of different size, and the smaller node has the
        // array bit set, collapse!
#if COLLAPSE_ARRAYS_AGGRESSIVELY
        if (SN->getSize() < DN->getSize()) {
          if (SN->isArray()) {
            DN->foldNodeCompletely();
            DN = NH.getNode();
          }
        } else if (DN->isArray()) {
          DN->foldNodeCompletely();
          DN = NH.getNode();
        }
#endif
      }

      // Merge the type entries of the two nodes together...
      if (SN->getType() != Type::VoidTy && !DN->isNodeCompletelyFolded()) {
        DN->mergeTypeInfo(SN->getType(), NH.getOffset()-SrcNH.getOffset());
        DN = NH.getNode();
      }
    }

    assert(!DN->isDeadNode());

    // Merge the NodeType information.
    DN->mergeNodeFlags(SN->getNodeFlags() & BitsToKeep);

    // Before we start merging outgoing links and updating the scalar map, make
    // sure it is known that this is the representative node for the src node.
    SCNH = DSNodeHandle(DN, NH.getOffset()-SrcNH.getOffset());

    // If the source node contains any globals, make sure they end up in the
    // scalar map with the correct offset.
    if (SN->globals_begin() != SN->globals_end()) {
      // Update the globals in the destination node itself.
      DN->mergeGlobals(SN->getGlobalsList());

      // Update the scalar map for the graph we are merging the source node
      // into.
      for (DSNode::globals_iterator I = SN->globals_begin(),
             E = SN->globals_end(); I != E; ++I) {
        GlobalValue *GV = *I;
        const DSNodeHandle &SrcGNH = Src.getNodeForValue(GV);
        DSNodeHandle &DestGNH = NodeMap[SrcGNH.getNode()];
        assert(DestGNH.getNode() == NH.getNode() && "Global mapping inconsistent");
        Dest.getNodeForValue(GV).mergeWith(DSNodeHandle(DestGNH.getNode(),
                                      DestGNH.getOffset()+SrcGNH.getOffset()));
      }
      NH.getNode()->mergeGlobals(SN->getGlobalsList());
    }
  } else {
    // We cannot handle this case without allocating a temporary node.  Fall
    // back on being simple.
    DSNode *NewDN = new DSNode(*SN, &Dest, true /* Null out all links */);
    NewDN->maskNodeTypes(BitsToKeep);

    unsigned NHOffset = NH.getOffset();
    NH.mergeWith(DSNodeHandle(NewDN, SrcNH.getOffset()));

    assert(NH.getNode() &&
           (NH.getOffset() > NHOffset ||
            (NH.getOffset() == 0 && NH.getNode()->isNodeCompletelyFolded())) &&
           "Merging did not adjust the offset!");

    // Before we start merging outgoing links and updating the scalar map, make
    // sure it is known that this is the representative node for the src node.
    SCNH = DSNodeHandle(NH.getNode(), NH.getOffset()-SrcNH.getOffset());

    // If the source node contained any globals, make sure to create entries
    // in the scalar map for them!
    for (DSNode::globals_iterator I = SN->globals_begin(),
           E = SN->globals_end(); I != E; ++I) {
      GlobalValue *GV = *I;
      const DSNodeHandle &SrcGNH = Src.getNodeForValue(GV);
      DSNodeHandle &DestGNH = NodeMap[SrcGNH.getNode()];
      assert(DestGNH.getNode() == NH.getNode() && "Global mapping inconsistent");
      assert(SrcGNH.getNode() == SN && "Global mapping inconsistent");
      Dest.getNodeForValue(GV).mergeWith(DSNodeHandle(DestGNH.getNode(),
                                    DestGNH.getOffset()+SrcGNH.getOffset()));
    }
  }


  // Next, recursively merge all outgoing links as necessary.  Note that
  // adding these links can cause the destination node to collapse itself at
  // any time, and the current node may be merged with arbitrary other nodes.
  // For this reason, we must always go through NH.
  DN = 0;
  for (unsigned i = 0, e = SN->getNumLinks(); i != e; ++i) {
    const DSNodeHandle &SrcEdge = SN->getLink(i << DS::PointerShift);
    if (!SrcEdge.isNull()) {
      // Compute the offset into the current node at which to
      // merge this link.  In the common case, this is a linear
      // relation to the offset in the original node (with
      // wrapping), but if the current node gets collapsed due to
      // recursive merging, we must make sure to merge in all remaining
      // links at offset zero.
      DSNode *CN = SCNH.getNode();
      unsigned MergeOffset =
        ((i << DS::PointerShift)+SCNH.getOffset()) % CN->getSize();

      DSNodeHandle Tmp = CN->getLink(MergeOffset);
      if (!Tmp.isNull()) {
        // Perform the recursive merging.  Make sure to create a temporary NH,
        // because the Link can disappear in the process of recursive merging.
        merge(Tmp, SrcEdge);
      } else {
        Tmp.mergeWith(getClonedNH(SrcEdge));
        // Merging this could cause all kinds of recursive things to happen,
        // culminating in the current node being eliminated.  Since this is
        // possible, make sure to reacquire the link from 'CN'.

        unsigned MergeOffset = 0;
        CN = SCNH.getNode();
        MergeOffset = ((i << DS::PointerShift)+SCNH.getOffset()) % CN->getSize();
        CN->getLink(MergeOffset).mergeWith(Tmp);
      }
    }
  }
}

/// mergeCallSite - Merge the nodes reachable from the specified src call
/// site into the nodes reachable from DestCS.
void ReachabilityCloner::mergeCallSite(DSCallSite &DestCS,
                                       const DSCallSite &SrcCS) {
  merge(DestCS.getRetVal(), SrcCS.getRetVal());
  unsigned MinArgs = DestCS.getNumPtrArgs();
  if (SrcCS.getNumPtrArgs() < MinArgs) MinArgs = SrcCS.getNumPtrArgs();

  for (unsigned a = 0; a != MinArgs; ++a)
    merge(DestCS.getPtrArg(a), SrcCS.getPtrArg(a));

  for (unsigned a = MinArgs, e = SrcCS.getNumPtrArgs(); a != e; ++a)
    DestCS.addPtrArg(getClonedNH(SrcCS.getPtrArg(a)));
}


//===----------------------------------------------------------------------===//
// DSCallSite Implementation
//===----------------------------------------------------------------------===//

// Define here to avoid including iOther.h and BasicBlock.h in DSGraph.h
Function &DSCallSite::getCaller() const {
  return *Site.getInstruction()->getParent()->getParent();
}

void DSCallSite::InitNH(DSNodeHandle &NH, const DSNodeHandle &Src,
                        ReachabilityCloner &RC) {
  NH = RC.getClonedNH(Src);
}

//===----------------------------------------------------------------------===//
// DSGraph Implementation
//===----------------------------------------------------------------------===//

/// getFunctionNames - Return a space separated list of the name of the
/// functions in this graph (if any)
std::string DSGraph::getFunctionNames() const {
  switch (getReturnNodes().size()) {
  case 0: return "Globals graph";
  case 1: return retnodes_begin()->first->getName();
  default:
    std::string Return;
    for (DSGraph::retnodes_iterator I = retnodes_begin();
         I != retnodes_end(); ++I)
      Return += I->first->getName() + " ";
    Return.erase(Return.end()-1, Return.end());   // Remove last space character
    return Return;
  }
}


DSGraph::DSGraph(const DSGraph &G, EquivalenceClasses<GlobalValue*> &ECs,
                 unsigned CloneFlags)
  : GlobalsGraph(0), ScalarMap(ECs), TD(G.TD) {
  PrintAuxCalls = false;
  cloneInto(G, CloneFlags);
}

DSGraph::~DSGraph() {
  FunctionCalls.clear();
  AuxFunctionCalls.clear();
  ScalarMap.clear();
  ReturnNodes.clear();

  // Drop all intra-node references, so that assertions don't fail...
  for (node_iterator NI = node_begin(), E = node_end(); NI != E; ++NI)
    NI->dropAllReferences();

  // Free all of the nodes.
  Nodes.clear();
}

// dump - Allow inspection of graph in a debugger.
void DSGraph::dump() const { print(std::cerr); }


/// remapLinks - Change all of the Links in the current node according to the
01231 /// 01232 void DSNode::remapLinks(DSGraph::NodeMapTy &OldNodeMap) { 01233 for (unsigned i = 0, e = Links.size(); i != e; ++i) 01234 if (DSNode *N = Links[i].getNode()) { 01235 DSGraph::NodeMapTy::const_iterator ONMI = OldNodeMap.find(N); 01236 if (ONMI != OldNodeMap.end()) { 01237 DSNode *ONMIN = ONMI->second.getNode(); 01238 Links[i].setTo(ONMIN, Links[i].getOffset()+ONMI->second.getOffset()); 01239 } 01240 } 01241 } 01242 01243 /// addObjectToGraph - This method can be used to add global, stack, and heap 01244 /// objects to the graph. This can be used when updating DSGraphs due to the 01245 /// introduction of new temporary objects. The new object is not pointed to 01246 /// and does not point to any other objects in the graph. 01247 DSNode *DSGraph::addObjectToGraph(Value *Ptr, bool UseDeclaredType) { 01248 assert(isa<PointerType>(Ptr->getType()) && "Ptr is not a pointer!"); 01249 const Type *Ty = cast<PointerType>(Ptr->getType())->getElementType(); 01250 DSNode *N = new DSNode(UseDeclaredType ? Ty : 0, this); 01251 assert(ScalarMap[Ptr].isNull() && "Object already in this graph!"); 01252 ScalarMap[Ptr] = N; 01253 01254 if (GlobalValue *GV = dyn_cast<GlobalValue>(Ptr)) { 01255 N->addGlobal(GV); 01256 } else if (MallocInst *MI = dyn_cast<MallocInst>(Ptr)) { 01257 N->setHeapNodeMarker(); 01258 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Ptr)) { 01259 N->setAllocaNodeMarker(); 01260 } else { 01261 assert(0 && "Illegal memory object input!"); 01262 } 01263 return N; 01264 } 01265 01266 01267 /// cloneInto - Clone the specified DSGraph into the current graph. The 01268 /// translated ScalarMap for the old function is filled into the ScalarMap 01269 /// for the graph, and the translated ReturnNodes map is returned into 01270 /// ReturnNodes. 01271 /// 01272 /// The CloneFlags member controls various aspects of the cloning process. 01273 /// 01274 void DSGraph::cloneInto(const DSGraph &G, unsigned CloneFlags) { 01275 TIME_REGION(X, "cloneInto"); 01276 assert(&G != this && "Cannot clone graph into itself!"); 01277 01278 NodeMapTy OldNodeMap; 01279 01280 // Remove alloca or mod/ref bits as specified... 01281 unsigned BitsToClear = ((CloneFlags & StripAllocaBit)? DSNode::AllocaNode : 0) 01282 | ((CloneFlags & StripModRefBits)? (DSNode::Modified | DSNode::Read) : 0) 01283 | ((CloneFlags & StripIncompleteBit)? DSNode::Incomplete : 0); 01284 BitsToClear |= DSNode::DEAD; // Clear dead flag... 01285 01286 for (node_const_iterator I = G.node_begin(), E = G.node_end(); I != E; ++I) { 01287 assert(!I->isForwarding() && 01288 "Forward nodes shouldn't be in node list!"); 01289 DSNode *New = new DSNode(*I, this); 01290 New->maskNodeTypes(~BitsToClear); 01291 OldNodeMap[I] = New; 01292 } 01293 01294 #ifndef NDEBUG 01295 Timer::addPeakMemoryMeasurement(); 01296 #endif 01297 01298 // Rewrite the links in the new nodes to point into the current graph now. 01299 // Note that we don't loop over the node's list to do this. The problem is 01300 // that remaping links can cause recursive merging to happen, which means 01301 // that node_iterator's can get easily invalidated! Because of this, we 01302 // loop over the OldNodeMap, which contains all of the new nodes as the 01303 // .second element of the map elements. Also note that if we remap a node 01304 // more than once, we won't break anything. 01305 for (NodeMapTy::iterator I = OldNodeMap.begin(), E = OldNodeMap.end(); 01306 I != E; ++I) 01307 I->second.getNode()->remapLinks(OldNodeMap); 01308 01309 // Copy the scalar map... 
merging all of the global nodes... 01310 for (DSScalarMap::const_iterator I = G.ScalarMap.begin(), 01311 E = G.ScalarMap.end(); I != E; ++I) { 01312 DSNodeHandle &MappedNode = OldNodeMap[I->second.getNode()]; 01313 DSNodeHandle &H = ScalarMap.getRawEntryRef(I->first); 01314 DSNode *MappedNodeN = MappedNode.getNode(); 01315 H.mergeWith(DSNodeHandle(MappedNodeN, 01316 I->second.getOffset()+MappedNode.getOffset())); 01317 } 01318 01319 if (!(CloneFlags & DontCloneCallNodes)) { 01320 // Copy the function calls list. 01321 for (fc_iterator I = G.fc_begin(), E = G.fc_end(); I != E; ++I) 01322 FunctionCalls.push_back(DSCallSite(*I, OldNodeMap)); 01323 } 01324 01325 if (!(CloneFlags & DontCloneAuxCallNodes)) { 01326 // Copy the auxiliary function calls list. 01327 for (afc_iterator I = G.afc_begin(), E = G.afc_end(); I != E; ++I) 01328 AuxFunctionCalls.push_back(DSCallSite(*I, OldNodeMap)); 01329 } 01330 01331 // Map the return node pointers over... 01332 for (retnodes_iterator I = G.retnodes_begin(), 01333 E = G.retnodes_end(); I != E; ++I) { 01334 const DSNodeHandle &Ret = I->second; 01335 DSNodeHandle &MappedRet = OldNodeMap[Ret.getNode()]; 01336 DSNode *MappedRetN = MappedRet.getNode(); 01337 ReturnNodes.insert(std::make_pair(I->first, 01338 DSNodeHandle(MappedRetN, 01339 MappedRet.getOffset()+Ret.getOffset()))); 01340 } 01341 } 01342 01343 /// spliceFrom - Logically perform the operation of cloning the RHS graph into 01344 /// this graph, then clearing the RHS graph. Instead of performing this as 01345 /// two seperate operations, do it as a single, much faster, one. 01346 /// 01347 void DSGraph::spliceFrom(DSGraph &RHS) { 01348 // Change all of the nodes in RHS to think we are their parent. 01349 for (NodeListTy::iterator I = RHS.Nodes.begin(), E = RHS.Nodes.end(); 01350 I != E; ++I) 01351 I->setParentGraph(this); 01352 // Take all of the nodes. 01353 Nodes.splice(Nodes.end(), RHS.Nodes); 01354 01355 // Take all of the calls. 01356 FunctionCalls.splice(FunctionCalls.end(), RHS.FunctionCalls); 01357 AuxFunctionCalls.splice(AuxFunctionCalls.end(), RHS.AuxFunctionCalls); 01358 01359 // Take all of the return nodes. 01360 if (ReturnNodes.empty()) { 01361 ReturnNodes.swap(RHS.ReturnNodes); 01362 } else { 01363 ReturnNodes.insert(RHS.ReturnNodes.begin(), RHS.ReturnNodes.end()); 01364 RHS.ReturnNodes.clear(); 01365 } 01366 01367 // Merge the scalar map in. 01368 ScalarMap.spliceFrom(RHS.ScalarMap); 01369 } 01370 01371 /// spliceFrom - Copy all entries from RHS, then clear RHS. 01372 /// 01373 void DSScalarMap::spliceFrom(DSScalarMap &RHS) { 01374 // Special case if this is empty. 01375 if (ValueMap.empty()) { 01376 ValueMap.swap(RHS.ValueMap); 01377 GlobalSet.swap(RHS.GlobalSet); 01378 } else { 01379 GlobalSet.insert(RHS.GlobalSet.begin(), RHS.GlobalSet.end()); 01380 for (ValueMapTy::iterator I = RHS.ValueMap.begin(), E = RHS.ValueMap.end(); 01381 I != E; ++I) 01382 ValueMap[I->first].mergeWith(I->second); 01383 RHS.ValueMap.clear(); 01384 } 01385 } 01386 01387 01388 /// getFunctionArgumentsForCall - Given a function that is currently in this 01389 /// graph, return the DSNodeHandles that correspond to the pointer-compatible 01390 /// function arguments. The vector is filled in with the return value (or 01391 /// null if it is not pointer compatible), followed by all of the 01392 /// pointer-compatible arguments. 
void DSGraph::getFunctionArgumentsForCall(Function *F,
                                          std::vector<DSNodeHandle> &Args) const {
  Args.push_back(getReturnNodeFor(*F));
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI)
    if (isPointerType(AI->getType())) {
      Args.push_back(getNodeForValue(AI));
      assert(!Args.back().isNull() && "Pointer argument w/o scalarmap entry!?");
    }
}

namespace {
  // HackedGraphSCCFinder - This is used to find nodes that have a path from the
  // node to a node cloned by the ReachabilityCloner object contained.  To be
  // extra obnoxious it ignores edges from nodes that are globals, and truncates
  // search at RC marked nodes.  This is designed as an object so that
  // intermediate results can be memoized across invocations of
  // PathExistsToClonedNode.
  struct HackedGraphSCCFinder {
    ReachabilityCloner &RC;
    unsigned CurNodeId;
    std::vector<const DSNode*> SCCStack;
    std::map<const DSNode*, std::pair<unsigned, bool> > NodeInfo;

    HackedGraphSCCFinder(ReachabilityCloner &rc) : RC(rc), CurNodeId(1) {
      // Remove null pointer as a special case.
      NodeInfo[0] = std::make_pair(0, false);
    }

    std::pair<unsigned, bool> &VisitForSCCs(const DSNode *N);

    bool PathExistsToClonedNode(const DSNode *N) {
      return VisitForSCCs(N).second;
    }

    bool PathExistsToClonedNode(const DSCallSite &CS) {
      if (PathExistsToClonedNode(CS.getRetVal().getNode()))
        return true;
      for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i)
        if (PathExistsToClonedNode(CS.getPtrArg(i).getNode()))
          return true;
      return false;
    }
  };
}

std::pair<unsigned, bool> &HackedGraphSCCFinder::
VisitForSCCs(const DSNode *N) {
  std::map<const DSNode*, std::pair<unsigned, bool> >::iterator
    NodeInfoIt = NodeInfo.lower_bound(N);
  if (NodeInfoIt != NodeInfo.end() && NodeInfoIt->first == N)
    return NodeInfoIt->second;

  unsigned Min = CurNodeId++;
  unsigned MyId = Min;
  std::pair<unsigned, bool> &ThisNodeInfo =
    NodeInfo.insert(NodeInfoIt,
                    std::make_pair(N, std::make_pair(MyId, false)))->second;

  // Base case: if we find a global, this doesn't reach the cloned graph
  // portion.
  if (N->isGlobalNode()) {
    ThisNodeInfo.second = false;
    return ThisNodeInfo;
  }

  // Base case: if this does reach the cloned graph portion... it does. :)
  if (RC.hasClonedNode(N)) {
    ThisNodeInfo.second = true;
    return ThisNodeInfo;
  }

  SCCStack.push_back(N);

  // Otherwise, check all successors.
  bool AnyDirectSuccessorsReachClonedNodes = false;
  for (DSNode::const_edge_iterator EI = N->edge_begin(), EE = N->edge_end();
       EI != EE; ++EI)
    if (DSNode *Succ = EI->getNode()) {
      std::pair<unsigned, bool> &SuccInfo = VisitForSCCs(Succ);
      if (SuccInfo.first < Min) Min = SuccInfo.first;
      AnyDirectSuccessorsReachClonedNodes |= SuccInfo.second;
    }

  if (Min != MyId)
    return ThisNodeInfo;  // Part of a large SCC.  Leave self on stack.

  if (SCCStack.back() == N) {  // Special case single node SCC.
    SCCStack.pop_back();
    ThisNodeInfo.second = AnyDirectSuccessorsReachClonedNodes;
    return ThisNodeInfo;
  }

  // Find out if any direct successors of any node reach cloned nodes.
01487   if (!AnyDirectSuccessorsReachClonedNodes)
01488     for (unsigned i = SCCStack.size()-1; SCCStack[i] != N; --i)
01489       for (DSNode::const_edge_iterator EI = N->edge_begin(), EE = N->edge_end();
01490            EI != EE; ++EI)
01491         if (DSNode *N = EI->getNode())
01492           if (NodeInfo[N].second) {
01493             AnyDirectSuccessorsReachClonedNodes = true;
01494             goto OutOfLoop;
01495           }
01496 OutOfLoop:
01497   // If any successor reaches a cloned node, mark all nodes in this SCC as
01498   // reaching the cloned node.
01499   if (AnyDirectSuccessorsReachClonedNodes)
01500     while (SCCStack.back() != N) {
01501       NodeInfo[SCCStack.back()].second = true;
01502       SCCStack.pop_back();
01503     }
01504   SCCStack.pop_back();
01505   ThisNodeInfo.second = true;
01506   return ThisNodeInfo;
01507 }
01508 
01509 /// mergeInGraph - This method merges in the minimal number of nodes from the
01510 /// callee graph 'Graph' into 'this' graph, merging the bindings specified by
01511 /// the call site (in this graph) with the argument bindings in 'Args', which
01512 /// correspond to 'Graph'.  If 'Graph' is this graph, merge in place.
01513 ///
01514 void DSGraph::mergeInGraph(const DSCallSite &CS,
01515                            std::vector<DSNodeHandle> &Args,
01516                            const DSGraph &Graph, unsigned CloneFlags) {
01517   TIME_REGION(X, "mergeInGraph");
01518 
01519   assert((CloneFlags & DontCloneCallNodes) &&
01520          "Doesn't support copying of call nodes!");
01521 
01522   // If the callee graph is this graph (a self call), merge the bindings in place.
01523   if (&Graph == this) {
01524     // Merge the return value with the return value of the context.
01525     Args[0].mergeWith(CS.getRetVal());
01526 
01527     // Resolve all of the function arguments.
01528     for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i) {
01529       if (i == Args.size()-1)
01530         break;
01531 
01532       // Add the link from the argument scalar to the provided value.
01533       Args[i+1].mergeWith(CS.getPtrArg(i));
01534     }
01535     return;
01536   }
01537 
01538   // Clone the callee's graph into the current graph, keeping track of where
01539   // scalars in the old graph _used_ to point, and of the new nodes matching
01540   // nodes of the old graph.
01541   ReachabilityCloner RC(*this, Graph, CloneFlags);
01542 
01543   // Map the return node pointer over.
01544   if (!CS.getRetVal().isNull())
01545     RC.merge(CS.getRetVal(), Args[0]);
01546 
01547   // Map over all of the arguments.
01548   for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i) {
01549     if (i == Args.size()-1)
01550       break;
01551 
01552     // Add the link from the argument scalar to the provided value.
01553     RC.merge(CS.getPtrArg(i), Args[i+1]);
01554   }
01555 
01556   // We generally don't want to copy global nodes or aux calls from the callee
01557   // graph to the caller graph.  However, we have to copy them if there is a
01558   // path from the node to a node we have already copied which does not go
01559   // through another global.  Compute the set of aux call nodes and globals
01560   // that can reach an already-copied node, then copy them over.
01561   std::vector<const DSCallSite*> AuxCallToCopy;
01562   std::vector<GlobalValue*> GlobalsToCopy;
01563 
01564   // SCCFinder - Memoize reachability results for efficiency.  Contains a
01565   // true/false value for every visited node that reaches a copied node without
01566   // going through a global.
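// For example, once SCCFinder is constructed over RC below, a query such as
//   SCCFinder.PathExistsToClonedNode(SomeAuxCall.getRetVal().getNode())
// (where 'SomeAuxCall' stands for any DSCallSite of the callee graph) answers
// whether that node can reach a node RC has already cloned without passing
// through a global; per-node answers are cached, so the scans over the aux
// call list and the global nodes below stay cheap.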
01567 HackedGraphSCCFinder SCCFinder(RC); 01568 01569 if (!(CloneFlags & DontCloneAuxCallNodes)) 01570 for (afc_iterator I = Graph.afc_begin(), E = Graph.afc_end(); I!=E; ++I) 01571 if (SCCFinder.PathExistsToClonedNode(*I)) 01572 AuxCallToCopy.push_back(&*I); 01573 01574 const DSScalarMap &GSM = Graph.getScalarMap(); 01575 for (DSScalarMap::global_iterator GI = GSM.global_begin(), 01576 E = GSM.global_end(); GI != E; ++GI) { 01577 DSNode *GlobalNode = Graph.getNodeForValue(*GI).getNode(); 01578 for (DSNode::edge_iterator EI = GlobalNode->edge_begin(), 01579 EE = GlobalNode->edge_end(); EI != EE; ++EI) 01580 if (SCCFinder.PathExistsToClonedNode(EI->getNode())) { 01581 GlobalsToCopy.push_back(*GI); 01582 break; 01583 } 01584 } 01585 01586 // Copy aux calls that are needed. 01587 for (unsigned i = 0, e = AuxCallToCopy.size(); i != e; ++i) 01588 AuxFunctionCalls.push_back(DSCallSite(*AuxCallToCopy[i], RC)); 01589 01590 // Copy globals that are needed. 01591 for (unsigned i = 0, e = GlobalsToCopy.size(); i != e; ++i) 01592 RC.getClonedNH(Graph.getNodeForValue(GlobalsToCopy[i])); 01593 } 01594 01595 01596 01597 /// mergeInGraph - The method is used for merging graphs together. If the 01598 /// argument graph is not *this, it makes a clone of the specified graph, then 01599 /// merges the nodes specified in the call site with the formal arguments in the 01600 /// graph. 01601 /// 01602 void DSGraph::mergeInGraph(const DSCallSite &CS, Function &F, 01603 const DSGraph &Graph, unsigned CloneFlags) { 01604 // Set up argument bindings. 01605 std::vector<DSNodeHandle> Args; 01606 Graph.getFunctionArgumentsForCall(&F, Args); 01607 01608 mergeInGraph(CS, Args, Graph, CloneFlags); 01609 } 01610 01611 /// getCallSiteForArguments - Get the arguments and return value bindings for 01612 /// the specified function in the current graph. 01613 /// 01614 DSCallSite DSGraph::getCallSiteForArguments(Function &F) const { 01615 std::vector<DSNodeHandle> Args; 01616 01617 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) 01618 if (isPointerType(I->getType())) 01619 Args.push_back(getNodeForValue(I)); 01620 01621 return DSCallSite(CallSite(), getReturnNodeFor(F), &F, Args); 01622 } 01623 01624 /// getDSCallSiteForCallSite - Given an LLVM CallSite object that is live in 01625 /// the context of this graph, return the DSCallSite for it. 01626 DSCallSite DSGraph::getDSCallSiteForCallSite(CallSite CS) const { 01627 DSNodeHandle RetVal; 01628 Instruction *I = CS.getInstruction(); 01629 if (isPointerType(I->getType())) 01630 RetVal = getNodeForValue(I); 01631 01632 std::vector<DSNodeHandle> Args; 01633 Args.reserve(CS.arg_end()-CS.arg_begin()); 01634 01635 // Calculate the arguments vector... 01636 for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E; ++I) 01637 if (isPointerType((*I)->getType())) 01638 if (isa<ConstantPointerNull>(*I)) 01639 Args.push_back(DSNodeHandle()); 01640 else 01641 Args.push_back(getNodeForValue(*I)); 01642 01643 // Add a new function call entry... 01644 if (Function *F = CS.getCalledFunction()) 01645 return DSCallSite(CS, RetVal, F, Args); 01646 else 01647 return DSCallSite(CS, RetVal, 01648 getNodeForValue(CS.getCalledValue()).getNode(), Args); 01649 } 01650 01651 01652 01653 // markIncompleteNodes - Mark the specified node as having contents that are not 01654 // known with the current analysis we have performed. 
Because a node makes all 01655 // of the nodes it can reach incomplete if the node itself is incomplete, we 01656 // must recursively traverse the data structure graph, marking all reachable 01657 // nodes as incomplete. 01658 // 01659 static void markIncompleteNode(DSNode *N) { 01660 // Stop recursion if no node, or if node already marked... 01661 if (N == 0 || N->isIncomplete()) return; 01662 01663 // Actually mark the node 01664 N->setIncompleteMarker(); 01665 01666 // Recursively process children... 01667 for (DSNode::edge_iterator I = N->edge_begin(),E = N->edge_end(); I != E; ++I) 01668 if (DSNode *DSN = I->getNode()) 01669 markIncompleteNode(DSN); 01670 } 01671 01672 static void markIncomplete(DSCallSite &Call) { 01673 // Then the return value is certainly incomplete! 01674 markIncompleteNode(Call.getRetVal().getNode()); 01675 01676 // All objects pointed to by function arguments are incomplete! 01677 for (unsigned i = 0, e = Call.getNumPtrArgs(); i != e; ++i) 01678 markIncompleteNode(Call.getPtrArg(i).getNode()); 01679 } 01680 01681 // markIncompleteNodes - Traverse the graph, identifying nodes that may be 01682 // modified by other functions that have not been resolved yet. This marks 01683 // nodes that are reachable through three sources of "unknownness": 01684 // 01685 // Global Variables, Function Calls, and Incoming Arguments 01686 // 01687 // For any node that may have unknown components (because something outside the 01688 // scope of current analysis may have modified it), the 'Incomplete' flag is 01689 // added to the NodeType. 01690 // 01691 void DSGraph::markIncompleteNodes(unsigned Flags) { 01692 // Mark any incoming arguments as incomplete. 01693 if (Flags & DSGraph::MarkFormalArgs) 01694 for (ReturnNodesTy::iterator FI = ReturnNodes.begin(), E =ReturnNodes.end(); 01695 FI != E; ++FI) { 01696 Function &F = *FI->first; 01697 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); 01698 I != E; ++I) 01699 if (isPointerType(I->getType())) 01700 markIncompleteNode(getNodeForValue(I).getNode()); 01701 markIncompleteNode(FI->second.getNode()); 01702 } 01703 01704 // Mark stuff passed into functions calls as being incomplete. 01705 if (!shouldPrintAuxCalls()) 01706 for (std::list<DSCallSite>::iterator I = FunctionCalls.begin(), 01707 E = FunctionCalls.end(); I != E; ++I) 01708 markIncomplete(*I); 01709 else 01710 for (std::list<DSCallSite>::iterator I = AuxFunctionCalls.begin(), 01711 E = AuxFunctionCalls.end(); I != E; ++I) 01712 markIncomplete(*I); 01713 01714 // Mark all global nodes as incomplete. 01715 for (DSScalarMap::global_iterator I = ScalarMap.global_begin(), 01716 E = ScalarMap.global_end(); I != E; ++I) 01717 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(*I)) 01718 if (!GV->hasInitializer() || // Always mark external globals incomp. 01719 (!GV->isConstant() && (Flags & DSGraph::IgnoreGlobals) == 0)) 01720 markIncompleteNode(ScalarMap[GV].getNode()); 01721 } 01722 01723 static inline void killIfUselessEdge(DSNodeHandle &Edge) { 01724 if (DSNode *N = Edge.getNode()) // Is there an edge? 01725 if (N->getNumReferrers() == 1) // Does it point to a lonely node? 01726 // No interesting info? 01727 if ((N->getNodeFlags() & ~DSNode::Incomplete) == 0 && 01728 N->getType() == Type::VoidTy && !N->isNodeCompletelyFolded()) 01729 Edge.setTo(0, 0); // Kill the edge! 
01730 } 01731 01732 static inline bool nodeContainsExternalFunction(const DSNode *N) { 01733 std::vector<Function*> Funcs; 01734 N->addFullFunctionList(Funcs); 01735 for (unsigned i = 0, e = Funcs.size(); i != e; ++i) 01736 if (Funcs[i]->isExternal()) return true; 01737 return false; 01738 } 01739 01740 static void removeIdenticalCalls(std::list<DSCallSite> &Calls) { 01741 // Remove trivially identical function calls 01742 Calls.sort(); // Sort by callee as primary key! 01743 01744 // Scan the call list cleaning it up as necessary... 01745 DSNodeHandle LastCalleeNode; 01746 Function *LastCalleeFunc = 0; 01747 unsigned NumDuplicateCalls = 0; 01748 bool LastCalleeContainsExternalFunction = false; 01749 01750 unsigned NumDeleted = 0; 01751 for (std::list<DSCallSite>::iterator I = Calls.begin(), E = Calls.end(); 01752 I != E;) { 01753 DSCallSite &CS = *I; 01754 std::list<DSCallSite>::iterator OldIt = I++; 01755 01756 if (!CS.isIndirectCall()) { 01757 LastCalleeNode = 0; 01758 } else { 01759 DSNode *Callee = CS.getCalleeNode(); 01760 01761 // If the Callee is a useless edge, this must be an unreachable call site, 01762 // eliminate it. 01763 if (Callee->getNumReferrers() == 1 && Callee->isComplete() && 01764 Callee->getGlobalsList().empty()) { // No useful info? 01765 #ifndef NDEBUG 01766 std::cerr << "WARNING: Useless call site found.\n"; 01767 #endif 01768 Calls.erase(OldIt); 01769 ++NumDeleted; 01770 continue; 01771 } 01772 01773 // If the last call site in the list has the same callee as this one, and 01774 // if the callee contains an external function, it will never be 01775 // resolvable, just merge the call sites. 01776 if (!LastCalleeNode.isNull() && LastCalleeNode.getNode() == Callee) { 01777 LastCalleeContainsExternalFunction = 01778 nodeContainsExternalFunction(Callee); 01779 01780 std::list<DSCallSite>::iterator PrevIt = OldIt; 01781 --PrevIt; 01782 PrevIt->mergeWith(CS); 01783 01784 // No need to keep this call anymore. 01785 Calls.erase(OldIt); 01786 ++NumDeleted; 01787 continue; 01788 } else { 01789 LastCalleeNode = Callee; 01790 } 01791 } 01792 01793 // If the return value or any arguments point to a void node with no 01794 // information at all in it, and the call node is the only node to point 01795 // to it, remove the edge to the node (killing the node). 01796 // 01797 killIfUselessEdge(CS.getRetVal()); 01798 for (unsigned a = 0, e = CS.getNumPtrArgs(); a != e; ++a) 01799 killIfUselessEdge(CS.getPtrArg(a)); 01800 01801 #if 0 01802 // If this call site calls the same function as the last call site, and if 01803 // the function pointer contains an external function, this node will 01804 // never be resolved. Merge the arguments of the call node because no 01805 // information will be lost. 01806 // 01807 if ((CS.isDirectCall() && CS.getCalleeFunc() == LastCalleeFunc) || 01808 (CS.isIndirectCall() && CS.getCalleeNode() == LastCalleeNode)) { 01809 ++NumDuplicateCalls; 01810 if (NumDuplicateCalls == 1) { 01811 if (LastCalleeNode) 01812 LastCalleeContainsExternalFunction = 01813 nodeContainsExternalFunction(LastCalleeNode); 01814 else 01815 LastCalleeContainsExternalFunction = LastCalleeFunc->isExternal(); 01816 } 01817 01818 // It is not clear why, but enabling this code makes DSA really 01819 // sensitive to node forwarding. Basically, with this enabled, DSA 01820 // performs different number of inlinings based on which nodes are 01821 // forwarding or not. This is clearly a problem, so this code is 01822 // disabled until this can be resolved. 
01823 #if 1 01824 if (LastCalleeContainsExternalFunction 01825 #if 0 01826 || 01827 // This should be more than enough context sensitivity! 01828 // FIXME: Evaluate how many times this is tripped! 01829 NumDuplicateCalls > 20 01830 #endif 01831 ) { 01832 01833 std::list<DSCallSite>::iterator PrevIt = OldIt; 01834 --PrevIt; 01835 PrevIt->mergeWith(CS); 01836 01837 // No need to keep this call anymore. 01838 Calls.erase(OldIt); 01839 ++NumDeleted; 01840 continue; 01841 } 01842 #endif 01843 } else { 01844 if (CS.isDirectCall()) { 01845 LastCalleeFunc = CS.getCalleeFunc(); 01846 LastCalleeNode = 0; 01847 } else { 01848 LastCalleeNode = CS.getCalleeNode(); 01849 LastCalleeFunc = 0; 01850 } 01851 NumDuplicateCalls = 0; 01852 } 01853 #endif 01854 01855 if (I != Calls.end() && CS == *I) { 01856 LastCalleeNode = 0; 01857 Calls.erase(OldIt); 01858 ++NumDeleted; 01859 continue; 01860 } 01861 } 01862 01863 // Resort now that we simplified things. 01864 Calls.sort(); 01865 01866 // Now that we are in sorted order, eliminate duplicates. 01867 std::list<DSCallSite>::iterator CI = Calls.begin(), CE = Calls.end(); 01868 if (CI != CE) 01869 while (1) { 01870 std::list<DSCallSite>::iterator OldIt = CI++; 01871 if (CI == CE) break; 01872 01873 // If this call site is now the same as the previous one, we can delete it 01874 // as a duplicate. 01875 if (*OldIt == *CI) { 01876 Calls.erase(CI); 01877 CI = OldIt; 01878 ++NumDeleted; 01879 } 01880 } 01881 01882 //Calls.erase(std::unique(Calls.begin(), Calls.end()), Calls.end()); 01883 01884 // Track the number of call nodes merged away... 01885 NumCallNodesMerged += NumDeleted; 01886 01887 DEBUG(if (NumDeleted) 01888 std::cerr << "Merged " << NumDeleted << " call nodes.\n";); 01889 } 01890 01891 01892 // removeTriviallyDeadNodes - After the graph has been constructed, this method 01893 // removes all unreachable nodes that are created because they got merged with 01894 // other nodes in the graph. These nodes will all be trivially unreachable, so 01895 // we don't have to perform any non-trivial analysis here. 01896 // 01897 void DSGraph::removeTriviallyDeadNodes() { 01898 TIME_REGION(X, "removeTriviallyDeadNodes"); 01899 01900 #if 0 01901 /// NOTE: This code is disabled. This slows down DSA on 177.mesa 01902 /// substantially! 01903 01904 // Loop over all of the nodes in the graph, calling getNode on each field. 01905 // This will cause all nodes to update their forwarding edges, causing 01906 // forwarded nodes to be delete-able. 01907 { TIME_REGION(X, "removeTriviallyDeadNodes:node_iterate"); 01908 for (node_iterator NI = node_begin(), E = node_end(); NI != E; ++NI) { 01909 DSNode &N = *NI; 01910 for (unsigned l = 0, e = N.getNumLinks(); l != e; ++l) 01911 N.getLink(l*N.getPointerSize()).getNode(); 01912 } 01913 } 01914 01915 // NOTE: This code is disabled. Though it should, in theory, allow us to 01916 // remove more nodes down below, the scan of the scalar map is incredibly 01917 // expensive for certain programs (with large SCCs). In the future, if we can 01918 // make the scalar map scan more efficient, then we can reenable this. 01919 { TIME_REGION(X, "removeTriviallyDeadNodes:scalarmap"); 01920 01921 // Likewise, forward any edges from the scalar nodes. While we are at it, 01922 // clean house a bit. 
01923 for (DSScalarMap::iterator I = ScalarMap.begin(),E = ScalarMap.end();I != E;){ 01924 I->second.getNode(); 01925 ++I; 01926 } 01927 } 01928 #endif 01929 bool isGlobalsGraph = !GlobalsGraph; 01930 01931 for (NodeListTy::iterator NI = Nodes.begin(), E = Nodes.end(); NI != E; ) { 01932 DSNode &Node = *NI; 01933 01934 // Do not remove *any* global nodes in the globals graph. 01935 // This is a special case because such nodes may not have I, M, R flags set. 01936 if (Node.isGlobalNode() && isGlobalsGraph) { 01937 ++NI; 01938 continue; 01939 } 01940 01941 if (Node.isComplete() && !Node.isModified() && !Node.isRead()) { 01942 // This is a useless node if it has no mod/ref info (checked above), 01943 // outgoing edges (which it cannot, as it is not modified in this 01944 // context), and it has no incoming edges. If it is a global node it may 01945 // have all of these properties and still have incoming edges, due to the 01946 // scalar map, so we check those now. 01947 // 01948 if (Node.getNumReferrers() == Node.getGlobalsList().size()) { 01949 const std::vector<GlobalValue*> &Globals = Node.getGlobalsList(); 01950 01951 // Loop through and make sure all of the globals are referring directly 01952 // to the node... 01953 for (unsigned j = 0, e = Globals.size(); j != e; ++j) { 01954 DSNode *N = getNodeForValue(Globals[j]).getNode(); 01955 assert(N == &Node && "ScalarMap doesn't match globals list!"); 01956 } 01957 01958 // Make sure NumReferrers still agrees, if so, the node is truly dead. 01959 if (Node.getNumReferrers() == Globals.size()) { 01960 for (unsigned j = 0, e = Globals.size(); j != e; ++j) 01961 ScalarMap.erase(Globals[j]); 01962 Node.makeNodeDead(); 01963 ++NumTrivialGlobalDNE; 01964 } 01965 } 01966 } 01967 01968 if (Node.getNodeFlags() == 0 && Node.hasNoReferrers()) { 01969 // This node is dead! 01970 NI = Nodes.erase(NI); // Erase & remove from node list. 01971 ++NumTrivialDNE; 01972 } else { 01973 ++NI; 01974 } 01975 } 01976 01977 removeIdenticalCalls(FunctionCalls); 01978 removeIdenticalCalls(AuxFunctionCalls); 01979 } 01980 01981 01982 /// markReachableNodes - This method recursively traverses the specified 01983 /// DSNodes, marking any nodes which are reachable. All reachable nodes it adds 01984 /// to the set, which allows it to only traverse visited nodes once. 01985 /// 01986 void DSNode::markReachableNodes(hash_set<const DSNode*> &ReachableNodes) const { 01987 if (this == 0) return; 01988 assert(getForwardNode() == 0 && "Cannot mark a forwarded node!"); 01989 if (ReachableNodes.insert(this).second) // Is newly reachable? 01990 for (DSNode::const_edge_iterator I = edge_begin(), E = edge_end(); 01991 I != E; ++I) 01992 I->getNode()->markReachableNodes(ReachableNodes); 01993 } 01994 01995 void DSCallSite::markReachableNodes(hash_set<const DSNode*> &Nodes) const { 01996 getRetVal().getNode()->markReachableNodes(Nodes); 01997 if (isIndirectCall()) getCalleeNode()->markReachableNodes(Nodes); 01998 01999 for (unsigned i = 0, e = getNumPtrArgs(); i != e; ++i) 02000 getPtrArg(i).getNode()->markReachableNodes(Nodes); 02001 } 02002 02003 // CanReachAliveNodes - Simple graph walker that recursively traverses the graph 02004 // looking for a node that is marked alive. If an alive node is found, return 02005 // true, otherwise return false. If an alive node is reachable, this node is 02006 // marked as alive... 
02007 // 02008 static bool CanReachAliveNodes(DSNode *N, hash_set<const DSNode*> &Alive, 02009 hash_set<const DSNode*> &Visited, 02010 bool IgnoreGlobals) { 02011 if (N == 0) return false; 02012 assert(N->getForwardNode() == 0 && "Cannot mark a forwarded node!"); 02013 02014 // If this is a global node, it will end up in the globals graph anyway, so we 02015 // don't need to worry about it. 02016 if (IgnoreGlobals && N->isGlobalNode()) return false; 02017 02018 // If we know that this node is alive, return so! 02019 if (Alive.count(N)) return true; 02020 02021 // Otherwise, we don't think the node is alive yet, check for infinite 02022 // recursion. 02023 if (Visited.count(N)) return false; // Found a cycle 02024 Visited.insert(N); // No recursion, insert into Visited... 02025 02026 for (DSNode::edge_iterator I = N->edge_begin(),E = N->edge_end(); I != E; ++I) 02027 if (CanReachAliveNodes(I->getNode(), Alive, Visited, IgnoreGlobals)) { 02028 N->markReachableNodes(Alive); 02029 return true; 02030 } 02031 return false; 02032 } 02033 02034 // CallSiteUsesAliveArgs - Return true if the specified call site can reach any 02035 // alive nodes. 02036 // 02037 static bool CallSiteUsesAliveArgs(const DSCallSite &CS, 02038 hash_set<const DSNode*> &Alive, 02039 hash_set<const DSNode*> &Visited, 02040 bool IgnoreGlobals) { 02041 if (CanReachAliveNodes(CS.getRetVal().getNode(), Alive, Visited, 02042 IgnoreGlobals)) 02043 return true; 02044 if (CS.isIndirectCall() && 02045 CanReachAliveNodes(CS.getCalleeNode(), Alive, Visited, IgnoreGlobals)) 02046 return true; 02047 for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i) 02048 if (CanReachAliveNodes(CS.getPtrArg(i).getNode(), Alive, Visited, 02049 IgnoreGlobals)) 02050 return true; 02051 return false; 02052 } 02053 02054 // removeDeadNodes - Use a more powerful reachability analysis to eliminate 02055 // subgraphs that are unreachable. This often occurs because the data 02056 // structure doesn't "escape" into it's caller, and thus should be eliminated 02057 // from the caller's graph entirely. This is only appropriate to use when 02058 // inlining graphs. 02059 // 02060 void DSGraph::removeDeadNodes(unsigned Flags) { 02061 DEBUG(AssertGraphOK(); if (GlobalsGraph) GlobalsGraph->AssertGraphOK()); 02062 02063 // Reduce the amount of work we have to do... remove dummy nodes left over by 02064 // merging... 02065 removeTriviallyDeadNodes(); 02066 02067 TIME_REGION(X, "removeDeadNodes"); 02068 02069 // FIXME: Merge non-trivially identical call nodes... 02070 02071 // Alive - a set that holds all nodes found to be reachable/alive. 02072 hash_set<const DSNode*> Alive; 02073 std::vector<std::pair<Value*, DSNode*> > GlobalNodes; 02074 02075 // Copy and merge all information about globals to the GlobalsGraph if this is 02076 // not a final pass (where unreachable globals are removed). 02077 // 02078 // Strip all alloca bits since the current function is only for the BU pass. 02079 // Strip all incomplete bits since they are short-lived properties and they 02080 // will be correctly computed when rematerializing nodes into the functions. 02081 // 02082 ReachabilityCloner GGCloner(*GlobalsGraph, *this, DSGraph::StripAllocaBit | 02083 DSGraph::StripIncompleteBit); 02084 02085 // Mark all nodes reachable by (non-global) scalar nodes as alive... 
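// Global scalars are handled differently below: they are remembered in
// GlobalNodes (and, unless RemoveUnreachableGlobals is set, cloned into the
// GlobalsGraph through GGCloner), so the fixed-point loop that follows can
// decide whether each global node can reach a node already marked alive.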
02086 { TIME_REGION(Y, "removeDeadNodes:scalarscan"); 02087 for (DSScalarMap::iterator I = ScalarMap.begin(), E = ScalarMap.end(); 02088 I != E; ++I) 02089 if (isa<GlobalValue>(I->first)) { // Keep track of global nodes 02090 assert(!I->second.isNull() && "Null global node?"); 02091 assert(I->second.getNode()->isGlobalNode() && "Should be a global node!"); 02092 GlobalNodes.push_back(std::make_pair(I->first, I->second.getNode())); 02093 02094 // Make sure that all globals are cloned over as roots. 02095 if (!(Flags & DSGraph::RemoveUnreachableGlobals) && GlobalsGraph) { 02096 DSGraph::ScalarMapTy::iterator SMI = 02097 GlobalsGraph->getScalarMap().find(I->first); 02098 if (SMI != GlobalsGraph->getScalarMap().end()) 02099 GGCloner.merge(SMI->second, I->second); 02100 else 02101 GGCloner.getClonedNH(I->second); 02102 } 02103 } else { 02104 I->second.getNode()->markReachableNodes(Alive); 02105 } 02106 } 02107 02108 // The return values are alive as well. 02109 for (ReturnNodesTy::iterator I = ReturnNodes.begin(), E = ReturnNodes.end(); 02110 I != E; ++I) 02111 I->second.getNode()->markReachableNodes(Alive); 02112 02113 // Mark any nodes reachable by primary calls as alive... 02114 for (fc_iterator I = fc_begin(), E = fc_end(); I != E; ++I) 02115 I->markReachableNodes(Alive); 02116 02117 02118 // Now find globals and aux call nodes that are already live or reach a live 02119 // value (which makes them live in turn), and continue till no more are found. 02120 // 02121 bool Iterate; 02122 hash_set<const DSNode*> Visited; 02123 hash_set<const DSCallSite*> AuxFCallsAlive; 02124 do { 02125 Visited.clear(); 02126 // If any global node points to a non-global that is "alive", the global is 02127 // "alive" as well... Remove it from the GlobalNodes list so we only have 02128 // unreachable globals in the list. 02129 // 02130 Iterate = false; 02131 if (!(Flags & DSGraph::RemoveUnreachableGlobals)) 02132 for (unsigned i = 0; i != GlobalNodes.size(); ++i) 02133 if (CanReachAliveNodes(GlobalNodes[i].second, Alive, Visited, 02134 Flags & DSGraph::RemoveUnreachableGlobals)) { 02135 std::swap(GlobalNodes[i--], GlobalNodes.back()); // Move to end to... 02136 GlobalNodes.pop_back(); // erase efficiently 02137 Iterate = true; 02138 } 02139 02140 // Mark only unresolvable call nodes for moving to the GlobalsGraph since 02141 // call nodes that get resolved will be difficult to remove from that graph. 02142 // The final unresolved call nodes must be handled specially at the end of 02143 // the BU pass (i.e., in main or other roots of the call graph). 02144 for (afc_iterator CI = afc_begin(), E = afc_end(); CI != E; ++CI) 02145 if (!AuxFCallsAlive.count(&*CI) && 02146 (CI->isIndirectCall() 02147 || CallSiteUsesAliveArgs(*CI, Alive, Visited, 02148 Flags & DSGraph::RemoveUnreachableGlobals))) { 02149 CI->markReachableNodes(Alive); 02150 AuxFCallsAlive.insert(&*CI); 02151 Iterate = true; 02152 } 02153 } while (Iterate); 02154 02155 // Move dead aux function calls to the end of the list 02156 unsigned CurIdx = 0; 02157 for (std::list<DSCallSite>::iterator CI = AuxFunctionCalls.begin(), 02158 E = AuxFunctionCalls.end(); CI != E; ) 02159 if (AuxFCallsAlive.count(&*CI)) 02160 ++CI; 02161 else { 02162 // Copy and merge global nodes and dead aux call nodes into the 02163 // GlobalsGraph, and all nodes reachable from those nodes. Update their 02164 // target pointers using the GGCloner. 
02165 // 02166 if (!(Flags & DSGraph::RemoveUnreachableGlobals)) 02167 GlobalsGraph->AuxFunctionCalls.push_back(DSCallSite(*CI, GGCloner)); 02168 02169 AuxFunctionCalls.erase(CI++); 02170 } 02171 02172 // We are finally done with the GGCloner so we can destroy it. 02173 GGCloner.destroy(); 02174 02175 // At this point, any nodes which are visited, but not alive, are nodes 02176 // which can be removed. Loop over all nodes, eliminating completely 02177 // unreachable nodes. 02178 // 02179 std::vector<DSNode*> DeadNodes; 02180 DeadNodes.reserve(Nodes.size()); 02181 for (NodeListTy::iterator NI = Nodes.begin(), E = Nodes.end(); NI != E;) { 02182 DSNode *N = NI++; 02183 assert(!N->isForwarding() && "Forwarded node in nodes list?"); 02184 02185 if (!Alive.count(N)) { 02186 Nodes.remove(N); 02187 assert(!N->isForwarding() && "Cannot remove a forwarding node!"); 02188 DeadNodes.push_back(N); 02189 N->dropAllReferences(); 02190 ++NumDNE; 02191 } 02192 } 02193 02194 // Remove all unreachable globals from the ScalarMap. 02195 // If flag RemoveUnreachableGlobals is set, GlobalNodes has only dead nodes. 02196 // In either case, the dead nodes will not be in the set Alive. 02197 for (unsigned i = 0, e = GlobalNodes.size(); i != e; ++i) 02198 if (!Alive.count(GlobalNodes[i].second)) 02199 ScalarMap.erase(GlobalNodes[i].first); 02200 else 02201 assert((Flags & DSGraph::RemoveUnreachableGlobals) && "non-dead global"); 02202 02203 // Delete all dead nodes now since their referrer counts are zero. 02204 for (unsigned i = 0, e = DeadNodes.size(); i != e; ++i) 02205 delete DeadNodes[i]; 02206 02207 DEBUG(AssertGraphOK(); GlobalsGraph->AssertGraphOK()); 02208 } 02209 02210 void DSGraph::AssertNodeContainsGlobal(const DSNode *N, GlobalValue *GV) const { 02211 assert(std::find(N->globals_begin(),N->globals_end(), GV) != 02212 N->globals_end() && "Global value not in node!"); 02213 } 02214 02215 void DSGraph::AssertCallSiteInGraph(const DSCallSite &CS) const { 02216 if (CS.isIndirectCall()) { 02217 AssertNodeInGraph(CS.getCalleeNode()); 02218 #if 0 02219 if (CS.getNumPtrArgs() && CS.getCalleeNode() == CS.getPtrArg(0).getNode() && 02220 CS.getCalleeNode() && CS.getCalleeNode()->getGlobals().empty()) 02221 std::cerr << "WARNING: WEIRD CALL SITE FOUND!\n"; 02222 #endif 02223 } 02224 AssertNodeInGraph(CS.getRetVal().getNode()); 02225 for (unsigned j = 0, e = CS.getNumPtrArgs(); j != e; ++j) 02226 AssertNodeInGraph(CS.getPtrArg(j).getNode()); 02227 } 02228 02229 void DSGraph::AssertCallNodesInGraph() const { 02230 for (fc_iterator I = fc_begin(), E = fc_end(); I != E; ++I) 02231 AssertCallSiteInGraph(*I); 02232 } 02233 void DSGraph::AssertAuxCallNodesInGraph() const { 02234 for (afc_iterator I = afc_begin(), E = afc_end(); I != E; ++I) 02235 AssertCallSiteInGraph(*I); 02236 } 02237 02238 void DSGraph::AssertGraphOK() const { 02239 for (node_const_iterator NI = node_begin(), E = node_end(); NI != E; ++NI) 02240 NI->assertOK(); 02241 02242 for (ScalarMapTy::const_iterator I = ScalarMap.begin(), 02243 E = ScalarMap.end(); I != E; ++I) { 02244 assert(!I->second.isNull() && "Null node in scalarmap!"); 02245 AssertNodeInGraph(I->second.getNode()); 02246 if (GlobalValue *GV = dyn_cast<GlobalValue>(I->first)) { 02247 assert(I->second.getNode()->isGlobalNode() && 02248 "Global points to node, but node isn't global?"); 02249 AssertNodeContainsGlobal(I->second.getNode(), GV); 02250 } 02251 } 02252 AssertCallNodesInGraph(); 02253 AssertAuxCallNodesInGraph(); 02254 02255 // Check that all pointer arguments to any functions in this 
graph have
02256 // destinations.
02257   for (ReturnNodesTy::const_iterator RI = ReturnNodes.begin(),
02258          E = ReturnNodes.end();
02259        RI != E; ++RI) {
02260     Function &F = *RI->first;
02261     for (Function::arg_iterator AI = F.arg_begin(); AI != F.arg_end(); ++AI)
02262       if (isPointerType(AI->getType()))
02263         assert(!getNodeForValue(AI).isNull() &&
02264                "Pointer argument must be in the scalar map!");
02265   }
02266 }
02267 
02268 /// computeNodeMapping - Given roots in two different DSGraphs, traverse the
02269 /// nodes reachable from the two roots, computing the mapping of nodes from the
02270 /// first to the second graph.  This mapping may be many-to-one (i.e. the first
02271 /// graph may have multiple nodes representing one node in the second graph),
02272 /// but it will not work if there is a one-to-many or many-to-many mapping.
02273 ///
02274 void DSGraph::computeNodeMapping(const DSNodeHandle &NH1,
02275                                  const DSNodeHandle &NH2, NodeMapTy &NodeMap,
02276                                  bool StrictChecking) {
02277   DSNode *N1 = NH1.getNode(), *N2 = NH2.getNode();
02278   if (N1 == 0 || N2 == 0) return;
02279 
02280   DSNodeHandle &Entry = NodeMap[N1];
02281   if (!Entry.isNull()) {
02282     // Termination of recursion!
02283     if (StrictChecking) {
02284       assert(Entry.getNode() == N2 && "Inconsistent mapping detected!");
02285       assert((Entry.getOffset() == (NH2.getOffset()-NH1.getOffset()) ||
02286               Entry.getNode()->isNodeCompletelyFolded()) &&
02287              "Inconsistent mapping detected!");
02288     }
02289     return;
02290   }
02291 
02292   Entry.setTo(N2, NH2.getOffset()-NH1.getOffset());
02293 
02294   // Loop over all of the fields that N1 and N2 have in common, recursively
02295   // mapping the edges together now.
02296   int N2Idx = NH2.getOffset()-NH1.getOffset();
02297   unsigned N2Size = N2->getSize();
02298   if (N2Size == 0) return; // No edges to map to.
02299 
02300   for (unsigned i = 0, e = N1->getSize(); i < e; i += DS::PointerSize) {
02301     const DSNodeHandle &N1NH = N1->getLink(i);
02302     // Don't call N2->getLink if not needed (avoiding crash if N2Idx is not
02303     // aligned right).
02304     if (!N1NH.isNull()) {
02305       if (unsigned(N2Idx)+i < N2Size)
02306         computeNodeMapping(N1NH, N2->getLink(N2Idx+i), NodeMap);
02307       else
02308         computeNodeMapping(N1NH,
02309                            N2->getLink(unsigned(N2Idx+i) % N2Size), NodeMap);
02310     }
02311   }
02312 }
02313 
02314 
02315 /// computeGToGGMapping - Compute the mapping of nodes in this graph to the
02316 /// corresponding nodes in the globals graph.
02317 void DSGraph::computeGToGGMapping(NodeMapTy &NodeMap) {
02318   DSGraph &GG = *getGlobalsGraph();
02319 
02320   DSScalarMap &SM = getScalarMap();
02321   for (DSScalarMap::global_iterator I = SM.global_begin(),
02322          E = SM.global_end(); I != E; ++I)
02323     DSGraph::computeNodeMapping(SM[*I], GG.getNodeForValue(*I), NodeMap);
02324 }
02325 
02326 /// computeGGToGMapping - Compute the mapping of nodes in the global graph to
02327 /// nodes in this graph.  Note that any uses of this method are probably bugs,
02328 /// unless it is known that the globals graph has been merged into this graph!
02329 void DSGraph::computeGGToGMapping(InvNodeMapTy &InvNodeMap) { 02330 NodeMapTy NodeMap; 02331 computeGToGGMapping(NodeMap); 02332 02333 while (!NodeMap.empty()) { 02334 InvNodeMap.insert(std::make_pair(NodeMap.begin()->second, 02335 NodeMap.begin()->first)); 02336 NodeMap.erase(NodeMap.begin()); 02337 } 02338 } 02339 02340 02341 /// computeCalleeCallerMapping - Given a call from a function in the current 02342 /// graph to the 'Callee' function (which lives in 'CalleeGraph'), compute the 02343 /// mapping of nodes from the callee to nodes in the caller. 02344 void DSGraph::computeCalleeCallerMapping(DSCallSite CS, const Function &Callee, 02345 DSGraph &CalleeGraph, 02346 NodeMapTy &NodeMap) { 02347 02348 DSCallSite CalleeArgs = 02349 CalleeGraph.getCallSiteForArguments(const_cast<Function&>(Callee)); 02350 02351 computeNodeMapping(CalleeArgs.getRetVal(), CS.getRetVal(), NodeMap); 02352 02353 unsigned NumArgs = CS.getNumPtrArgs(); 02354 if (NumArgs > CalleeArgs.getNumPtrArgs()) 02355 NumArgs = CalleeArgs.getNumPtrArgs(); 02356 02357 for (unsigned i = 0; i != NumArgs; ++i) 02358 computeNodeMapping(CalleeArgs.getPtrArg(i), CS.getPtrArg(i), NodeMap); 02359 02360 // Map the nodes that are pointed to by globals. 02361 DSScalarMap &CalleeSM = CalleeGraph.getScalarMap(); 02362 DSScalarMap &CallerSM = getScalarMap(); 02363 02364 if (CalleeSM.global_size() >= CallerSM.global_size()) { 02365 for (DSScalarMap::global_iterator GI = CallerSM.global_begin(), 02366 E = CallerSM.global_end(); GI != E; ++GI) 02367 if (CalleeSM.global_count(*GI)) 02368 computeNodeMapping(CalleeSM[*GI], CallerSM[*GI], NodeMap); 02369 } else { 02370 for (DSScalarMap::global_iterator GI = CalleeSM.global_begin(), 02371 E = CalleeSM.global_end(); GI != E; ++GI) 02372 if (CallerSM.global_count(*GI)) 02373 computeNodeMapping(CalleeSM[*GI], CallerSM[*GI], NodeMap); 02374 } 02375 }
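// A minimal usage sketch of the interfaces above, assuming a client pass that
// already holds DSGraphs produced by a DSA pass (for example BUDataStructures).
// The names CallerG, CalleeG, TheCall, and Callee are placeholders for values
// such a client would already have; the flag choices are illustrative only.
#if 0
static void inlineCalleeGraphSketch(DSGraph &CallerG, DSGraph &CalleeG,
                                    CallSite TheCall, Function &Callee) {
  // Build the caller-side DSCallSite for the LLVM call instruction.
  DSCallSite CS = CallerG.getDSCallSiteForCallSite(TheCall);

  // Merge the callee's graph into the caller's graph.  mergeInGraph requires
  // that call nodes not be copied, hence DontCloneCallNodes.
  CallerG.mergeInGraph(CS, Callee, CalleeG,
                       DSGraph::DontCloneCallNodes |
                       DSGraph::DontCloneAuxCallNodes);

  // Recover, for each callee node, the caller node it now corresponds to.
  DSGraph::NodeMapTy CalleeToCaller;
  CallerG.computeCalleeCallerMapping(CS, Callee, CalleeG, CalleeToCaller);

  // Re-derive incompleteness for formal arguments and drop nodes that the
  // merge left unreachable.
  CallerG.markIncompleteNodes(DSGraph::MarkFormalArgs);
  CallerG.removeDeadNodes(DSGraph::RemoveUnreachableGlobals);
}
#endif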