// LLVM API Documentation (doxygen-generated source listing header — not code)
//===-- JITEmitter.cpp - Write machine code to executable memory ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a MachineCodeEmitter object that is used by the JIT to
// write machine code to memory and remember where relocatable values are.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "jit"
#include "JIT.h"
#include "llvm/Constant.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineCodeEmitter.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRelocation.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetJITInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/System/Memory.h"
#include <algorithm>
#include <iostream>
using namespace llvm;

namespace {
  // Statistics reported with -stats.
  Statistic<> NumBytes("jit", "Number of bytes of machine code compiled");
  Statistic<> NumRelos("jit", "Number of relocations applied");

  // TheJIT - File-local pointer to the single active JIT instance.  Set by the
  // JITEmitter constructor (see below) and read throughout this file.
  JIT *TheJIT = 0;
}


//===----------------------------------------------------------------------===//
// JITMemoryManager code.
//
namespace {
  /// MemoryRangeHeader - For a range of memory, this is the header that we put
  /// on the block of memory.  It is carefully crafted to be one word of memory.
  /// Allocated blocks have just this header, free'd blocks have FreeRangeHeader
  /// which starts with this.
  struct FreeRangeHeader;
  struct MemoryRangeHeader {
    /// ThisAllocated - This is true if this block is currently allocated.  If
    /// not, this can be converted to a FreeRangeHeader.
    intptr_t ThisAllocated : 1;

    /// PrevAllocated - Keep track of whether the block immediately before us is
    /// allocated.  If not, the word immediately before this header is the size
    /// of the previous block.
    intptr_t PrevAllocated : 1;

    /// BlockSize - This is the size in bytes of this memory block,
    /// including this header.  The three bitfields together occupy exactly one
    /// machine word.
    uintptr_t BlockSize : (sizeof(intptr_t)*8 - 2);


    /// getBlockAfter - Return the memory block immediately after this one.
    ///
    MemoryRangeHeader &getBlockAfter() const {
      return *(MemoryRangeHeader*)((char*)this+BlockSize);
    }

    /// getFreeBlockBefore - If the block before this one is free, return it,
    /// otherwise return null.
    FreeRangeHeader *getFreeBlockBefore() const {
      if (PrevAllocated) return 0;
      // The word before a free block's successor holds that free block's size
      // (written by SetEndOfBlockSizeMarker), so we can walk backwards to it.
      intptr_t PrevSize = ((intptr_t *)this)[-1];
      return (FreeRangeHeader*)((char*)this-PrevSize);
    }

    /// FreeBlock - Turn an allocated block into a free block, adjusting
    /// bits in the object headers, and adding an end of region memory block.
    FreeRangeHeader *FreeBlock(FreeRangeHeader *FreeList);

    /// TrimAllocationToSize - If this allocated block is significantly larger
    /// than NewSize, split it into two pieces (where the former is NewSize
    /// bytes, including the header), and add the new block to the free list.
    FreeRangeHeader *TrimAllocationToSize(FreeRangeHeader *FreeList,
                                          uint64_t NewSize);
  };

  /// FreeRangeHeader - For a memory block that isn't already allocated, this
  /// keeps track of the current block and has a pointer to the next free block.
  /// Free blocks are kept on a circularly linked list.
  struct FreeRangeHeader : public MemoryRangeHeader {
    FreeRangeHeader *Prev;
    FreeRangeHeader *Next;

    /// getMinBlockSize - Get the minimum size for a memory block.  Blocks
    /// smaller than this size cannot be created.
    static unsigned getMinBlockSize() {
      // Room for the header/links plus the trailing end-of-block size word.
      return sizeof(FreeRangeHeader)+sizeof(intptr_t);
    }

    /// SetEndOfBlockSizeMarker - The word at the end of every free block is
    /// known to be the size of the free block.  Set it for this block.
    void SetEndOfBlockSizeMarker() {
      void *EndOfBlock = (char*)this + BlockSize;
      ((intptr_t *)EndOfBlock)[-1] = BlockSize;
    }

    /// RemoveFromFreeList - Unlink this block from the circular free list and
    /// return the block that followed it.
    FreeRangeHeader *RemoveFromFreeList() {
      assert(Next->Prev == this && Prev->Next == this && "Freelist broken!");
      Next->Prev = Prev;
      return Prev->Next = Next;
    }

    /// AddToFreeList - Splice this block into the circular free list,
    /// immediately before FreeList.
    void AddToFreeList(FreeRangeHeader *FreeList) {
      Next = FreeList;
      Prev = FreeList->Prev;
      Prev->Next = this;
      Next->Prev = this;
    }

    /// GrowBlock - The block after this block just got deallocated.  Merge it
    /// into the current block.
    void GrowBlock(uintptr_t NewSize);

    /// AllocateBlock - Mark this entire block allocated, updating freelists
    /// etc.  This returns a pointer to the circular free-list.
    FreeRangeHeader *AllocateBlock();
  };
}


/// AllocateBlock - Mark this entire block allocated, updating freelists
/// etc.  This returns a pointer to the circular free-list.
FreeRangeHeader *FreeRangeHeader::AllocateBlock() {
  assert(!ThisAllocated && !getBlockAfter().PrevAllocated &&
         "Cannot allocate an allocated block!");
  // Mark this block allocated.
  ThisAllocated = 1;
  getBlockAfter().PrevAllocated = 1;

  // Remove it from the free list.
  return RemoveFromFreeList();
}

/// FreeBlock - Turn an allocated block into a free block, adjusting
/// bits in the object headers, and adding an end of region memory block.
/// If possible, coallesce this block with neighboring blocks.  Return the
/// FreeRangeHeader to allocate from.
FreeRangeHeader *MemoryRangeHeader::FreeBlock(FreeRangeHeader *FreeList) {
  MemoryRangeHeader *FollowingBlock = &getBlockAfter();
  assert(ThisAllocated && "This block is already allocated!");
  assert(FollowingBlock->PrevAllocated && "Flags out of sync!");

  FreeRangeHeader *FreeListToReturn = FreeList;

  // If the block after this one is free, merge it into this block.
  if (!FollowingBlock->ThisAllocated) {
    FreeRangeHeader &FollowingFreeBlock = *(FreeRangeHeader *)FollowingBlock;
    // "FreeList" always needs to be a valid free block.  If we're about to
    // coallesce with it, update our notion of what the free list is.
    if (&FollowingFreeBlock == FreeList) {
      FreeList = FollowingFreeBlock.Next;
      FreeListToReturn = 0;
      assert(&FollowingFreeBlock != FreeList && "No tombstone block?");
    }
    FollowingFreeBlock.RemoveFromFreeList();

    // Include the following block into this one.
    BlockSize += FollowingFreeBlock.BlockSize;
    FollowingBlock = &FollowingFreeBlock.getBlockAfter();

    // Tell the block after the block we are coallescing that this block is
    // allocated.
    FollowingBlock->PrevAllocated = 1;
  }

  assert(FollowingBlock->ThisAllocated && "Missed coallescing?");

  // If the block before this one is free, grow it to swallow this block
  // instead of putting this block on the free list separately.
  if (FreeRangeHeader *PrevFreeBlock = getFreeBlockBefore()) {
    PrevFreeBlock->GrowBlock(PrevFreeBlock->BlockSize + BlockSize);
    return FreeListToReturn ? FreeListToReturn : PrevFreeBlock;
  }

  // Otherwise, mark this block free.
  FreeRangeHeader &FreeBlock = *(FreeRangeHeader*)this;
  FollowingBlock->PrevAllocated = 0;
  FreeBlock.ThisAllocated = 0;

  // Link this into the linked list of free blocks.
  FreeBlock.AddToFreeList(FreeList);

  // Add a marker at the end of the block, indicating the size of this free
  // block.
  FreeBlock.SetEndOfBlockSizeMarker();
  return FreeListToReturn ? FreeListToReturn : &FreeBlock;
}

/// GrowBlock - The block after this block just got deallocated.  Merge it
/// into the current block.
void FreeRangeHeader::GrowBlock(uintptr_t NewSize) {
  assert(NewSize > BlockSize && "Not growing block?");
  BlockSize = NewSize;
  SetEndOfBlockSizeMarker();
  getBlockAfter().PrevAllocated = 0;
}

/// TrimAllocationToSize - If this allocated block is significantly larger
/// than NewSize, split it into two pieces (where the former is NewSize
/// bytes, including the header), and add the new block to the free list.
FreeRangeHeader *MemoryRangeHeader::
TrimAllocationToSize(FreeRangeHeader *FreeList, uint64_t NewSize) {
  assert(ThisAllocated && getBlockAfter().PrevAllocated &&
         "Cannot deallocate part of an allocated block!");

  // Round up size for alignment of header.
  unsigned HeaderAlign = __alignof(FreeRangeHeader);
  NewSize = (NewSize+ (HeaderAlign-1)) & ~(HeaderAlign-1);

  // Size is now the size of the block we will remove from the start of the
  // current block.
  assert(NewSize <= BlockSize &&
         "Allocating more space from this block than exists!");

  // If splitting this block will cause the remainder to be too small, do not
  // split the block.
  if (BlockSize <= NewSize+FreeRangeHeader::getMinBlockSize())
    return FreeList;

  // Otherwise, we splice the required number of bytes out of this block, form
  // a new block immediately after it, then mark this block allocated.
  MemoryRangeHeader &FormerNextBlock = getBlockAfter();

  // Change the size of this block.
  BlockSize = NewSize;

  // Get the new block we just sliced out and turn it into a free block.
  FreeRangeHeader &NewNextBlock = (FreeRangeHeader &)getBlockAfter();
  NewNextBlock.BlockSize = (char*)&FormerNextBlock - (char*)&NewNextBlock;
  NewNextBlock.ThisAllocated = 0;
  NewNextBlock.PrevAllocated = 1;
  NewNextBlock.SetEndOfBlockSizeMarker();
  FormerNextBlock.PrevAllocated = 0;
  NewNextBlock.AddToFreeList(FreeList);
  return &NewNextBlock;
}


namespace {
  /// JITMemoryManager - Manage memory for the JIT code generation in a logical,
  /// sane way.  This splits a large block of MAP_NORESERVE'd memory into two
  /// sections, one for function stubs, one for the functions themselves.  We
  /// have to do this because we may need to emit a function stub while in the
  /// middle of emitting a function, and we don't know how large the function we
  /// are emitting is.  This never bothers to release the memory, because when
  /// we are ready to destroy the JIT, the program exits.
  class JITMemoryManager {
    std::vector<sys::MemoryBlock> Blocks; // Memory blocks allocated by the JIT
    FreeRangeHeader *FreeMemoryList;      // Circular list of free blocks.

    // When emitting code into a memory block, this is the block.
    MemoryRangeHeader *CurBlock;

    unsigned char *CurStubPtr, *StubBase;
    unsigned char *GOTBase;      // Target Specific reserved memory

    // Centralize memory block allocation.
    sys::MemoryBlock getNewMemoryBlock(unsigned size);

    // FunctionBlocks - Map each emitted function to the memory block holding
    // its body, so deallocateMemForFunction can free it later.
    std::map<const Function*, MemoryRangeHeader*> FunctionBlocks;
  public:
    JITMemoryManager(bool useGOT);
    ~JITMemoryManager();

    inline unsigned char *allocateStub(unsigned StubSize);

    /// startFunctionBody - When a function starts, allocate a block of free
    /// executable memory, returning a pointer to it and its actual size.
    unsigned char *startFunctionBody(uintptr_t &ActualSize) {
      CurBlock = FreeMemoryList;

      // Allocate the entire memory block; endFunctionBody will give back
      // whatever the function doesn't use.
      FreeMemoryList = FreeMemoryList->AllocateBlock();
      ActualSize = CurBlock->BlockSize-sizeof(MemoryRangeHeader);
      return (unsigned char *)(CurBlock+1);
    }

    /// endFunctionBody - The function F is now allocated, and takes the memory
    /// in the range [FunctionStart,FunctionEnd).
    void endFunctionBody(const Function *F, unsigned char *FunctionStart,
                         unsigned char *FunctionEnd) {
      assert(FunctionEnd > FunctionStart);
      assert(FunctionStart == (unsigned char *)(CurBlock+1) &&
             "Mismatched function start/end!");

      uintptr_t BlockSize = FunctionEnd - (unsigned char *)CurBlock;
      FunctionBlocks[F] = CurBlock;

      // Release the memory at the end of this block that isn't needed.
      FreeMemoryList =CurBlock->TrimAllocationToSize(FreeMemoryList, BlockSize);
    }

    unsigned char *getGOTBase() const {
      return GOTBase;
    }
    bool isManagingGOT() const {
      return GOTBase != NULL;
    }

    /// deallocateMemForFunction - Deallocate all memory for the specified
    /// function body.
    void deallocateMemForFunction(const Function *F) {
      std::map<const Function*, MemoryRangeHeader*>::iterator
        I = FunctionBlocks.find(F);
      if (I == FunctionBlocks.end()) return;

      // Find the block that is allocated for this function.
      MemoryRangeHeader *MemRange = I->second;
      assert(MemRange->ThisAllocated && "Block isn't allocated!");

      // Fill the buffer with garbage!  (Debug builds only: helps catch use of
      // stale code pointers.)
      DEBUG(memset(MemRange+1, 0xCD, MemRange->BlockSize-sizeof(*MemRange)));

      // Free the memory.
      FreeMemoryList = MemRange->FreeBlock(FreeMemoryList);

      // Finally, remove this entry from FunctionBlocks.
      FunctionBlocks.erase(I);
    }
  };
}

JITMemoryManager::JITMemoryManager(bool useGOT) {
  // Allocate a 16M block of memory for functions.
  sys::MemoryBlock MemBlock = getNewMemoryBlock(16 << 20);

  unsigned char *MemBase = reinterpret_cast<unsigned char*>(MemBlock.base());

  // Allocate stubs backwards from the base, allocate functions forward
  // from the base.
  StubBase   = MemBase;
  CurStubPtr = MemBase + 512*1024; // Use 512k for stubs, working backwards.

  // We set up the memory chunk with 4 mem regions, like this:
  //  [ START
  //    [ Free      #0 ] -> Large space to allocate functions from.
  //    [ Allocated #1 ] -> Tiny space to separate regions.
  //    [ Free      #2 ] -> Tiny space so there is always at least 1 free block.
  //    [ Allocated #3 ] -> Tiny space to prevent looking past end of block.
  //  END ]
  //
  // The last three blocks are never deallocated or touched.

  // Add MemoryRangeHeader to the end of the memory region, indicating that
  // the space after the block of memory is allocated.  This is block #3.
  MemoryRangeHeader *Mem3 = (MemoryRangeHeader*)(MemBase+MemBlock.size())-1;
  Mem3->ThisAllocated = 1;
  Mem3->PrevAllocated = 0;
  Mem3->BlockSize     = 0;

  /// Add a tiny free region so that the free list always has one entry.
  FreeRangeHeader *Mem2 =
    (FreeRangeHeader *)(((char*)Mem3)-FreeRangeHeader::getMinBlockSize());
  Mem2->ThisAllocated = 0;
  Mem2->PrevAllocated = 1;
  Mem2->BlockSize     = FreeRangeHeader::getMinBlockSize();
  Mem2->SetEndOfBlockSizeMarker();
  Mem2->Prev = Mem2;   // Mem2 *is* the free list for now.
  Mem2->Next = Mem2;

  /// Add a tiny allocated region so that Mem2 is never coallesced away.
  MemoryRangeHeader *Mem1 = (MemoryRangeHeader*)Mem2-1;
  Mem1->ThisAllocated = 1;
  Mem1->PrevAllocated = 0;
  Mem1->BlockSize     = (char*)Mem2 - (char*)Mem1;

  // Add a FreeRangeHeader to the start of the function body region, indicating
  // that the space is free.  Mark the previous block allocated so we never look
  // at it.
  FreeRangeHeader *Mem0 = (FreeRangeHeader*)CurStubPtr;
  Mem0->ThisAllocated = 0;
  Mem0->PrevAllocated = 1;
  Mem0->BlockSize = (char*)Mem1-(char*)Mem0;
  Mem0->SetEndOfBlockSizeMarker();
  Mem0->AddToFreeList(Mem2);

  // Start out with the freelist pointing to Mem0.
  FreeMemoryList = Mem0;

  // Allocate the GOT.
  GOTBase = NULL;
  if (useGOT) GOTBase = new unsigned char[sizeof(void*) * 8192];
}

JITMemoryManager::~JITMemoryManager() {
  // Return all executable memory to the OS.
  for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
    sys::Memory::ReleaseRWX(Blocks[i]);

  delete[] GOTBase;
  Blocks.clear();
}

unsigned char *JITMemoryManager::allocateStub(unsigned StubSize) {
  // Stubs grow downward from CurStubPtr toward StubBase.
  CurStubPtr -= StubSize;
  if (CurStubPtr < StubBase) {
    // FIXME: allocate a new block
    std::cerr << "JIT ran out of memory for function stubs!\n";
    abort();
  }
  return CurStubPtr;
}

sys::MemoryBlock JITMemoryManager::getNewMemoryBlock(unsigned size) {
  // Allocate a new block close to the last one.
  const sys::MemoryBlock *BOld = Blocks.empty() ? 0 : &Blocks.front();
  std::string ErrMsg;
  sys::MemoryBlock B = sys::Memory::AllocateRWX(size, BOld, &ErrMsg);
  if (B.base() == 0) {
    std::cerr << "Allocation failed when allocating new memory in the JIT\n";
    std::cerr << ErrMsg << "\n";
    abort();
  }
  Blocks.push_back(B);
  return B;
}

//===----------------------------------------------------------------------===//
// JIT lazy compilation code.
//
namespace {
  /// JITResolverState - Maps guarded by the JIT lock; accessors assert that
  /// the caller holds it.
  class JITResolverState {
  private:
    /// FunctionToStubMap - Keep track of the stub created for a particular
    /// function so that we can reuse them if necessary.
    std::map<Function*, void*> FunctionToStubMap;

    /// StubToFunctionMap - Keep track of the function that each stub
    /// corresponds to.
    std::map<void*, Function*> StubToFunctionMap;

  public:
    std::map<Function*, void*>& getFunctionToStubMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return FunctionToStubMap;
    }

    std::map<void*, Function*>& getStubToFunctionMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return StubToFunctionMap;
    }
  };

  /// JITResolver - Keep track of, and resolve, call sites for functions that
  /// have not yet been compiled.
  class JITResolver {
    /// MCE - The MachineCodeEmitter to use to emit stubs with.
    MachineCodeEmitter &MCE;

    /// LazyResolverFn - The target lazy resolver function that we actually
    /// rewrite instructions to use.
    TargetJITInfo::LazyResolverFn LazyResolverFn;

    JITResolverState state;

    /// ExternalFnToStubMap - This is the equivalent of FunctionToStubMap for
    /// external functions.
    std::map<void*, void*> ExternalFnToStubMap;

    // revGOTMap - Map addresses to indexes in the GOT.
    std::map<void*, unsigned> revGOTMap;
    unsigned nextGOTIndex;

  public:
    JITResolver(MachineCodeEmitter &mce) : MCE(mce), nextGOTIndex(0) {
      LazyResolverFn =
        TheJIT->getJITInfo().getLazyResolverFunction(JITCompilerFn);
    }

    /// getFunctionStub - This returns a pointer to a function stub, creating
    /// one on demand as needed.
    void *getFunctionStub(Function *F);

    /// getExternalFunctionStub - Return a stub for the function at the
    /// specified address, created lazily on demand.
    void *getExternalFunctionStub(void *FnAddr);

    /// AddCallbackAtLocation - If the target is capable of rewriting an
    /// instruction without the use of a stub, record the location of the use so
    /// we know which function is being used at the location.
    void *AddCallbackAtLocation(Function *F, void *Location) {
      MutexGuard locked(TheJIT->lock);
      /// Get the target-specific JIT resolver function.
      state.getStubToFunctionMap(locked)[Location] = F;
      return (void*)(intptr_t)LazyResolverFn;
    }

    /// getGOTIndexForAddr - Return a new or existing index in the GOT for
    /// an address.  This function only manages slots, it does not manage the
    /// contents of the slots or the memory associated with the GOT.
    unsigned getGOTIndexForAddr(void* addr);

    /// JITCompilerFn - This function is called to resolve a stub to a compiled
    /// address.  If the LLVM Function corresponding to the stub has not yet
    /// been compiled, this function compiles it first.
    static void *JITCompilerFn(void *Stub);
  };
}

/// getJITResolver - This function returns the one instance of the JIT resolver.
/// NOTE(review): the MCE argument is only consulted on the first call, which
/// constructs the singleton; later calls ignore it.
static JITResolver &getJITResolver(MachineCodeEmitter *MCE = 0) {
  static JITResolver TheJITResolver(*MCE);
  return TheJITResolver;
}

/// getFunctionStub - This returns a pointer to a function stub, creating
/// one on demand as needed.
void *JITResolver::getFunctionStub(Function *F) {
  MutexGuard locked(TheJIT->lock);

  // If we already have a stub for this function, recycle it.
  void *&Stub = state.getFunctionToStubMap(locked)[F];
  if (Stub) return Stub;

  // Call the lazy resolver function unless we already KNOW it is an external
  // function, in which case we just skip the lazy resolution step.
  void *Actual = (void*)(intptr_t)LazyResolverFn;
  if (F->isExternal() && F->hasExternalLinkage())
    Actual = TheJIT->getPointerToFunction(F);

  // Otherwise, codegen a new stub.  For now, the stub will call the lazy
  // resolver function.
  Stub = TheJIT->getJITInfo().emitFunctionStub(Actual, MCE);

  if (Actual != (void*)(intptr_t)LazyResolverFn) {
    // If we are getting the stub for an external function, we really want the
    // address of the stub in the GlobalAddressMap for the JIT, not the address
    // of the external function.
    TheJIT->updateGlobalMapping(F, Stub);
  }

  // Invalidate the icache if necessary.
  TheJIT->getJITInfo().
    synchronizeICache(Stub, MCE.getCurrentPCValue()-(intptr_t)Stub);

  DEBUG(std::cerr << "JIT: Stub emitted at [" << Stub << "] for function '"
                  << F->getName() << "'\n");

  // Finally, keep track of the stub-to-Function mapping so that the
  // JITCompilerFn knows which function to compile!
  state.getStubToFunctionMap(locked)[Stub] = F;
  return Stub;
}

/// getExternalFunctionStub - Return a stub for the function at the
/// specified address, created lazily on demand.
void *JITResolver::getExternalFunctionStub(void *FnAddr) {
  // If we already have a stub for this function, recycle it.
  void *&Stub = ExternalFnToStubMap[FnAddr];
  if (Stub) return Stub;

  Stub = TheJIT->getJITInfo().emitFunctionStub(FnAddr, MCE);

  // Invalidate the icache if necessary.
  TheJIT->getJITInfo().
    synchronizeICache(Stub, MCE.getCurrentPCValue()-(intptr_t)Stub);

  DEBUG(std::cerr << "JIT: Stub emitted at [" << Stub
        << "] for external function at '" << FnAddr << "'\n");
  return Stub;
}

unsigned JITResolver::getGOTIndexForAddr(void* addr) {
  // operator[] default-inserts 0, which doubles as "no index assigned yet";
  // slot 0 of the GOT is therefore never handed out here.
  unsigned idx = revGOTMap[addr];
  if (!idx) {
    idx = ++nextGOTIndex;
    revGOTMap[addr] = idx;
    DEBUG(std::cerr << "Adding GOT entry " << idx
          << " for addr " << addr << "\n");
    //    ((void**)MemMgr.getGOTBase())[idx] = addr;
  }
  return idx;
}

/// JITCompilerFn - This function is called when a lazy compilation stub has
/// been entered.  It looks up which function this stub corresponds to, compiles
/// it if necessary, then returns the resultant function pointer.
void *JITResolver::JITCompilerFn(void *Stub) {
  JITResolver &JR = getJITResolver();

  MutexGuard locked(TheJIT->lock);

  // The address given to us for the stub may not be exactly right, it might be
  // a little bit after the stub.  As such, use upper_bound to find it.
  std::map<void*, Function*>::iterator I =
    JR.state.getStubToFunctionMap(locked).upper_bound(Stub);
  assert(I != JR.state.getStubToFunctionMap(locked).begin() &&
         "This is not a known stub!");
  Function *F = (--I)->second;

  // We might like to remove the stub from the StubToFunction map.
  // We can't do that! Multiple threads could be stuck, waiting to acquire the
  // lock above. As soon as the 1st function finishes compiling the function,
  // the next one will be released, and needs to be able to find the function it
  // needs to call.
  //JR.state.getStubToFunctionMap(locked).erase(I);

  DEBUG(std::cerr << "JIT: Lazily resolving function '" << F->getName()
                  << "' In stub ptr = " << Stub << " actual ptr = "
                  << I->first << "\n");

  void *Result = TheJIT->getPointerToFunction(F);

  // We don't need to reuse this stub in the future, as F is now compiled.
  JR.state.getFunctionToStubMap(locked).erase(F);

  // FIXME: We could rewrite all references to this stub if we knew them.

  // What we will do is set the compiled function address to map to the
  // same GOT entry as the stub so that later clients may update the GOT
  // if they see it still using the stub address.
  // Note: this is done so the Resolver doesn't have to manage GOT memory
  // Do this without allocating map space if the target isn't using a GOT
  if(JR.revGOTMap.find(Stub) != JR.revGOTMap.end())
    JR.revGOTMap[Result] = JR.revGOTMap[Stub];

  return Result;
}


//===----------------------------------------------------------------------===//
// JITEmitter code.
//
namespace {
  /// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is
  /// used to output functions to memory for execution.
  class JITEmitter : public MachineCodeEmitter {
    JITMemoryManager MemMgr;

    // When outputting a function stub in the context of some other function, we
    // save BufferBegin/BufferEnd/CurBufferPtr here.
    unsigned char *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;

    /// Relocations - These are the relocations that the function needs, as
    /// emitted.
    std::vector<MachineRelocation> Relocations;

    /// MBBLocations - This vector is a mapping from MBB ID's to their address.
    /// It is filled in by the StartMachineBasicBlock callback and queried by
    /// the getMachineBasicBlockAddress callback.
    std::vector<intptr_t> MBBLocations;

    /// ConstantPool - The constant pool for the current function.
    ///
    MachineConstantPool *ConstantPool;

    /// ConstantPoolBase - A pointer to the first entry in the constant pool.
    ///
    void *ConstantPoolBase;

    /// JumpTable - The jump table info for the current function.
    ///
    MachineJumpTableInfo *JumpTable;

    /// JumpTableBase - A pointer to the first entry in the jump table.
    ///
    void *JumpTableBase;
  public:
    JITEmitter(JIT &jit) : MemMgr(jit.getJITInfo().needsGOT()) {
      // Publish the JIT instance to the file-local TheJIT pointer.
      TheJIT = &jit;
      DEBUG(if (MemMgr.isManagingGOT()) std::cerr << "JIT is managing a GOT\n");
    }

    virtual void startFunction(MachineFunction &F);
    virtual bool finishFunction(MachineFunction &F);

    void emitConstantPool(MachineConstantPool *MCP);
    void initJumpTableInfo(MachineJumpTableInfo *MJTI);
    void emitJumpTableInfo(MachineJumpTableInfo *MJTI);

    virtual void startFunctionStub(unsigned StubSize);
    virtual void* finishFunctionStub(const Function *F);

    virtual void addRelocation(const MachineRelocation &MR) {
      Relocations.push_back(MR);
    }

    virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) {
      // Grow with slack (2x) to avoid repeated reallocation as MBBs arrive.
      if (MBBLocations.size() <= (unsigned)MBB->getNumber())
        MBBLocations.resize((MBB->getNumber()+1)*2);
      MBBLocations[MBB->getNumber()] = getCurrentPCValue();
    }

    virtual intptr_t getConstantPoolEntryAddress(unsigned Entry) const;
    virtual intptr_t getJumpTableEntryAddress(unsigned Entry) const;

    virtual intptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const {
      assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
             MBBLocations[MBB->getNumber()] && "MBB not emitted!");
      return MBBLocations[MBB->getNumber()];
    }

    /// deallocateMemForFunction - Deallocate all memory for the specified
    /// function body.
    void deallocateMemForFunction(Function *F) {
      MemMgr.deallocateMemForFunction(F);
    }
  private:
    void *getPointerToGlobal(GlobalValue *GV, void *Reference, bool NoNeedStub);
  };
}

void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference,
                                     bool DoesntNeedStub) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
    /// FIXME: If we straightened things out, this could actually emit the
    /// global immediately instead of queuing it for codegen later!
    return TheJIT->getOrEmitGlobalVariable(GV);
  }

  // If we have already compiled the function, return a pointer to its body.
  Function *F = cast<Function>(V);
  void *ResultPtr = TheJIT->getPointerToGlobalIfAvailable(F);
  if (ResultPtr) return ResultPtr;

  if (F->hasExternalLinkage() && F->isExternal()) {
    // If this is an external function pointer, we can force the JIT to
    // 'compile' it, which really just adds it to the map.
    if (DoesntNeedStub)
      return TheJIT->getPointerToFunction(F);

    return getJITResolver(this).getFunctionStub(F);
  }

  // Okay, the function has not been compiled yet, if the target callback
  // mechanism is capable of rewriting the instruction directly, prefer to do
  // that instead of emitting a stub.
  if (DoesntNeedStub)
    return getJITResolver(this).AddCallbackAtLocation(F, Reference);

  // Otherwise, we have to emit a lazy resolving stub.
  return getJITResolver(this).getFunctionStub(F);
}

void JITEmitter::startFunction(MachineFunction &F) {
  // Claim a whole free block; finishFunction/endFunctionBody will trim off
  // whatever this function doesn't use.
  uintptr_t ActualSize;
  BufferBegin = CurBufferPtr = MemMgr.startFunctionBody(ActualSize);
  BufferEnd = BufferBegin+ActualSize;

  emitConstantPool(F.getConstantPool());
  initJumpTableInfo(F.getJumpTableInfo());

  // About to start emitting the machine code for the function.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));
  TheJIT->updateGlobalMapping(F.getFunction(), CurBufferPtr);

  MBBLocations.clear();
}

bool JITEmitter::finishFunction(MachineFunction &F) {
  if (CurBufferPtr == BufferEnd) {
    // FIXME: Allocate more space, then try again.
    std::cerr << "JIT: Ran out of space for generated machine code!\n";
    abort();
  }

  emitJumpTableInfo(F.getJumpTableInfo());

  // FnStart is the start of the text, not the start of the constant pool and
  // other per-function data.
  unsigned char *FnStart =
    (unsigned char *)TheJIT->getPointerToGlobalIfAvailable(F.getFunction());
  unsigned char *FnEnd   = CurBufferPtr;

  MemMgr.endFunctionBody(F.getFunction(), BufferBegin, FnEnd);
  NumBytes += FnEnd-FnStart;

  if (!Relocations.empty()) {
    NumRelos += Relocations.size();

    // Resolve the relocations to concrete pointers.
    for (unsigned i = 0, e = Relocations.size(); i != e; ++i) {
      MachineRelocation &MR = Relocations[i];
      void *ResultPtr;
      if (MR.isString()) {
        ResultPtr = TheJIT->getPointerToNamedFunction(MR.getString());

        // If the target REALLY wants a stub for this function, emit it now.
        if (!MR.doesntNeedFunctionStub())
          ResultPtr = getJITResolver(this).getExternalFunctionStub(ResultPtr);
      } else if (MR.isGlobalValue()) {
        ResultPtr = getPointerToGlobal(MR.getGlobalValue(),
                                       BufferBegin+MR.getMachineCodeOffset(),
                                       MR.doesntNeedFunctionStub());
      } else if (MR.isConstantPoolIndex()){
        assert(MR.isConstantPoolIndex());
        ResultPtr=(void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex());
      } else {
        assert(MR.isJumpTableIndex());
        ResultPtr=(void*)getJumpTableEntryAddress(MR.getJumpTableIndex());
      }

      MR.setResultPointer(ResultPtr);

      // if we are managing the GOT and the relocation wants an index,
      // give it one
      if (MemMgr.isManagingGOT() && MR.isGOTRelative()) {
        unsigned idx = getJITResolver(this).getGOTIndexForAddr(ResultPtr);
        MR.setGOTIndex(idx);
        if (((void**)MemMgr.getGOTBase())[idx] != ResultPtr) {
          DEBUG(std::cerr << "GOT was out of date for " << ResultPtr
                << " pointing at " << ((void**)MemMgr.getGOTBase())[idx]
                << "\n");
          ((void**)MemMgr.getGOTBase())[idx] = ResultPtr;
        }
      }
    }

    TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0],
                                  Relocations.size(), MemMgr.getGOTBase());
  }

  // Update the GOT entry for F to point to the new code.
  if(MemMgr.isManagingGOT()) {
    unsigned idx = getJITResolver(this).getGOTIndexForAddr((void*)BufferBegin);
    if (((void**)MemMgr.getGOTBase())[idx] != (void*)BufferBegin) {
      DEBUG(std::cerr << "GOT was out of date for " << (void*)BufferBegin
            << " pointing at " << ((void**)MemMgr.getGOTBase())[idx] << "\n");
      ((void**)MemMgr.getGOTBase())[idx] = (void*)BufferBegin;
    }
  }

  // Resolve BasicBlock references.
  TheJIT->getJITInfo().resolveBBRefs(*this);

  // Invalidate the icache if necessary.
  TheJIT->getJITInfo().synchronizeICache(FnStart, FnEnd-FnStart);

  DEBUG(std::cerr << "JIT: Finished CodeGen of [" << (void*)FnStart
                  << "] Function: " << F.getFunction()->getName()
                  << ": " << (FnEnd-FnStart) << " bytes of text, "
                  << Relocations.size() << " relocations\n");
  Relocations.clear();
  return false;
}

void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
  const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
  if (Constants.empty()) return;

  // Total size = offset of the last entry plus its type's size.
  unsigned Size = Constants.back().Offset;
  Size += TheJIT->getTargetData()->getTypeSize(Constants.back().Val->getType());

  ConstantPoolBase = allocateSpace(Size, 1 << MCP->getConstantPoolAlignment());
  ConstantPool = MCP;

  if (ConstantPoolBase == 0) return;  // Buffer overflow.

  // Initialize the memory for all of the constant pool entries.
  for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
    void *CAddr = (char*)ConstantPoolBase+Constants[i].Offset;
    TheJIT->InitializeMemory(Constants[i].Val, CAddr);
  }
}

void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) {
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty()) return;

  unsigned NumEntries = 0;
  for (unsigned i = 0, e = JT.size(); i != e; ++i)
    NumEntries += JT[i].MBBs.size();

  unsigned EntrySize = MJTI->getEntrySize();

  // Just allocate space for all the jump tables now.  We will fix up the actual
  // MBB entries in the tables after we emit the code for each block, since then
  // we will know the final locations of the MBBs in memory.
00883 JumpTable = MJTI; 00884 JumpTableBase = allocateSpace(NumEntries * EntrySize, MJTI->getAlignment()); 00885 } 00886 00887 void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) { 00888 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); 00889 if (JT.empty() || JumpTableBase == 0) return; 00890 00891 unsigned Offset = 0; 00892 assert(MJTI->getEntrySize() == sizeof(void*) && "Cross JIT'ing?"); 00893 00894 // For each jump table, map each target in the jump table to the address of 00895 // an emitted MachineBasicBlock. 00896 intptr_t *SlotPtr = (intptr_t*)JumpTableBase; 00897 00898 for (unsigned i = 0, e = JT.size(); i != e; ++i) { 00899 const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs; 00900 // Store the address of the basic block for this jump table slot in the 00901 // memory we allocated for the jump table in 'initJumpTableInfo' 00902 for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi) 00903 *SlotPtr++ = getMachineBasicBlockAddress(MBBs[mi]); 00904 } 00905 } 00906 00907 void JITEmitter::startFunctionStub(unsigned StubSize) { 00908 SavedBufferBegin = BufferBegin; 00909 SavedBufferEnd = BufferEnd; 00910 SavedCurBufferPtr = CurBufferPtr; 00911 00912 BufferBegin = CurBufferPtr = MemMgr.allocateStub(StubSize); 00913 BufferEnd = BufferBegin+StubSize+1; 00914 } 00915 00916 void *JITEmitter::finishFunctionStub(const Function *F) { 00917 NumBytes += getCurrentPCOffset(); 00918 std::swap(SavedBufferBegin, BufferBegin); 00919 BufferEnd = SavedBufferEnd; 00920 CurBufferPtr = SavedCurBufferPtr; 00921 return SavedBufferBegin; 00922 } 00923 00924 // getConstantPoolEntryAddress - Return the address of the 'ConstantNum' entry 00925 // in the constant pool that was last emitted with the 'emitConstantPool' 00926 // method. 
00927 // 00928 intptr_t JITEmitter::getConstantPoolEntryAddress(unsigned ConstantNum) const { 00929 assert(ConstantNum < ConstantPool->getConstants().size() && 00930 "Invalid ConstantPoolIndex!"); 00931 return (intptr_t)ConstantPoolBase + 00932 ConstantPool->getConstants()[ConstantNum].Offset; 00933 } 00934 00935 // getJumpTableEntryAddress - Return the address of the JumpTable with index 00936 // 'Index' in the jumpp table that was last initialized with 'initJumpTableInfo' 00937 // 00938 intptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const { 00939 const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables(); 00940 assert(Index < JT.size() && "Invalid jump table index!"); 00941 00942 unsigned Offset = 0; 00943 unsigned EntrySize = JumpTable->getEntrySize(); 00944 00945 for (unsigned i = 0; i < Index; ++i) 00946 Offset += JT[i].MBBs.size() * EntrySize; 00947 00948 return (intptr_t)((char *)JumpTableBase + Offset); 00949 } 00950 00951 //===----------------------------------------------------------------------===// 00952 // Public interface to this file 00953 //===----------------------------------------------------------------------===// 00954 00955 MachineCodeEmitter *JIT::createEmitter(JIT &jit) { 00956 return new JITEmitter(jit); 00957 } 00958 00959 // getPointerToNamedFunction - This function is used as a global wrapper to 00960 // JIT::getPointerToNamedFunction for the purpose of resolving symbols when 00961 // bugpoint is debugging the JIT. In that scenario, we are loading an .so and 00962 // need to resolve function(s) that are being mis-codegenerated, so we need to 00963 // resolve their addresses at runtime, and this is the way to do it. 
00964 extern "C" { 00965 void *getPointerToNamedFunction(const char *Name) { 00966 Module &M = TheJIT->getModule(); 00967 if (Function *F = M.getNamedFunction(Name)) 00968 return TheJIT->getPointerToFunction(F); 00969 return TheJIT->getPointerToNamedFunction(Name); 00970 } 00971 } 00972 00973 // getPointerToFunctionOrStub - If the specified function has been 00974 // code-gen'd, return a pointer to the function. If not, compile it, or use 00975 // a stub to implement lazy compilation if available. 00976 // 00977 void *JIT::getPointerToFunctionOrStub(Function *F) { 00978 // If we have already code generated the function, just return the address. 00979 if (void *Addr = getPointerToGlobalIfAvailable(F)) 00980 return Addr; 00981 00982 // Get a stub if the target supports it 00983 return getJITResolver(MCE).getFunctionStub(F); 00984 } 00985 00986 /// freeMachineCodeForFunction - release machine code memory for given Function. 00987 /// 00988 void JIT::freeMachineCodeForFunction(Function *F) { 00989 // Delete translation for this from the ExecutionEngine, so it will get 00990 // retranslated next time it is used. 00991 updateGlobalMapping(F, 0); 00992 00993 // Free the actual memory for the function body and related stuff. 00994 assert(dynamic_cast<JITEmitter*>(MCE) && "Unexpected MCE?"); 00995 dynamic_cast<JITEmitter*>(MCE)->deallocateMemForFunction(F); 00996 } 00997