| /* |
| * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. |
| * |
| * Copyright (C) 2002-2007 Aleph One Ltd. |
| * for Toby Churchill Ltd and Brightstar Engineering |
| * |
| * Created by Charles Manning <charles@aleph1.co.uk> |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License version 2 as |
| * published by the Free Software Foundation. |
| */ |
| |
| /* XXX U-BOOT XXX */ |
| #include <common.h> |
| |
| const char *yaffs_guts_c_version = |
| "$Id: yaffs_guts.c,v 1.52 2007/10/16 00:45:05 charles Exp $"; |
| |
| #include "yportenv.h" |
| #include "linux/stat.h" |
| |
| #include "yaffsinterface.h" |
| #include "yaffsfs.h" |
| #include "yaffs_guts.h" |
| #include "yaffs_tagsvalidity.h" |
| |
| #include "yaffs_tagscompat.h" |
| #ifndef CONFIG_YAFFS_USE_OWN_SORT |
| #include "yaffs_qsort.h" |
| #endif |
| #include "yaffs_nand.h" |
| |
| #include "yaffs_checkptrw.h" |
| |
| #include "yaffs_nand.h" |
| #include "yaffs_packedtags2.h" |
| |
| #include "malloc.h" |
| |
| #ifdef CONFIG_YAFFS_WINCE |
| void yfsd_LockYAFFS(BOOL fsLockOnly); |
| void yfsd_UnlockYAFFS(BOOL fsLockOnly); |
| #endif |
| |
| #define YAFFS_PASSIVE_GC_CHUNKS 2 |
| |
| #include "yaffs_ecc.h" |
| |
| |
| /* Robustification (if it ever comes about...) */ |
| static void yaffs_RetireBlock(yaffs_Device * dev, int blockInNAND); |
| static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND, int erasedOk); |
| static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND, |
| const __u8 * data, |
| const yaffs_ExtendedTags * tags); |
| static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND, |
| const yaffs_ExtendedTags * tags); |
| |
| /* Other local prototypes */ |
| static int yaffs_UnlinkObject( yaffs_Object *obj); |
| static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj); |
| |
| static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList); |
| |
| static int yaffs_WriteNewChunkWithTagsToNAND(yaffs_Device * dev, |
| const __u8 * buffer, |
| yaffs_ExtendedTags * tags, |
| int useReserve); |
| static int yaffs_PutChunkIntoFile(yaffs_Object * in, int chunkInInode, |
| int chunkInNAND, int inScan); |
| |
| static yaffs_Object *yaffs_CreateNewObject(yaffs_Device * dev, int number, |
| yaffs_ObjectType type); |
| static void yaffs_AddObjectToDirectory(yaffs_Object * directory, |
| yaffs_Object * obj); |
| static int yaffs_UpdateObjectHeader(yaffs_Object * in, const YCHAR * name, |
| int force, int isShrink, int shadows); |
| static void yaffs_RemoveObjectFromDirectory(yaffs_Object * obj); |
| static int yaffs_CheckStructures(void); |
| static int yaffs_DoGenericObjectDeletion(yaffs_Object * in); |
| |
| static yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blockNo); |
| |
| static __u8 *yaffs_GetTempBuffer(yaffs_Device * dev, int lineNo); |
| static void yaffs_ReleaseTempBuffer(yaffs_Device * dev, __u8 * buffer, |
| int lineNo); |
| |
| static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev, |
| int chunkInNAND); |
| |
| static int yaffs_UnlinkWorker(yaffs_Object * obj); |
| static void yaffs_DestroyObject(yaffs_Object * obj); |
| |
| static int yaffs_TagsMatch(const yaffs_ExtendedTags * tags, int objectId, |
| int chunkInObject); |
| |
| loff_t yaffs_GetFileSize(yaffs_Object * obj); |
| |
| static int yaffs_AllocateChunk(yaffs_Device * dev, int useReserve, yaffs_BlockInfo **blockUsedPtr); |
| |
| static void yaffs_VerifyFreeChunks(yaffs_Device * dev); |
| |
| static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in); |
| |
| #ifdef YAFFS_PARANOID |
| static int yaffs_CheckFileSanity(yaffs_Object * in); |
| #else |
| #define yaffs_CheckFileSanity(in) |
| #endif |
| |
| static void yaffs_InvalidateWholeChunkCache(yaffs_Object * in); |
| static void yaffs_InvalidateChunkCache(yaffs_Object * object, int chunkId); |
| |
| static void yaffs_InvalidateCheckpoint(yaffs_Device *dev); |
| |
| static int yaffs_FindChunkInFile(yaffs_Object * in, int chunkInInode, |
| yaffs_ExtendedTags * tags); |
| |
| static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos); |
| static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device * dev, |
| yaffs_FileStructure * fStruct, |
| __u32 chunkId); |
| |
| |
| /* Function to calculate chunk and offset */ |
| |
| static void yaffs_AddrToChunk(yaffs_Device *dev, loff_t addr, __u32 *chunk, __u32 *offset) |
| { |
| if(dev->chunkShift){ |
| /* Easy-peasy power of 2 case */ |
| *chunk = (__u32)(addr >> dev->chunkShift); |
| *offset = (__u32)(addr & dev->chunkMask); |
| } |
| else if(dev->crumbsPerChunk) |
| { |
| /* Case where we're using "crumbs" */ |
| *offset = (__u32)(addr & dev->crumbMask); |
| addr >>= dev->crumbShift; |
| *chunk = ((__u32)addr)/dev->crumbsPerChunk; |
| *offset += ((addr - (*chunk * dev->crumbsPerChunk)) << dev->crumbShift); |
| } |
| else |
| YBUG(); |
| } |
| |
| /* Function to return the number of shifts for a power of 2 greater than or equal |
| * to the given number |
| * Note we don't try to cater for all possible numbers and this does not have to |
| * be hellishly efficient. |
| */ |
| |
| static __u32 ShiftsGE(__u32 x) |
| { |
| int extraBits; |
| int nShifts; |
| |
| nShifts = extraBits = 0; |
| |
| while(x>1){ |
| if(x & 1) extraBits++; |
| x>>=1; |
| nShifts++; |
| } |
| |
| if(extraBits) |
| nShifts++; |
| |
| return nShifts; |
| } |
| |
| /* Function to return the number of shifts to get a 1 in bit 0 |
| */ |
| |
| static __u32 ShiftDiv(__u32 x) |
| { |
| int nShifts; |
| |
| nShifts = 0; |
| |
| if(!x) return 0; |
| |
| while( !(x&1)){ |
| x>>=1; |
| nShifts++; |
| } |
| |
| return nShifts; |
| } |
| |
| |
| |
| /* |
| * Temporary buffer manipulations. |
| */ |
| |
| static int yaffs_InitialiseTempBuffers(yaffs_Device *dev) |
| { |
| int i; |
| __u8 *buf = (__u8 *)1; |
| |
| memset(dev->tempBuffer,0,sizeof(dev->tempBuffer)); |
| |
| for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) { |
| dev->tempBuffer[i].line = 0; /* not in use */ |
| dev->tempBuffer[i].buffer = buf = |
| YMALLOC_DMA(dev->nDataBytesPerChunk); |
| } |
| |
| return buf ? YAFFS_OK : YAFFS_FAIL; |
| |
| } |
| |
| static __u8 *yaffs_GetTempBuffer(yaffs_Device * dev, int lineNo) |
| { |
| int i, j; |
| for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) { |
| if (dev->tempBuffer[i].line == 0) { |
| dev->tempBuffer[i].line = lineNo; |
| if ((i + 1) > dev->maxTemp) { |
| dev->maxTemp = i + 1; |
| for (j = 0; j <= i; j++) |
| dev->tempBuffer[j].maxLine = |
| dev->tempBuffer[j].line; |
| } |
| |
| return dev->tempBuffer[i].buffer; |
| } |
| } |
| |
| T(YAFFS_TRACE_BUFFERS, |
| (TSTR("Out of temp buffers at line %d, other held by lines:"), |
| lineNo)); |
| for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) { |
| T(YAFFS_TRACE_BUFFERS, (TSTR(" %d "), dev->tempBuffer[i].line)); |
| } |
| T(YAFFS_TRACE_BUFFERS, (TSTR(" " TENDSTR))); |
| |
| /* |
* If we got here then we have to allocate an unmanaged one.
| * This is not good. |
| */ |
| |
| dev->unmanagedTempAllocations++; |
| return YMALLOC(dev->nDataBytesPerChunk); |
| |
| } |
| |
| static void yaffs_ReleaseTempBuffer(yaffs_Device * dev, __u8 * buffer, |
| int lineNo) |
| { |
| int i; |
| for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) { |
| if (dev->tempBuffer[i].buffer == buffer) { |
| dev->tempBuffer[i].line = 0; |
| return; |
| } |
| } |
| |
| if (buffer) { |
| /* assume it is an unmanaged one. */ |
| T(YAFFS_TRACE_BUFFERS, |
| (TSTR("Releasing unmanaged temp buffer in line %d" TENDSTR), |
| lineNo)); |
| YFREE(buffer); |
| dev->unmanagedTempDeallocations++; |
| } |
| |
| } |
| |
| /* |
| * Determine if we have a managed buffer. |
| */ |
| int yaffs_IsManagedTempBuffer(yaffs_Device * dev, const __u8 * buffer) |
| { |
| int i; |
| for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) { |
| if (dev->tempBuffer[i].buffer == buffer) |
| return 1; |
| |
| } |
| |
| for (i = 0; i < dev->nShortOpCaches; i++) { |
| if( dev->srCache[i].data == buffer ) |
| return 1; |
| |
| } |
| |
| if (buffer == dev->checkpointBuffer) |
| return 1; |
| |
| T(YAFFS_TRACE_ALWAYS, |
| (TSTR("yaffs: unmaged buffer detected.\n" TENDSTR))); |
| return 0; |
| } |
| |
| |
| |
| /* |
| * Chunk bitmap manipulations |
| */ |
| |
| static Y_INLINE __u8 *yaffs_BlockBits(yaffs_Device * dev, int blk) |
| { |
| if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) { |
| T(YAFFS_TRACE_ERROR, |
| (TSTR("**>> yaffs: BlockBits block %d is not valid" TENDSTR), |
| blk)); |
| YBUG(); |
| } |
| return dev->chunkBits + |
| (dev->chunkBitmapStride * (blk - dev->internalStartBlock)); |
| } |
| |
| static Y_INLINE void yaffs_VerifyChunkBitId(yaffs_Device *dev, int blk, int chunk) |
| { |
| if(blk < dev->internalStartBlock || blk > dev->internalEndBlock || |
| chunk < 0 || chunk >= dev->nChunksPerBlock) { |
| T(YAFFS_TRACE_ERROR, |
| (TSTR("**>> yaffs: Chunk Id (%d:%d) invalid"TENDSTR),blk,chunk)); |
| YBUG(); |
| } |
| } |
| |
| static Y_INLINE void yaffs_ClearChunkBits(yaffs_Device * dev, int blk) |
| { |
| __u8 *blkBits = yaffs_BlockBits(dev, blk); |
| |
| memset(blkBits, 0, dev->chunkBitmapStride); |
| } |
| |
| static Y_INLINE void yaffs_ClearChunkBit(yaffs_Device * dev, int blk, int chunk) |
| { |
| __u8 *blkBits = yaffs_BlockBits(dev, blk); |
| |
| yaffs_VerifyChunkBitId(dev,blk,chunk); |
| |
| blkBits[chunk / 8] &= ~(1 << (chunk & 7)); |
| } |
| |
| static Y_INLINE void yaffs_SetChunkBit(yaffs_Device * dev, int blk, int chunk) |
| { |
| __u8 *blkBits = yaffs_BlockBits(dev, blk); |
| |
| yaffs_VerifyChunkBitId(dev,blk,chunk); |
| |
| blkBits[chunk / 8] |= (1 << (chunk & 7)); |
| } |
| |
| static Y_INLINE int yaffs_CheckChunkBit(yaffs_Device * dev, int blk, int chunk) |
| { |
| __u8 *blkBits = yaffs_BlockBits(dev, blk); |
| yaffs_VerifyChunkBitId(dev,blk,chunk); |
| |
| return (blkBits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0; |
| } |
| |
| static Y_INLINE int yaffs_StillSomeChunkBits(yaffs_Device * dev, int blk) |
| { |
| __u8 *blkBits = yaffs_BlockBits(dev, blk); |
| int i; |
| for (i = 0; i < dev->chunkBitmapStride; i++) { |
| if (*blkBits) |
| return 1; |
| blkBits++; |
| } |
| return 0; |
| } |
| |
| static int yaffs_CountChunkBits(yaffs_Device * dev, int blk) |
| { |
| __u8 *blkBits = yaffs_BlockBits(dev, blk); |
| int i; |
| int n = 0; |
| for (i = 0; i < dev->chunkBitmapStride; i++) { |
| __u8 x = *blkBits; |
| while(x){ |
| if(x & 1) |
| n++; |
| x >>=1; |
| } |
| |
| blkBits++; |
| } |
| return n; |
| } |
| |
| /* |
| * Verification code |
| */ |
| |
| static Y_INLINE int yaffs_SkipVerification(yaffs_Device *dev) |
| { |
| return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL)); |
| } |
| |
| static Y_INLINE int yaffs_SkipFullVerification(yaffs_Device *dev) |
| { |
| return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_FULL)); |
| } |
| |
| static Y_INLINE int yaffs_SkipNANDVerification(yaffs_Device *dev) |
| { |
| return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_NAND)); |
| } |
| |
| static const char * blockStateName[] = { |
| "Unknown", |
| "Needs scanning", |
| "Scanning", |
| "Empty", |
| "Allocating", |
| "Full", |
| "Dirty", |
| "Checkpoint", |
| "Collecting", |
| "Dead" |
| }; |
| |
| static void yaffs_VerifyBlock(yaffs_Device *dev,yaffs_BlockInfo *bi,int n) |
| { |
| int actuallyUsed; |
| int inUse; |
| |
| if(yaffs_SkipVerification(dev)) |
| return; |
| |
| /* Report illegal runtime states */ |
| if(bi->blockState <0 || bi->blockState >= YAFFS_NUMBER_OF_BLOCK_STATES) |
| T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has undefined state %d"TENDSTR),n,bi->blockState)); |
| |
| switch(bi->blockState){ |
| case YAFFS_BLOCK_STATE_UNKNOWN: |
| case YAFFS_BLOCK_STATE_SCANNING: |
| case YAFFS_BLOCK_STATE_NEEDS_SCANNING: |
| T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has bad run-state %s"TENDSTR), |
| n,blockStateName[bi->blockState])); |
| } |
| |
| /* Check pages in use and soft deletions are legal */ |
| |
| actuallyUsed = bi->pagesInUse - bi->softDeletions; |
| |
| if(bi->pagesInUse < 0 || bi->pagesInUse > dev->nChunksPerBlock || |
| bi->softDeletions < 0 || bi->softDeletions > dev->nChunksPerBlock || |
| actuallyUsed < 0 || actuallyUsed > dev->nChunksPerBlock) |
| T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has illegal values pagesInUsed %d softDeletions %d"TENDSTR), |
| n,bi->pagesInUse,bi->softDeletions)); |
| |
| |
| /* Check chunk bitmap legal */ |
| inUse = yaffs_CountChunkBits(dev,n); |
| if(inUse != bi->pagesInUse) |
| T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has inconsistent values pagesInUse %d counted chunk bits %d"TENDSTR), |
| n,bi->pagesInUse,inUse)); |
| |
| /* Check that the sequence number is valid. |
| * Ten million is legal, but is very unlikely |
| */ |
| if(dev->isYaffs2 && |
| (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING || bi->blockState == YAFFS_BLOCK_STATE_FULL) && |
| (bi->sequenceNumber < YAFFS_LOWEST_SEQUENCE_NUMBER || bi->sequenceNumber > 10000000 )) |
| T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has suspect sequence number of %d"TENDSTR), |
| n,bi->sequenceNumber)); |
| |
| } |
| |
| static void yaffs_VerifyCollectedBlock(yaffs_Device *dev,yaffs_BlockInfo *bi,int n) |
| { |
| yaffs_VerifyBlock(dev,bi,n); |
| |
| /* After collection the block should be in the erased state */ |
| /* TODO: This will need to change if we do partial gc */ |
| |
| if(bi->blockState != YAFFS_BLOCK_STATE_EMPTY){ |
| T(YAFFS_TRACE_ERROR,(TSTR("Block %d is in state %d after gc, should be erased"TENDSTR), |
| n,bi->blockState)); |
| } |
| } |
| |
| static void yaffs_VerifyBlocks(yaffs_Device *dev) |
| { |
| int i; |
| int nBlocksPerState[YAFFS_NUMBER_OF_BLOCK_STATES]; |
| int nIllegalBlockStates = 0; |
| |
| |
| if(yaffs_SkipVerification(dev)) |
| return; |
| |
| memset(nBlocksPerState,0,sizeof(nBlocksPerState)); |
| |
| |
| for(i = dev->internalStartBlock; i <= dev->internalEndBlock; i++){ |
| yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,i); |
| yaffs_VerifyBlock(dev,bi,i); |
| |
| if(bi->blockState >=0 && bi->blockState < YAFFS_NUMBER_OF_BLOCK_STATES) |
| nBlocksPerState[bi->blockState]++; |
| else |
| nIllegalBlockStates++; |
| |
| } |
| |
| T(YAFFS_TRACE_VERIFY,(TSTR(""TENDSTR))); |
| T(YAFFS_TRACE_VERIFY,(TSTR("Block summary"TENDSTR))); |
| |
| T(YAFFS_TRACE_VERIFY,(TSTR("%d blocks have illegal states"TENDSTR),nIllegalBlockStates)); |
| if(nBlocksPerState[YAFFS_BLOCK_STATE_ALLOCATING] > 1) |
| T(YAFFS_TRACE_VERIFY,(TSTR("Too many allocating blocks"TENDSTR))); |
| |
| for(i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++) |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("%s %d blocks"TENDSTR), |
| blockStateName[i],nBlocksPerState[i])); |
| |
| if(dev->blocksInCheckpoint != nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT]) |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Checkpoint block count wrong dev %d count %d"TENDSTR), |
| dev->blocksInCheckpoint, nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT])); |
| |
| if(dev->nErasedBlocks != nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY]) |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Erased block count wrong dev %d count %d"TENDSTR), |
| dev->nErasedBlocks, nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY])); |
| |
| if(nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING] > 1) |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Too many collecting blocks %d (max is 1)"TENDSTR), |
| nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING])); |
| |
| T(YAFFS_TRACE_VERIFY,(TSTR(""TENDSTR))); |
| |
| } |
| |
| /* |
| * Verify the object header. oh must be valid, but obj and tags may be NULL in which |
| * case those tests will not be performed. |
| */ |
| static void yaffs_VerifyObjectHeader(yaffs_Object *obj, yaffs_ObjectHeader *oh, yaffs_ExtendedTags *tags, int parentCheck) |
| { |
| if(yaffs_SkipVerification(obj->myDev)) |
| return; |
| |
| if(!(tags && obj && oh)){ |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Verifying object header tags %x obj %x oh %x"TENDSTR), |
| (__u32)tags,(__u32)obj,(__u32)oh)); |
| return; |
| } |
| |
| if(oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN || |
| oh->type > YAFFS_OBJECT_TYPE_MAX) |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Obj %d header type is illegal value 0x%x"TENDSTR), |
| tags->objectId, oh->type)); |
| |
| if(tags->objectId != obj->objectId) |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Obj %d header mismatch objectId %d"TENDSTR), |
| tags->objectId, obj->objectId)); |
| |
| |
| /* |
| * Check that the object's parent ids match if parentCheck requested. |
| * |
| * Tests do not apply to the root object. |
| */ |
| |
| if(parentCheck && tags->objectId > 1 && !obj->parent) |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Obj %d header mismatch parentId %d obj->parent is NULL"TENDSTR), |
| tags->objectId, oh->parentObjectId)); |
| |
| |
| if(parentCheck && obj->parent && |
| oh->parentObjectId != obj->parent->objectId && |
| (oh->parentObjectId != YAFFS_OBJECTID_UNLINKED || |
| obj->parent->objectId != YAFFS_OBJECTID_DELETED)) |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Obj %d header mismatch parentId %d parentObjectId %d"TENDSTR), |
| tags->objectId, oh->parentObjectId, obj->parent->objectId)); |
| |
| |
| if(tags->objectId > 1 && oh->name[0] == 0) /* Null name */ |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Obj %d header name is NULL"TENDSTR), |
| obj->objectId)); |
| |
| if(tags->objectId > 1 && ((__u8)(oh->name[0])) == 0xff) /* Trashed name */ |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Obj %d header name is 0xFF"TENDSTR), |
| obj->objectId)); |
| } |
| |
| static void yaffs_VerifyFile(yaffs_Object *obj) |
| { |
| int requiredTallness; |
| int actualTallness; |
| __u32 lastChunk; |
| __u32 x; |
| __u32 i; |
| yaffs_Device *dev; |
| yaffs_ExtendedTags tags; |
| yaffs_Tnode *tn; |
| __u32 objectId; |
| |
| if(obj && yaffs_SkipVerification(obj->myDev)) |
| return; |
| |
| dev = obj->myDev; |
| objectId = obj->objectId; |
| |
| /* Check file size is consistent with tnode depth */ |
| lastChunk = obj->variant.fileVariant.fileSize / dev->nDataBytesPerChunk + 1; |
| x = lastChunk >> YAFFS_TNODES_LEVEL0_BITS; |
| requiredTallness = 0; |
| while (x> 0) { |
| x >>= YAFFS_TNODES_INTERNAL_BITS; |
| requiredTallness++; |
| } |
| |
| actualTallness = obj->variant.fileVariant.topLevel; |
| |
| if(requiredTallness > actualTallness ) |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Obj %d had tnode tallness %d, needs to be %d"TENDSTR), |
| obj->objectId,actualTallness, requiredTallness)); |
| |
| |
| /* Check that the chunks in the tnode tree are all correct. |
| * We do this by scanning through the tnode tree and |
| * checking the tags for every chunk match. |
| */ |
| |
| if(yaffs_SkipNANDVerification(dev)) |
| return; |
| |
| for(i = 1; i <= lastChunk; i++){ |
| tn = yaffs_FindLevel0Tnode(dev, &obj->variant.fileVariant,i); |
| |
| if (tn) { |
| __u32 theChunk = yaffs_GetChunkGroupBase(dev,tn,i); |
| if(theChunk > 0){ |
| /* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),objectId,i,theChunk)); */ |
| yaffs_ReadChunkWithTagsFromNAND(dev,theChunk,NULL, &tags); |
| if(tags.objectId != objectId || tags.chunkId != i){ |
| T(~0,(TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR), |
| objectId, i, theChunk, |
| tags.objectId, tags.chunkId)); |
| } |
| } |
| } |
| |
| } |
| |
| } |
| |
| static void yaffs_VerifyDirectory(yaffs_Object *obj) |
| { |
| if(obj && yaffs_SkipVerification(obj->myDev)) |
| return; |
| |
| } |
| |
| static void yaffs_VerifyHardLink(yaffs_Object *obj) |
| { |
| if(obj && yaffs_SkipVerification(obj->myDev)) |
| return; |
| |
| /* Verify sane equivalent object */ |
| } |
| |
| static void yaffs_VerifySymlink(yaffs_Object *obj) |
| { |
| if(obj && yaffs_SkipVerification(obj->myDev)) |
| return; |
| |
| /* Verify symlink string */ |
| } |
| |
| static void yaffs_VerifySpecial(yaffs_Object *obj) |
| { |
| if(obj && yaffs_SkipVerification(obj->myDev)) |
| return; |
| } |
| |
| static void yaffs_VerifyObject(yaffs_Object *obj) |
| { |
| yaffs_Device *dev; |
| |
| __u32 chunkMin; |
| __u32 chunkMax; |
| |
| __u32 chunkIdOk; |
| __u32 chunkIsLive; |
| |
| if(!obj) |
| return; |
| |
| dev = obj->myDev; |
| |
| if(yaffs_SkipVerification(dev)) |
| return; |
| |
| /* Check sane object header chunk */ |
| |
| chunkMin = dev->internalStartBlock * dev->nChunksPerBlock; |
| chunkMax = (dev->internalEndBlock+1) * dev->nChunksPerBlock - 1; |
| |
| chunkIdOk = (obj->chunkId >= chunkMin && obj->chunkId <= chunkMax); |
| chunkIsLive = chunkIdOk && |
| yaffs_CheckChunkBit(dev, |
| obj->chunkId / dev->nChunksPerBlock, |
| obj->chunkId % dev->nChunksPerBlock); |
| if(!obj->fake && |
| (!chunkIdOk || !chunkIsLive)) { |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Obj %d has chunkId %d %s %s"TENDSTR), |
| obj->objectId,obj->chunkId, |
| chunkIdOk ? "" : ",out of range", |
| chunkIsLive || !chunkIdOk ? "" : ",marked as deleted")); |
| } |
| |
| if(chunkIdOk && chunkIsLive &&!yaffs_SkipNANDVerification(dev)) { |
| yaffs_ExtendedTags tags; |
| yaffs_ObjectHeader *oh; |
| __u8 *buffer = yaffs_GetTempBuffer(dev,__LINE__); |
| |
| oh = (yaffs_ObjectHeader *)buffer; |
| |
| yaffs_ReadChunkWithTagsFromNAND(dev, obj->chunkId,buffer, &tags); |
| |
| yaffs_VerifyObjectHeader(obj,oh,&tags,1); |
| |
| yaffs_ReleaseTempBuffer(dev,buffer,__LINE__); |
| } |
| |
| /* Verify it has a parent */ |
| if(obj && !obj->fake && |
| (!obj->parent || obj->parent->myDev != dev)){ |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Obj %d has parent pointer %p which does not look like an object"TENDSTR), |
| obj->objectId,obj->parent)); |
| } |
| |
| /* Verify parent is a directory */ |
| if(obj->parent && obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY){ |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Obj %d's parent is not a directory (type %d)"TENDSTR), |
| obj->objectId,obj->parent->variantType)); |
| } |
| |
| switch(obj->variantType){ |
| case YAFFS_OBJECT_TYPE_FILE: |
| yaffs_VerifyFile(obj); |
| break; |
| case YAFFS_OBJECT_TYPE_SYMLINK: |
| yaffs_VerifySymlink(obj); |
| break; |
| case YAFFS_OBJECT_TYPE_DIRECTORY: |
| yaffs_VerifyDirectory(obj); |
| break; |
| case YAFFS_OBJECT_TYPE_HARDLINK: |
| yaffs_VerifyHardLink(obj); |
| break; |
| case YAFFS_OBJECT_TYPE_SPECIAL: |
| yaffs_VerifySpecial(obj); |
| break; |
| case YAFFS_OBJECT_TYPE_UNKNOWN: |
| default: |
| T(YAFFS_TRACE_VERIFY, |
| (TSTR("Obj %d has illegaltype %d"TENDSTR), |
| obj->objectId,obj->variantType)); |
| break; |
| } |
| |
| |
| } |
| |
| static void yaffs_VerifyObjects(yaffs_Device *dev) |
| { |
| yaffs_Object *obj; |
| int i; |
| struct list_head *lh; |
| |
| if(yaffs_SkipVerification(dev)) |
| return; |
| |
| /* Iterate through the objects in each hash entry */ |
| |
| for(i = 0; i < YAFFS_NOBJECT_BUCKETS; i++){ |
| list_for_each(lh, &dev->objectBucket[i].list) { |
| if (lh) { |
| obj = list_entry(lh, yaffs_Object, hashLink); |
| yaffs_VerifyObject(obj); |
| } |
| } |
| } |
| |
| } |
| |
| |
| /* |
| * Simple hash function. Needs to have a reasonable spread |
| */ |
| |
| static Y_INLINE int yaffs_HashFunction(int n) |
| { |
| /* XXX U-BOOT XXX */ |
| /*n = abs(n); */ |
| if (n < 0) |
| n = -n; |
| return (n % YAFFS_NOBJECT_BUCKETS); |
| } |
| |
| /* |
| * Access functions to useful fake objects |
| */ |
| |
| yaffs_Object *yaffs_Root(yaffs_Device * dev) |
| { |
| return dev->rootDir; |
| } |
| |
| yaffs_Object *yaffs_LostNFound(yaffs_Device * dev) |
| { |
| return dev->lostNFoundDir; |
| } |
| |
| |
| /* |
| * Erased NAND checking functions |
| */ |
| |
| int yaffs_CheckFF(__u8 * buffer, int nBytes) |
| { |
| /* Horrible, slow implementation */ |
| while (nBytes--) { |
| if (*buffer != 0xFF) |
| return 0; |
| buffer++; |
| } |
| return 1; |
| } |
| |
| static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev, |
| int chunkInNAND) |
| { |
| |
| int retval = YAFFS_OK; |
| __u8 *data = yaffs_GetTempBuffer(dev, __LINE__); |
| yaffs_ExtendedTags tags; |
| |
| yaffs_ReadChunkWithTagsFromNAND(dev, chunkInNAND, data, &tags); |
| |
| if(tags.eccResult > YAFFS_ECC_RESULT_NO_ERROR) |
| retval = YAFFS_FAIL; |
| |
| |
| if (!yaffs_CheckFF(data, dev->nDataBytesPerChunk) || tags.chunkUsed) { |
| T(YAFFS_TRACE_NANDACCESS, |
| (TSTR("Chunk %d not erased" TENDSTR), chunkInNAND)); |
| retval = YAFFS_FAIL; |
| } |
| |
| yaffs_ReleaseTempBuffer(dev, data, __LINE__); |
| |
| return retval; |
| |
| } |
| |
| static int yaffs_WriteNewChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev, |
| const __u8 * data, |
| yaffs_ExtendedTags * tags, |
| int useReserve) |
| { |
| int attempts = 0; |
| int writeOk = 0; |
| int chunk; |
| |
| yaffs_InvalidateCheckpoint(dev); |
| |
| do { |
| yaffs_BlockInfo *bi = 0; |
| int erasedOk = 0; |
| |
| chunk = yaffs_AllocateChunk(dev, useReserve, &bi); |
| if (chunk < 0) { |
| /* no space */ |
| break; |
| } |
| |
| /* First check this chunk is erased, if it needs |
| * checking. The checking policy (unless forced |
| * always on) is as follows: |
| * |
| * Check the first page we try to write in a block. |
| * If the check passes then we don't need to check any |
| * more. If the check fails, we check again... |
| * If the block has been erased, we don't need to check. |
| * |
| * However, if the block has been prioritised for gc, |
| * then we think there might be something odd about |
| * this block and stop using it. |
| * |
| * Rationale: We should only ever see chunks that have |
| * not been erased if there was a partially written |
| * chunk due to power loss. This checking policy should |
| * catch that case with very few checks and thus save a |
| * lot of checks that are most likely not needed. |
| */ |
| if (bi->gcPrioritise) { |
| yaffs_DeleteChunk(dev, chunk, 1, __LINE__); |
| /* try another chunk */ |
| continue; |
| } |
| |
| /* let's give it a try */ |
| attempts++; |
| |
| #ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED |
| bi->skipErasedCheck = 0; |
| #endif |
| if (!bi->skipErasedCheck) { |
| erasedOk = yaffs_CheckChunkErased(dev, chunk); |
| if (erasedOk != YAFFS_OK) { |
| T(YAFFS_TRACE_ERROR, |
| (TSTR ("**>> yaffs chunk %d was not erased" |
| TENDSTR), chunk)); |
| |
| /* try another chunk */ |
| continue; |
| } |
| bi->skipErasedCheck = 1; |
| } |
| |
| writeOk = yaffs_WriteChunkWithTagsToNAND(dev, chunk, |
| data, tags); |
| if (writeOk != YAFFS_OK) { |
| yaffs_HandleWriteChunkError(dev, chunk, erasedOk); |
| /* try another chunk */ |
| continue; |
| } |
| |
| /* Copy the data into the robustification buffer */ |
| yaffs_HandleWriteChunkOk(dev, chunk, data, tags); |
| |
| } while (writeOk != YAFFS_OK && |
| (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts)); |
| |
| if(!writeOk) |
| chunk = -1; |
| |
| if (attempts > 1) { |
| T(YAFFS_TRACE_ERROR, |
| (TSTR("**>> yaffs write required %d attempts" TENDSTR), |
| attempts)); |
| |
| dev->nRetriedWrites += (attempts - 1); |
| } |
| |
| return chunk; |
| } |
| |
| /* |
| * Block retiring for handling a broken block. |
| */ |
| |
| static void yaffs_RetireBlock(yaffs_Device * dev, int blockInNAND) |
| { |
| yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND); |
| |
| yaffs_InvalidateCheckpoint(dev); |
| |
| yaffs_MarkBlockBad(dev, blockInNAND); |
| |
| bi->blockState = YAFFS_BLOCK_STATE_DEAD; |
| bi->gcPrioritise = 0; |
| bi->needsRetiring = 0; |
| |
| dev->nRetiredBlocks++; |
| } |
| |
| /* |
| * Functions for robustisizing TODO |
| * |
| */ |
| |
| static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND, |
| const __u8 * data, |
| const yaffs_ExtendedTags * tags) |
| { |
| } |
| |
| static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND, |
| const yaffs_ExtendedTags * tags) |
| { |
| } |
| |
| void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi) |
| { |
| if(!bi->gcPrioritise){ |
| bi->gcPrioritise = 1; |
| dev->hasPendingPrioritisedGCs = 1; |
| bi->chunkErrorStrikes ++; |
| |
| if(bi->chunkErrorStrikes > 3){ |
bi->needsRetiring = 1; /* Too many strikes, so retire this block */
| T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Block struck out" TENDSTR))); |
| |
| } |
| |
| } |
| } |
| |
| static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND, int erasedOk) |
| { |
| |
| int blockInNAND = chunkInNAND / dev->nChunksPerBlock; |
| yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND); |
| |
| yaffs_HandleChunkError(dev,bi); |
| |
| |
| if(erasedOk ) { |
| /* Was an actual write failure, so mark the block for retirement */ |
| bi->needsRetiring = 1; |
| T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, |
| (TSTR("**>> Block %d needs retiring" TENDSTR), blockInNAND)); |
| |
| |
| } |
| |
| /* Delete the chunk */ |
| yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__); |
| } |
| |
| |
| /*---------------- Name handling functions ------------*/ |
| |
| static __u16 yaffs_CalcNameSum(const YCHAR * name) |
| { |
| __u16 sum = 0; |
| __u16 i = 1; |
| |
| YUCHAR *bname = (YUCHAR *) name; |
| if (bname) { |
| while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH/2))) { |
| |
| #ifdef CONFIG_YAFFS_CASE_INSENSITIVE |
| sum += yaffs_toupper(*bname) * i; |
| #else |
| sum += (*bname) * i; |
| #endif |
| i++; |
| bname++; |
| } |
| } |
| return sum; |
| } |
| |
| static void yaffs_SetObjectName(yaffs_Object * obj, const YCHAR * name) |
| { |
| #ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM |
| if (name && yaffs_strlen(name) <= YAFFS_SHORT_NAME_LENGTH) { |
| yaffs_strcpy(obj->shortName, name); |
| } else { |
| obj->shortName[0] = _Y('\0'); |
| } |
| #endif |
| obj->sum = yaffs_CalcNameSum(name); |
| } |
| |
/*-------------------- TNODES -------------------
* List of spare tnodes.
| * The list is hooked together using the first pointer |
| * in the tnode. |
| */ |
| |
| /* yaffs_CreateTnodes creates a bunch more tnodes and |
| * adds them to the tnode free list. |
| * Don't use this function directly |
| */ |
| |
| static int yaffs_CreateTnodes(yaffs_Device * dev, int nTnodes) |
| { |
| int i; |
| int tnodeSize; |
| yaffs_Tnode *newTnodes; |
| __u8 *mem; |
| yaffs_Tnode *curr; |
| yaffs_Tnode *next; |
| yaffs_TnodeList *tnl; |
| |
| if (nTnodes < 1) |
| return YAFFS_OK; |
| |
| /* Calculate the tnode size in bytes for variable width tnode support. |
* Must be a multiple of 32 bits */
| tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8; |
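/* e.g. (illustrative) a 16-bit tnodeWidth with the usual 16 level-0 entries
* gives tnodeSize = (16 * 16)/8 = 32 bytes per tnode. */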
| |
| /* make these things */ |
| |
| newTnodes = YMALLOC(nTnodes * tnodeSize); |
| mem = (__u8 *)newTnodes; |
| |
| if (!newTnodes) { |
| T(YAFFS_TRACE_ERROR, |
| (TSTR("yaffs: Could not allocate Tnodes" TENDSTR))); |
| return YAFFS_FAIL; |
| } |
| |
| /* Hook them into the free list */ |
| #if 0 |
| for (i = 0; i < nTnodes - 1; i++) { |
| newTnodes[i].internal[0] = &newTnodes[i + 1]; |
| #ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG |
| newTnodes[i].internal[YAFFS_NTNODES_INTERNAL] = (void *)1; |
| #endif |
| } |
| |
| newTnodes[nTnodes - 1].internal[0] = dev->freeTnodes; |
| #ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG |
| newTnodes[nTnodes - 1].internal[YAFFS_NTNODES_INTERNAL] = (void *)1; |
| #endif |
| dev->freeTnodes = newTnodes; |
| #else |
| /* New hookup for wide tnodes */ |
| for(i = 0; i < nTnodes -1; i++) { |
| curr = (yaffs_Tnode *) &mem[i * tnodeSize]; |
| next = (yaffs_Tnode *) &mem[(i+1) * tnodeSize]; |
| curr->internal[0] = next; |
| } |
| |
| curr = (yaffs_Tnode *) &mem[(nTnodes - 1) * tnodeSize]; |
| curr->internal[0] = dev->freeTnodes; |
| dev->freeTnodes = (yaffs_Tnode *)mem; |
| |
| #endif |
| |
| |
| dev->nFreeTnodes += nTnodes; |
| dev->nTnodesCreated += nTnodes; |
| |
| /* Now add this bunch of tnodes to a list for freeing up. |
| * NB If we can't add this to the management list it isn't fatal |
| * but it just means we can't free this bunch of tnodes later. |
| */ |
| |
| tnl = YMALLOC(sizeof(yaffs_TnodeList)); |
| if (!tnl) { |
| T(YAFFS_TRACE_ERROR, |
| (TSTR |
| ("yaffs: Could not add tnodes to management list" TENDSTR))); |
| return YAFFS_FAIL; |
| |
| } else { |
| tnl->tnodes = newTnodes; |
| tnl->next = dev->allocatedTnodeList; |
| dev->allocatedTnodeList = tnl; |
| } |
| |
| T(YAFFS_TRACE_ALLOCATE, (TSTR("yaffs: Tnodes added" TENDSTR))); |
| |
| return YAFFS_OK; |
| } |
| |
/* GetTnode gets us a clean tnode. Tries to allocate more if we run out */
| |
| static yaffs_Tnode *yaffs_GetTnodeRaw(yaffs_Device * dev) |
| { |
| yaffs_Tnode *tn = NULL; |
| |
| /* If there are none left make more */ |
| if (!dev->freeTnodes) { |
| yaffs_CreateTnodes(dev, YAFFS_ALLOCATION_NTNODES); |
| } |
| |
| if (dev->freeTnodes) { |
| tn = dev->freeTnodes; |
| #ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG |
| if (tn->internal[YAFFS_NTNODES_INTERNAL] != (void *)1) { |
| /* Hoosterman, this thing looks like it isn't in the list */ |
| T(YAFFS_TRACE_ALWAYS, |
| (TSTR("yaffs: Tnode list bug 1" TENDSTR))); |
| } |
| #endif |
| dev->freeTnodes = dev->freeTnodes->internal[0]; |
| dev->nFreeTnodes--; |
| } |
| |
| return tn; |
| } |
| |
| static yaffs_Tnode *yaffs_GetTnode(yaffs_Device * dev) |
| { |
| yaffs_Tnode *tn = yaffs_GetTnodeRaw(dev); |
| |
| if(tn) |
| memset(tn, 0, (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8); |
| |
| return tn; |
| } |
| |
| /* FreeTnode frees up a tnode and puts it back on the free list */ |
| static void yaffs_FreeTnode(yaffs_Device * dev, yaffs_Tnode * tn) |
| { |
| if (tn) { |
| #ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG |
| if (tn->internal[YAFFS_NTNODES_INTERNAL] != 0) { |
| /* Hoosterman, this thing looks like it is already in the list */ |
| T(YAFFS_TRACE_ALWAYS, |
| (TSTR("yaffs: Tnode list bug 2" TENDSTR))); |
| } |
| tn->internal[YAFFS_NTNODES_INTERNAL] = (void *)1; |
| #endif |
| tn->internal[0] = dev->freeTnodes; |
| dev->freeTnodes = tn; |
| dev->nFreeTnodes++; |
| } |
| } |
| |
| static void yaffs_DeinitialiseTnodes(yaffs_Device * dev) |
| { |
| /* Free the list of allocated tnodes */ |
| yaffs_TnodeList *tmp; |
| |
| while (dev->allocatedTnodeList) { |
| tmp = dev->allocatedTnodeList->next; |
| |
| YFREE(dev->allocatedTnodeList->tnodes); |
| YFREE(dev->allocatedTnodeList); |
| dev->allocatedTnodeList = tmp; |
| |
| } |
| |
| dev->freeTnodes = NULL; |
| dev->nFreeTnodes = 0; |
| } |
| |
| static void yaffs_InitialiseTnodes(yaffs_Device * dev) |
| { |
| dev->allocatedTnodeList = NULL; |
| dev->freeTnodes = NULL; |
| dev->nFreeTnodes = 0; |
| dev->nTnodesCreated = 0; |
| |
| } |
| |
| |
| void yaffs_PutLevel0Tnode(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos, unsigned val) |
| { |
| __u32 *map = (__u32 *)tn; |
| __u32 bitInMap; |
| __u32 bitInWord; |
| __u32 wordInMap; |
| __u32 mask; |
| |
| pos &= YAFFS_TNODES_LEVEL0_MASK; |
| val >>= dev->chunkGroupBits; |
| |
| bitInMap = pos * dev->tnodeWidth; |
| wordInMap = bitInMap /32; |
| bitInWord = bitInMap & (32 -1); |
| |
| mask = dev->tnodeMask << bitInWord; |
| |
| map[wordInMap] &= ~mask; |
| map[wordInMap] |= (mask & (val << bitInWord)); |
| |
| if(dev->tnodeWidth > (32-bitInWord)) { |
| bitInWord = (32 - bitInWord); |
wordInMap++;
| mask = dev->tnodeMask >> (/*dev->tnodeWidth -*/ bitInWord); |
| map[wordInMap] &= ~mask; |
| map[wordInMap] |= (mask & (val >> bitInWord)); |
| } |
| } |
| |
| static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos) |
| { |
| __u32 *map = (__u32 *)tn; |
| __u32 bitInMap; |
| __u32 bitInWord; |
| __u32 wordInMap; |
| __u32 val; |
| |
| pos &= YAFFS_TNODES_LEVEL0_MASK; |
| |
| bitInMap = pos * dev->tnodeWidth; |
| wordInMap = bitInMap /32; |
| bitInWord = bitInMap & (32 -1); |
| |
| val = map[wordInMap] >> bitInWord; |
| |
| if(dev->tnodeWidth > (32-bitInWord)) { |
| bitInWord = (32 - bitInWord); |
wordInMap++;
| val |= (map[wordInMap] << bitInWord); |
| } |
| |
| val &= dev->tnodeMask; |
| val <<= dev->chunkGroupBits; |
| |
| return val; |
| } |
| |
| /* ------------------- End of individual tnode manipulation -----------------*/ |
| |
| /* ---------Functions to manipulate the look-up tree (made up of tnodes) ------ |
| * The look up tree is represented by the top tnode and the number of topLevel |
| * in the tree. 0 means only the level 0 tnode is in the tree. |
| */ |
| |
| /* FindLevel0Tnode finds the level 0 tnode, if one exists. */ |
| static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device * dev, |
| yaffs_FileStructure * fStruct, |
| __u32 chunkId) |
| { |
| |
| yaffs_Tnode *tn = fStruct->top; |
| __u32 i; |
| int requiredTallness; |
| int level = fStruct->topLevel; |
| |
| /* Check sane level and chunk Id */ |
| if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL) { |
| return NULL; |
| } |
| |
| if (chunkId > YAFFS_MAX_CHUNK_ID) { |
| return NULL; |
| } |
| |
| /* First check we're tall enough (ie enough topLevel) */ |
| |
| i = chunkId >> YAFFS_TNODES_LEVEL0_BITS; |
| requiredTallness = 0; |
| while (i) { |
| i >>= YAFFS_TNODES_INTERNAL_BITS; |
| requiredTallness++; |
| } |
| |
| if (requiredTallness > fStruct->topLevel) { |
| /* Not tall enough, so we can't find it, return NULL. */ |
| return NULL; |
| } |
| |
| /* Traverse down to level 0 */ |
| while (level > 0 && tn) { |
| tn = tn-> |
| internal[(chunkId >> |
| ( YAFFS_TNODES_LEVEL0_BITS + |
| (level - 1) * |
| YAFFS_TNODES_INTERNAL_BITS) |
| ) & |
| YAFFS_TNODES_INTERNAL_MASK]; |
| level--; |
| |
| } |
| |
| return tn; |
| } |
| |
| /* AddOrFindLevel0Tnode finds the level 0 tnode if it exists, otherwise first expands the tree. |
| * This happens in two steps: |
| * 1. If the tree isn't tall enough, then make it taller. |
| * 2. Scan down the tree towards the level 0 tnode adding tnodes if required. |
| * |
| * Used when modifying the tree. |
| * |
* If the passedTn argument is NULL, then a fresh tnode will be added; otherwise the specified tnode
* will be plugged into the tree.
| */ |
| |
| static yaffs_Tnode *yaffs_AddOrFindLevel0Tnode(yaffs_Device * dev, |
| yaffs_FileStructure * fStruct, |
| __u32 chunkId, |
| yaffs_Tnode *passedTn) |
| { |
| |
| int requiredTallness; |
| int i; |
| int l; |
| yaffs_Tnode *tn; |
| |
| __u32 x; |
| |
| |
| /* Check sane level and page Id */ |
| if (fStruct->topLevel < 0 || fStruct->topLevel > YAFFS_TNODES_MAX_LEVEL) { |
| return NULL; |
| } |
| |
| if (chunkId > YAFFS_MAX_CHUNK_ID) { |
| return NULL; |
| } |
| |
| /* First check we're tall enough (ie enough topLevel) */ |
| |
| x = chunkId >> YAFFS_TNODES_LEVEL0_BITS; |
| requiredTallness = 0; |
| while (x) { |
| x >>= YAFFS_TNODES_INTERNAL_BITS; |
| requiredTallness++; |
| } |
| |
| |
| if (requiredTallness > fStruct->topLevel) { |
/* Not tall enough, gotta make the tree taller */
| for (i = fStruct->topLevel; i < requiredTallness; i++) { |
| |
| tn = yaffs_GetTnode(dev); |
| |
| if (tn) { |
| tn->internal[0] = fStruct->top; |
| fStruct->top = tn; |
| } else { |
| T(YAFFS_TRACE_ERROR, |
| (TSTR("yaffs: no more tnodes" TENDSTR))); |
| } |
| } |
| |
| fStruct->topLevel = requiredTallness; |
| } |
| |
| /* Traverse down to level 0, adding anything we need */ |
| |
| l = fStruct->topLevel; |
| tn = fStruct->top; |
| |
| if(l > 0) { |
| while (l > 0 && tn) { |
| x = (chunkId >> |
| ( YAFFS_TNODES_LEVEL0_BITS + |
| (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) & |
| YAFFS_TNODES_INTERNAL_MASK; |
| |
| |
| if((l>1) && !tn->internal[x]){ |
| /* Add missing non-level-zero tnode */ |
| tn->internal[x] = yaffs_GetTnode(dev); |
| |
| } else if(l == 1) { |
| /* Looking from level 1 at level 0 */ |
| if (passedTn) { |
| /* If we already have one, then release it.*/ |
| if(tn->internal[x]) |
| yaffs_FreeTnode(dev,tn->internal[x]); |
| tn->internal[x] = passedTn; |
| |
| } else if(!tn->internal[x]) { |
| /* Don't have one, none passed in */ |
| tn->internal[x] = yaffs_GetTnode(dev); |
| } |
| } |
| |
| tn = tn->internal[x]; |
| l--; |
| } |
| } else { |
| /* top is level 0 */ |
| if(passedTn) { |
| memcpy(tn,passedTn,(dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8); |
| yaffs_FreeTnode(dev,passedTn); |
| } |
| } |
| |
| return tn; |
| } |
| |
| static int yaffs_FindChunkInGroup(yaffs_Device * dev, int theChunk, |
| yaffs_ExtendedTags * tags, int objectId, |
| int chunkInInode) |
| { |
| int j; |
| |
| for (j = 0; theChunk && j < dev->chunkGroupSize; j++) { |
| if (yaffs_CheckChunkBit |
| (dev, theChunk / dev->nChunksPerBlock, |
| theChunk % dev->nChunksPerBlock)) { |
| yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL, |
| tags); |
| if (yaffs_TagsMatch(tags, objectId, chunkInInode)) { |
| /* found it; */ |
| return theChunk; |
| |
| } |
| } |
| theChunk++; |
| } |
| return -1; |
| } |
| |
| static void yaffs_SoftDeleteChunk(yaffs_Device * dev, int chunk) |
| { |
| |
| yaffs_BlockInfo *theBlock; |
| |
| T(YAFFS_TRACE_DELETION, (TSTR("soft delete chunk %d" TENDSTR), chunk)); |
| |
| theBlock = yaffs_GetBlockInfo(dev, chunk / dev->nChunksPerBlock); |
| if (theBlock) { |
| theBlock->softDeletions++; |
| dev->nFreeChunks++; |
| } |
| } |
| |
| /* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all the chunks in the file. |
* All soft deleting does is increment the block's softdelete count and pull the chunk out
* of the tnode.
| * Thus, essentially this is the same as DeleteWorker except that the chunks are soft deleted. |
| */ |
| |
| static int yaffs_SoftDeleteWorker(yaffs_Object * in, yaffs_Tnode * tn, |
| __u32 level, int chunkOffset) |
| { |
| int i; |
| int theChunk; |
| int allDone = 1; |
| yaffs_Device *dev = in->myDev; |
| |
| if (tn) { |
| if (level > 0) { |
| |
| for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0; |
| i--) { |
| if (tn->internal[i]) { |
| allDone = |
| yaffs_SoftDeleteWorker(in, |
| tn-> |
| internal[i], |
| level - 1, |
| (chunkOffset |
| << |
| YAFFS_TNODES_INTERNAL_BITS) |
| + i); |
| if (allDone) { |
| yaffs_FreeTnode(dev, |
| tn-> |
| internal[i]); |
| tn->internal[i] = NULL; |
| } else { |
| /* Hoosterman... how could this happen? */ |
| } |
| } |
| } |
| return (allDone) ? 1 : 0; |
| } else if (level == 0) { |
| |
| for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) { |
| theChunk = yaffs_GetChunkGroupBase(dev,tn,i); |
| if (theChunk) { |
| /* Note this does not find the real chunk, only the chunk group. |
| * We make an assumption that a chunk group is not larger than |
| * a block. |
| */ |
| yaffs_SoftDeleteChunk(dev, theChunk); |
| yaffs_PutLevel0Tnode(dev,tn,i,0); |
| } |
| |
| } |
| return 1; |
| |
| } |
| |
| } |
| |
| return 1; |
| |
| } |
| |
| static void yaffs_SoftDeleteFile(yaffs_Object * obj) |
| { |
| if (obj->deleted && |
| obj->variantType == YAFFS_OBJECT_TYPE_FILE && !obj->softDeleted) { |
| if (obj->nDataChunks <= 0) { |
| /* Empty file with no duplicate object headers, just delete it immediately */ |
| yaffs_FreeTnode(obj->myDev, |
| obj->variant.fileVariant.top); |
| obj->variant.fileVariant.top = NULL; |
| T(YAFFS_TRACE_TRACING, |
| (TSTR("yaffs: Deleting empty file %d" TENDSTR), |
| obj->objectId)); |
| yaffs_DoGenericObjectDeletion(obj); |
| } else { |
| yaffs_SoftDeleteWorker(obj, |
| obj->variant.fileVariant.top, |
| obj->variant.fileVariant. |
| topLevel, 0); |
| obj->softDeleted = 1; |
| } |
| } |
| } |
| |
| /* Pruning removes any part of the file structure tree that is beyond the |
| * bounds of the file (ie that does not point to chunks). |
| * |
| * A file should only get pruned when its size is reduced. |
| * |
| * Before pruning, the chunks must be pulled from the tree and the |
| * level 0 tnode entries must be zeroed out. |
| * Could also use this for file deletion, but that's probably better handled |
| * by a special case. |
| */ |
| |
| static yaffs_Tnode *yaffs_PruneWorker(yaffs_Device * dev, yaffs_Tnode * tn, |
| __u32 level, int del0) |
| { |
| int i; |
| int hasData; |
| |
| if (tn) { |
| hasData = 0; |
| |
| for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) { |
| if (tn->internal[i] && level > 0) { |
| tn->internal[i] = |
| yaffs_PruneWorker(dev, tn->internal[i], |
| level - 1, |
| (i == 0) ? del0 : 1); |
| } |
| |
| if (tn->internal[i]) { |
| hasData++; |
| } |
| } |
| |
| if (hasData == 0 && del0) { |
| /* Free and return NULL */ |
| |
| yaffs_FreeTnode(dev, tn); |
| tn = NULL; |
| } |
| |
| } |
| |
| return tn; |
| |
| } |
| |
| static int yaffs_PruneFileStructure(yaffs_Device * dev, |
| yaffs_FileStructure * fStruct) |
| { |
| int i; |
| int hasData; |
| int done = 0; |
| yaffs_Tnode *tn; |
| |
| if (fStruct->topLevel > 0) { |
| fStruct->top = |
| yaffs_PruneWorker(dev, fStruct->top, fStruct->topLevel, 0); |
| |
/* Now we have a tree with all the empty branches pruned to NULL but the height
* is the same as it was.
* Let's see if we can trim internal tnodes to shorten the tree.
* We can do this if only the 0th element in the tnode is in use
* (ie all the elements other than the 0th are NULL).
*/
| |
| while (fStruct->topLevel && !done) { |
| tn = fStruct->top; |
| |
| hasData = 0; |
| for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) { |
| if (tn->internal[i]) { |
| hasData++; |
| } |
| } |
| |
| if (!hasData) { |
| fStruct->top = tn->internal[0]; |
| fStruct->topLevel--; |
| yaffs_FreeTnode(dev, tn); |
| } else { |
| done = 1; |
| } |
| } |
| } |
| |
| return YAFFS_OK; |
| } |
| |
| /*-------------------- End of File Structure functions.-------------------*/ |
| |
| /* yaffs_CreateFreeObjects creates a bunch more objects and |
| * adds them to the object free list. |
| */ |
| static int yaffs_CreateFreeObjects(yaffs_Device * dev, int nObjects) |
| { |
| int i; |
| yaffs_Object *newObjects; |
| yaffs_ObjectList *list; |
| |
| if (nObjects < 1) |
| return YAFFS_OK; |
| |
| /* make these things */ |
| newObjects = YMALLOC(nObjects * sizeof(yaffs_Object)); |
| list = YMALLOC(sizeof(yaffs_ObjectList)); |
| |
| if (!newObjects || !list) { |
| if(newObjects) |
| YFREE(newObjects); |
| if(list) |
| YFREE(list); |
| T(YAFFS_TRACE_ALLOCATE, |
| (TSTR("yaffs: Could not allocate more objects" TENDSTR))); |
| return YAFFS_FAIL; |
| } |
| |
| /* Hook them into the free list */ |
| for (i = 0; i < nObjects - 1; i++) { |
| newObjects[i].siblings.next = |
| (struct list_head *)(&newObjects[i + 1]); |
| } |
| |
| newObjects[nObjects - 1].siblings.next = (void *)dev->freeObjects; |
| dev->freeObjects = newObjects; |
| dev->nFreeObjects += nObjects; |
| dev->nObjectsCreated += nObjects; |
| |
| /* Now add this bunch of Objects to a list for freeing up. */ |
| |
| list->objects = newObjects; |
| list->next = dev->allocatedObjectList; |
| dev->allocatedObjectList = list; |
| |
| return YAFFS_OK; |
| } |
| |
| |
/* AllocateEmptyObject gets us a clean Object. Tries to allocate more if we run out */
| static yaffs_Object *yaffs_AllocateEmptyObject(yaffs_Device * dev) |
| { |
| yaffs_Object *tn = NULL; |
| |
| /* If there are none left make more */ |
| if (!dev->freeObjects) { |
| yaffs_CreateFreeObjects(dev, YAFFS_ALLOCATION_NOBJECTS); |
| } |
| |
| if (dev->freeObjects) { |
| tn = dev->freeObjects; |
| dev->freeObjects = |
| (yaffs_Object *) (dev->freeObjects->siblings.next); |
| dev->nFreeObjects--; |
| |
| /* Now sweeten it up... */ |
| |
| memset(tn, 0, sizeof(yaffs_Object)); |
| tn->myDev = dev; |
| tn->chunkId = -1; |
| tn->variantType = YAFFS_OBJECT_TYPE_UNKNOWN; |
| INIT_LIST_HEAD(&(tn->hardLinks)); |
| INIT_LIST_HEAD(&(tn->hashLink)); |
| INIT_LIST_HEAD(&tn->siblings); |
| |
| /* Add it to the lost and found directory. |
| * NB Can't put root or lostNFound in lostNFound so |
| * check if lostNFound exists first |
| */ |
| if (dev->lostNFoundDir) { |
| yaffs_AddObjectToDirectory(dev->lostNFoundDir, tn); |
| } |
| } |
| |
| return tn; |
| } |
| |
| static yaffs_Object *yaffs_CreateFakeDirectory(yaffs_Device * dev, int number, |
| __u32 mode) |
| { |
| |
| yaffs_Object *obj = |
| yaffs_CreateNewObject(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY); |
| if (obj) { |
| obj->fake = 1; /* it is fake so it has no NAND presence... */ |
| obj->renameAllowed = 0; /* ... and we're not allowed to rename it... */ |
| obj->unlinkAllowed = 0; /* ... or unlink it */ |
| obj->deleted = 0; |
| obj->unlinked = 0; |
| obj->yst_mode = mode; |
| obj->myDev = dev; |
| obj->chunkId = 0; /* Not a valid chunk. */ |
| } |
| |
| return obj; |
| |
| } |
| |
| static void yaffs_UnhashObject(yaffs_Object * tn) |
| { |
| int bucket; |
| yaffs_Device *dev = tn->myDev; |
| |
| /* If it is still linked into the bucket list, free from the list */ |
| if (!list_empty(&tn->hashLink)) { |
| list_del_init(&tn->hashLink); |
| bucket = yaffs_HashFunction(tn->objectId); |
| dev->objectBucket[bucket].count--; |
| } |
| |
| } |
| |
| /* FreeObject frees up a Object and puts it back on the free list */ |
| static void yaffs_FreeObject(yaffs_Object * tn) |
| { |
| |
| yaffs_Device *dev = tn->myDev; |
| |
| /* XXX U-BOOT XXX */ |
| #if 0 |
| #ifdef __KERNEL__ |
| if (tn->myInode) { |
| /* We're still hooked up to a cached inode. |
| * Don't delete now, but mark for later deletion |
| */ |
| tn->deferedFree = 1; |
| return; |
| } |
| #endif |
| #endif |
| yaffs_UnhashObject(tn); |
| |
| /* Link into the free list. */ |
| tn->siblings.next = (struct list_head *)(dev->freeObjects); |
| dev->freeObjects = tn; |
| dev->nFreeObjects++; |
| } |
| |
| /* XXX U-BOOT XXX */ |
| #if 0 |
| #ifdef __KERNEL__ |
| |
| void yaffs_HandleDeferedFree(yaffs_Object * obj) |
| { |
| if (obj->deferedFree) { |
| yaffs_FreeObject(obj); |
| } |
| } |
| |
| #endif |
| #endif |
| |
| static void yaffs_DeinitialiseObjects(yaffs_Device * dev) |
| { |
| /* Free the list of allocated Objects */ |
| |
| yaffs_ObjectList *tmp; |
| |
| while (dev->allocatedObjectList) { |
| tmp = dev->allocatedObjectList->next; |
| YFREE(dev->allocatedObjectList->objects); |
| YFREE(dev->allocatedObjectList); |
| |
| dev->allocatedObjectList = tmp; |
| } |
| |
| dev->freeObjects = NULL; |
| dev->nFreeObjects = 0; |
| } |
| |
| static void yaffs_InitialiseObjects(yaffs_Device * dev) |
| { |
| int i; |
| |
| dev->allocatedObjectList = NULL; |
| dev->freeObjects = NULL; |
| dev->nFreeObjects = 0; |
| |
| for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) { |
| INIT_LIST_HEAD(&dev->objectBucket[i].list); |
| dev->objectBucket[i].count = 0; |
| } |
| |
| } |
| |
| static int yaffs_FindNiceObjectBucket(yaffs_Device * dev) |
| { |
| static int x = 0; |
| int i; |
| int l = 999; |
| int lowest = 999999; |
| |
| /* First let's see if we can find one that's empty. */ |
| |
| for (i = 0; i < 10 && lowest > 0; i++) { |
| x++; |
| x %= YAFFS_NOBJECT_BUCKETS; |
| if (dev->objectBucket[x].count < lowest) { |
| lowest = dev->objectBucket[x].count; |
| l = x; |
| } |
| |
| } |
| |
| /* If we didn't find an empty list, then try |
| * looking a bit further for a short one |
| */ |
| |
| for (i = 0; i < 10 && lowest > 3; i++) { |
| x++; |
| x %= YAFFS_NOBJECT_BUCKETS; |
| if (dev->objectBucket[x].count < lowest) { |
| lowest = dev->objectBucket[x].count; |
| l = x; |
| } |
| |
| } |
| |
| return l; |
| } |
| |
| static int yaffs_CreateNewObjectNumber(yaffs_Device * dev) |
| { |
| int bucket = yaffs_FindNiceObjectBucket(dev); |
| |
| /* Now find an object value that has not already been taken |
| * by scanning the list. |
| */ |
| |
| int found = 0; |
| struct list_head *i; |
| |
| __u32 n = (__u32) bucket; |
| |
| /* yaffs_CheckObjectHashSanity(); */ |
| |
| while (!found) { |
| found = 1; |
| n += YAFFS_NOBJECT_BUCKETS; |
| if (1 || dev->objectBucket[bucket].count > 0) { |
| list_for_each(i, &dev->objectBucket[bucket].list) { |
| /* If there is already one in the list */ |
| if (i |
| && list_entry(i, yaffs_Object, |
| hashLink)->objectId == n) { |
| found = 0; |
| } |
| } |
| } |
| } |
| |
| |
| return n; |
| } |
| |
| static void yaffs_HashObject(yaffs_Object * in) |
| { |
| int bucket = yaffs_HashFunction(in->objectId); |
| yaffs_Device *dev = in->myDev; |
| |
| list_add(&in->hashLink, &dev->objectBucket[bucket].list); |
| dev->objectBucket[bucket].count++; |
| |
| } |
| |
| yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device * dev, __u32 number) |
| { |
| int bucket = yaffs_HashFunction(number); |
| struct list_head *i; |
| yaffs_Object *in; |
| |
| list_for_each(i, &dev->objectBucket[bucket].list) { |
| /* Look if it is in the list */ |
| if (i) { |
| in = list_entry(i, yaffs_Object, hashLink); |
| if (in->objectId == number) { |
| /* XXX U-BOOT XXX */ |
| #if 0 |
| #ifdef __KERNEL__ |
| /* Don't tell the VFS about this one if it is defered free */ |
| if (in->deferedFree) |
| return NULL; |
| #endif |
| #endif |
| return in; |
| } |
| } |
| } |
| |
| return NULL; |
| } |
| |
| yaffs_Object *yaffs_CreateNewObject(yaffs_Device * dev, int number, |
| yaffs_ObjectType type) |
| { |
| |
| yaffs_Object *theObject; |
| yaffs_Tnode *tn = NULL; |
| |
| if (number < 0) { |
| number = yaffs_CreateNewObjectNumber(dev); |
| } |
| |
| theObject = yaffs_AllocateEmptyObject(dev); |
| if(!theObject) |
| return NULL; |
| |
| if(type == YAFFS_OBJECT_TYPE_FILE){ |
| tn = yaffs_GetTnode(dev); |
| if(!tn){ |
| yaffs_FreeObject(theObject); |
| return NULL; |
| } |
| } |
| |
| |
| |
| if (theObject) { |
| theObject->fake = 0; |
| theObject->renameAllowed = 1; |
| theObject->unlinkAllowed = 1; |
| theObject->objectId = number; |
| yaffs_HashObject(theObject); |
| theObject->variantType = type; |
| #ifdef CONFIG_YAFFS_WINCE |
| yfsd_WinFileTimeNow(theObject->win_atime); |
| theObject->win_ctime[0] = theObject->win_mtime[0] = |
| theObject->win_atime[0]; |
| theObject->win_ctime[1] = theObject->win_mtime[1] = |
| theObject->win_atime[1]; |
| |
| #else |
| |
| theObject->yst_atime = theObject->yst_mtime = |
| theObject->yst_ctime = Y_CURRENT_TIME; |
| #endif |
| switch (type) { |
| case YAFFS_OBJECT_TYPE_FILE: |
| theObject->variant.fileVariant.fileSize = 0; |
| theObject->variant.fileVariant.scannedFileSize = 0; |
| theObject->variant.fileVariant.shrinkSize = 0xFFFFFFFF; /* max __u32 */ |
| theObject->variant.fileVariant.topLevel = 0; |
| theObject->variant.fileVariant.top = tn; |
| break; |
| case YAFFS_OBJECT_TYPE_DIRECTORY: |
| INIT_LIST_HEAD(&theObject->variant.directoryVariant. |
| children); |
| break; |
| case YAFFS_OBJECT_TYPE_SYMLINK: |
| case YAFFS_OBJECT_TYPE_HARDLINK: |
| case YAFFS_OBJECT_TYPE_SPECIAL: |
| /* No action required */ |
| break; |
| case YAFFS_OBJECT_TYPE_UNKNOWN: |
/* TODO: this should not happen */
| break; |
| } |
| } |
| |
| return theObject; |
| } |
| |
| static yaffs_Object *yaffs_FindOrCreateObjectByNumber(yaffs_Device * dev, |
| int number, |
| yaffs_ObjectType type) |
| { |
| yaffs_Object *theObject = NULL; |
| |
| if (number > 0) { |
| theObject = yaffs_FindObjectByNumber(dev, number); |
| } |
| |
| if (!theObject) { |
| theObject = yaffs_CreateNewObject(dev, number, type); |
| } |
| |
| return theObject; |
| |
| } |
| |
| |
| static YCHAR *yaffs_CloneString(const YCHAR * str) |
| { |
| YCHAR *newStr = NULL; |
| |
| if (str && *str) { |
| newStr = YMALLOC((yaffs_strlen(str) + 1) * sizeof(YCHAR)); |
| if(newStr) |
| yaffs_strcpy(newStr, str); |
| } |
| |
| return newStr; |
| |
| } |
| |
| /* |
| * Mknod (create) a new object. |
| * equivalentObject only has meaning for a hard link; |
* aliasString only has meaning for a symlink.
| * rdev only has meaning for devices (a subset of special objects) |
| */ |
| |
| static yaffs_Object *yaffs_MknodObject(yaffs_ObjectType type, |
| yaffs_Object * parent, |
| const YCHAR * name, |
| __u32 mode, |
| __u32 uid, |
| __u32 gid, |
| yaffs_Object * equivalentObject, |
| const YCHAR * aliasString, __u32 rdev) |
| { |
| yaffs_Object *in; |
| YCHAR *str = NULL; |
| |
| yaffs_Device *dev = parent->myDev; |
| |
| /* Check if the entry exists. If it does then fail the call since we don't want a dup.*/ |
| if (yaffs_FindObjectByName(parent, name)) { |
| return NULL; |
| } |
| |
| in = yaffs_CreateNewObject(dev, -1, type); |
| |
| if(type == YAFFS_OBJECT_TYPE_SYMLINK){ |
| str = yaffs_CloneString(aliasString); |
| if(!str){ |
| yaffs_FreeObject(in); |
| return NULL; |
| } |
| } |
| |
| |
| |
| if (in) { |
| in->chunkId = -1; |
| in->valid = 1; |
| in->variantType = type; |
| |
| in->yst_mode = mode; |
| |
| #ifdef CONFIG_YAFFS_WINCE |
| yfsd_WinFileTimeNow(in->win_atime); |
| in->win_ctime[0] = in->win_mtime[0] = in->win_atime[0]; |
| in->win_ctime[1] = in->win_mtime[1] = in->win_atime[1]; |
| |
| #else |
| in->yst_atime = in->yst_mtime = in->yst_ctime = Y_CURRENT_TIME; |
| |
| in->yst_rdev = rdev; |
| in->yst_uid = uid; |
| in->yst_gid = gid; |
| #endif |
| in->nDataChunks = 0; |
| |
| yaffs_SetObjectName(in, name); |
| in->dirty = 1; |
| |
| yaffs_AddObjectToDirectory(parent, in); |
| |
| in->myDev = parent->myDev; |
| |
| switch (type) { |
| case YAFFS_OBJECT_TYPE_SYMLINK: |
| in->variant.symLinkVariant.alias = str; |
| break; |
| case YAFFS_OBJECT_TYPE_HARDLINK: |
| in->variant.hardLinkVariant.equivalentObject = |
| equivalentObject; |
| in->variant.hardLinkVariant.equivalentObjectId = |
| equivalentObject->objectId; |
| list_add(&in->hardLinks, &equivalentObject->hardLinks); |
| break; |
| case YAFFS_OBJECT_TYPE_FILE: |
| case YAFFS_OBJECT_TYPE_DIRECTORY: |
| case YAFFS_OBJECT_TYPE_SPECIAL: |
| case YAFFS_OBJECT_TYPE_UNKNOWN: |
| /* do nothing */ |
| break; |
| } |
| |
| if (yaffs_UpdateObjectHeader(in, name, 0, 0, 0) < 0) { |
| /* Could not create the object header, fail the creation */ |
| yaffs_DestroyObject(in); |
| in = NULL; |
| } |
| |
| } |
| |
| return in; |
| } |
| |
| yaffs_Object *yaffs_MknodFile(yaffs_Object * parent, const YCHAR * name, |
| __u32 mode, __u32 uid, __u32 gid) |
| { |
| return yaffs_MknodObject(YAFFS_OBJECT_TYPE_FILE, parent, name, mode, |
| uid, gid, NULL, NULL, 0); |
| } |
| |
| yaffs_Object *yaffs_MknodDirectory(yaffs_Object * parent, const YCHAR * name, |
| __u32 mode, __u32 uid, __u32 gid) |
| { |
| return yaffs_MknodObject(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name, |
| mode, uid, gid, NULL, NULL, 0); |
| } |
| |
| yaffs_Object *yaffs_MknodSpecial(yaffs_Object * parent, const YCHAR * name, |
| __u32 mode, __u32 uid, __u32 gid, __u32 rdev) |
| { |
| return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode, |
| uid, gid, NULL, NULL, rdev); |
| } |
| |
| yaffs_Object *yaffs_MknodSymLink(yaffs_Object * parent, const YCHAR * name, |
| __u32 mode, __u32 uid, __u32 gid, |
| const YCHAR * alias) |
| { |
| return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode, |
| uid, gid, NULL, alias, 0); |
| } |
| |
/* yaffs_Link returns the equivalent object, or NULL if the link could not be made. */
| yaffs_Object *yaffs_Link(yaffs_Object * parent, const YCHAR * name, |
| yaffs_Object * equivalentObject) |
| { |
| /* Get the real object in case we were fed a hard link as an equivalent object */ |
| equivalentObject = yaffs_GetEquivalentObject(equivalentObject); |
| |
| if (yaffs_MknodObject |
| (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0, |
| equivalentObject, NULL, 0)) { |
| return equivalentObject; |
| } else { |
| return NULL; |
| } |
| |
| } |
| |
| static int yaffs_ChangeObjectName(yaffs_Object * obj, yaffs_Object * newDir, |
| const YCHAR * newName, int force, int shadows) |
| { |
| int unlinkOp; |
| int deleteOp; |
| |
| yaffs_Object *existingTarget; |
| |
| if (newDir == NULL) { |
| newDir = obj->parent; /* use the old directory */ |
| } |
| |
| if (newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) { |
| T(YAFFS_TRACE_ALWAYS, |
| (TSTR |
| ("tragendy: yaffs_ChangeObjectName: newDir is not a directory" |
| TENDSTR))); |
| YBUG(); |
| } |
| |
| /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */ |
| if (obj->myDev->isYaffs2) { |
| unlinkOp = (newDir == obj->myDev->unlinkedDir); |
| } else { |
| unlinkOp = (newDir == obj->myDev->unlinkedDir |
| && obj->variantType == YAFFS_OBJECT_TYPE_FILE); |
| } |
| |
| deleteOp = (newDir == obj->myDev->deletedDir); |
| |
| existingTarget = yaffs_FindObjectByName(newDir, newName); |
| |
	/* If the object is a file going into the unlinked directory,
	 * then it is OK to just stuff it in since duplicate names are allowed.
	 * Otherwise only proceed if the new name does not exist and we're
	 * putting it into a directory.
	 */
| if ((unlinkOp || |
| deleteOp || |
| force || |
| (shadows > 0) || |
| !existingTarget) && |
| newDir->variantType == YAFFS_OBJECT_TYPE_DIRECTORY) { |
| yaffs_SetObjectName(obj, newName); |
| obj->dirty = 1; |
| |
| yaffs_AddObjectToDirectory(newDir, obj); |
| |
| if (unlinkOp) |
| obj->unlinked = 1; |
| |
| /* If it is a deletion then we mark it as a shrink for gc purposes. */ |
| if (yaffs_UpdateObjectHeader(obj, newName, 0, deleteOp, shadows)>= 0) |
| return YAFFS_OK; |
| } |
| |
| return YAFFS_FAIL; |
| } |
| |
| int yaffs_RenameObject(yaffs_Object * oldDir, const YCHAR * oldName, |
| yaffs_Object * newDir, const YCHAR * newName) |
| { |
| yaffs_Object *obj; |
| yaffs_Object *existingTarget; |
| int force = 0; |
| |
| #ifdef CONFIG_YAFFS_CASE_INSENSITIVE |
	/* Special case for case insensitive systems (e.g. WinCE).
	 * While look-up is case insensitive, the name isn't.
	 * Therefore we might want to change x.txt to X.txt.
	 */
| if (oldDir == newDir && yaffs_strcmp(oldName, newName) == 0) { |
| force = 1; |
| } |
| #endif |
| |
	obj = yaffs_FindObjectByName(oldDir, oldName);
	if (!obj)
		return YAFFS_FAIL;

	/* Check if the new name is too long. */
	if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK &&
	    yaffs_strlen(newName) > YAFFS_MAX_ALIAS_LENGTH)
		/* ENAMETOOLONG */
		return YAFFS_FAIL;
	else if (obj->variantType != YAFFS_OBJECT_TYPE_SYMLINK &&
		 yaffs_strlen(newName) > YAFFS_MAX_NAME_LENGTH)
		/* ENAMETOOLONG */
		return YAFFS_FAIL;
| |
| if (obj && obj->renameAllowed) { |
| |
| /* Now do the handling for an existing target, if there is one */ |
| |
| existingTarget = yaffs_FindObjectByName(newDir, newName); |
| if (existingTarget && |
| existingTarget->variantType == YAFFS_OBJECT_TYPE_DIRECTORY && |
| !list_empty(&existingTarget->variant.directoryVariant.children)) { |
| /* There is a target that is a non-empty directory, so we fail */ |
| return YAFFS_FAIL; /* EEXIST or ENOTEMPTY */ |
| } else if (existingTarget && existingTarget != obj) { |
| /* Nuke the target first, using shadowing, |
| * but only if it isn't the same object |
| */ |
| yaffs_ChangeObjectName(obj, newDir, newName, force, |
| existingTarget->objectId); |
| yaffs_UnlinkObject(existingTarget); |
| } |
| |
| return yaffs_ChangeObjectName(obj, newDir, newName, 1, 0); |
| } |
| return YAFFS_FAIL; |
| } |
| |
| /*------------------------- Block Management and Page Allocation ----------------*/ |
| |
| static int yaffs_InitialiseBlocks(yaffs_Device * dev) |
| { |
| int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1; |
| |
| dev->blockInfo = NULL; |
| dev->chunkBits = NULL; |
| |
| dev->allocationBlock = -1; /* force it to get a new one */ |
| |
	/* If the first allocation strategy fails, try the alternative one */
| dev->blockInfo = YMALLOC(nBlocks * sizeof(yaffs_BlockInfo)); |
| if(!dev->blockInfo){ |
| dev->blockInfo = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockInfo)); |
| dev->blockInfoAlt = 1; |
| } |
| else |
| dev->blockInfoAlt = 0; |
| |
| if(dev->blockInfo){ |
| |
| /* Set up dynamic blockinfo stuff. */ |
| dev->chunkBitmapStride = (dev->nChunksPerBlock + 7) / 8; /* round up bytes */ |
| dev->chunkBits = YMALLOC(dev->chunkBitmapStride * nBlocks); |
| if(!dev->chunkBits){ |
| dev->chunkBits = YMALLOC_ALT(dev->chunkBitmapStride * nBlocks); |
| dev->chunkBitsAlt = 1; |
| } |
| else |
| dev->chunkBitsAlt = 0; |
| } |
| |
| if (dev->blockInfo && dev->chunkBits) { |
| memset(dev->blockInfo, 0, nBlocks * sizeof(yaffs_BlockInfo)); |
| memset(dev->chunkBits, 0, dev->chunkBitmapStride * nBlocks); |
| return YAFFS_OK; |
| } |
| |
| return YAFFS_FAIL; |
| |
| } |
| |
| static void yaffs_DeinitialiseBlocks(yaffs_Device * dev) |
| { |
| if(dev->blockInfoAlt && dev->blockInfo) |
| YFREE_ALT(dev->blockInfo); |
| else if(dev->blockInfo) |
| YFREE(dev->blockInfo); |
| |
| dev->blockInfoAlt = 0; |
| |
| dev->blockInfo = NULL; |
| |
| if(dev->chunkBitsAlt && dev->chunkBits) |
| YFREE_ALT(dev->chunkBits); |
| else if(dev->chunkBits) |
| YFREE(dev->chunkBits); |
| dev->chunkBitsAlt = 0; |
| dev->chunkBits = NULL; |
| } |
| |
| static int yaffs_BlockNotDisqualifiedFromGC(yaffs_Device * dev, |
| yaffs_BlockInfo * bi) |
| { |
| int i; |
| __u32 seq; |
| yaffs_BlockInfo *b; |
| |
| if (!dev->isYaffs2) |
| return 1; /* disqualification only applies to yaffs2. */ |
| |
| if (!bi->hasShrinkHeader) |
| return 1; /* can gc */ |
| |
| /* Find the oldest dirty sequence number if we don't know it and save it |
| * so we don't have to keep recomputing it. |
| */ |
| if (!dev->oldestDirtySequence) { |
| seq = dev->sequenceNumber; |
| |
| for (i = dev->internalStartBlock; i <= dev->internalEndBlock; |
| i++) { |
| b = yaffs_GetBlockInfo(dev, i); |
| if (b->blockState == YAFFS_BLOCK_STATE_FULL && |
| (b->pagesInUse - b->softDeletions) < |
| dev->nChunksPerBlock && b->sequenceNumber < seq) { |
| seq = b->sequenceNumber; |
| } |
| } |
| dev->oldestDirtySequence = seq; |
| } |
| |
| /* Can't do gc of this block if there are any blocks older than this one that have |
| * discarded pages. |
| */ |
| return (bi->sequenceNumber <= dev->oldestDirtySequence); |
| |
| } |
| |
/* yaffs_FindBlockForGarbageCollection is used to select the dirtiest block
 * (or close enough) for garbage collection.
 */
| |
| static int yaffs_FindBlockForGarbageCollection(yaffs_Device * dev, |
| int aggressive) |
| { |
| |
| int b = dev->currentDirtyChecker; |
| |
| int i; |
| int iterations; |
| int dirtiest = -1; |
| int pagesInUse = 0; |
| int prioritised=0; |
| yaffs_BlockInfo *bi; |
| int pendingPrioritisedExist = 0; |
| |
| /* First let's see if we need to grab a prioritised block */ |
| if(dev->hasPendingPrioritisedGCs){ |
| for(i = dev->internalStartBlock; i < dev->internalEndBlock && !prioritised; i++){ |
| |
| bi = yaffs_GetBlockInfo(dev, i); |
| //yaffs_VerifyBlock(dev,bi,i); |
| |
| if(bi->gcPrioritise) { |
| pendingPrioritisedExist = 1; |
| if(bi->blockState == YAFFS_BLOCK_STATE_FULL && |
| yaffs_BlockNotDisqualifiedFromGC(dev, bi)){ |
| pagesInUse = (bi->pagesInUse - bi->softDeletions); |
| dirtiest = i; |
| prioritised = 1; |
					aggressive = 1; /* Fool the non-aggressive skip logic below */
| } |
| } |
| } |
| |
| if(!pendingPrioritisedExist) /* None found, so we can clear this */ |
| dev->hasPendingPrioritisedGCs = 0; |
| } |
| |
	/* If we're doing aggressive GC then we are happy to take a less-dirty block,
	 * and we search harder.
	 * Otherwise (we're doing a leisurely gc) we only bother to do this if the
	 * block has only a few pages in use.
	 */
| |
| dev->nonAggressiveSkip--; |
| |
| if (!aggressive && (dev->nonAggressiveSkip > 0)) { |
| return -1; |
| } |
| |
| if(!prioritised) |
| pagesInUse = |
| (aggressive) ? dev->nChunksPerBlock : YAFFS_PASSIVE_GC_CHUNKS + 1; |
| |
| if (aggressive) { |
| iterations = |
| dev->internalEndBlock - dev->internalStartBlock + 1; |
| } else { |
| iterations = |
| dev->internalEndBlock - dev->internalStartBlock + 1; |
| iterations = iterations / 16; |
| if (iterations > 200) { |
| iterations = 200; |
| } |
| } |
| |
| for (i = 0; i <= iterations && pagesInUse > 0 && !prioritised; i++) { |
| b++; |
| if (b < dev->internalStartBlock || b > dev->internalEndBlock) { |
| b = dev->internalStartBlock; |
| } |
| |
| if (b < dev->internalStartBlock || b > dev->internalEndBlock) { |
| T(YAFFS_TRACE_ERROR, |
| (TSTR("**>> Block %d is not valid" TENDSTR), b)); |
| YBUG(); |
| } |
| |
| bi = yaffs_GetBlockInfo(dev, b); |
| |
| #if 0 |
| if (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT) { |
| dirtiest = b; |
| pagesInUse = 0; |
| } |
| else |
| #endif |
| |
| if (bi->blockState == YAFFS_BLOCK_STATE_FULL && |
| (bi->pagesInUse - bi->softDeletions) < pagesInUse && |
| yaffs_BlockNotDisqualifiedFromGC(dev, bi)) { |
| dirtiest = b; |
| pagesInUse = (bi->pagesInUse - bi->softDeletions); |
| } |
| } |
| |
| dev->currentDirtyChecker = b; |
| |
| if (dirtiest > 0) { |
| T(YAFFS_TRACE_GC, |
| (TSTR("GC Selected block %d with %d free, prioritised:%d" TENDSTR), dirtiest, |
| dev->nChunksPerBlock - pagesInUse,prioritised)); |
| } |
| |
| dev->oldestDirtySequence = 0; |
| |
| if (dirtiest > 0) { |
| dev->nonAggressiveSkip = 4; |
| } |
| |
| return dirtiest; |
| } |
| |
| static void yaffs_BlockBecameDirty(yaffs_Device * dev, int blockNo) |
| { |
| yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockNo); |
| |
| int erasedOk = 0; |
| |
| /* If the block is still healthy erase it and mark as clean. |
| * If the block has had a data failure, then retire it. |
| */ |
| |
| T(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE, |
| (TSTR("yaffs_BlockBecameDirty block %d state %d %s"TENDSTR), |
| blockNo, bi->blockState, (bi->needsRetiring) ? "needs retiring" : "")); |
| |
| bi->blockState = YAFFS_BLOCK_STATE_DIRTY; |
| |
| if (!bi->needsRetiring) { |
| yaffs_InvalidateCheckpoint(dev); |
| erasedOk = yaffs_EraseBlockInNAND(dev, blockNo); |
| if (!erasedOk) { |
| dev->nErasureFailures++; |
| T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, |
| (TSTR("**>> Erasure failed %d" TENDSTR), blockNo)); |
| } |
| } |
| |
| if (erasedOk && |
| ((yaffs_traceMask & YAFFS_TRACE_ERASE) || !yaffs_SkipVerification(dev))) { |
| int i; |
| for (i = 0; i < dev->nChunksPerBlock; i++) { |
| if (!yaffs_CheckChunkErased |
| (dev, blockNo * dev->nChunksPerBlock + i)) { |
| T(YAFFS_TRACE_ERROR, |
| (TSTR |
| (">>Block %d erasure supposedly OK, but chunk %d not erased" |
| TENDSTR), blockNo, i)); |
| } |
| } |
| } |
| |
| if (erasedOk) { |
| /* Clean it up... */ |
| bi->blockState = YAFFS_BLOCK_STATE_EMPTY; |
| dev->nErasedBlocks++; |
| bi->pagesInUse = 0; |
| bi->softDeletions = 0; |
| bi->hasShrinkHeader = 0; |
| bi->skipErasedCheck = 1; /* This is clean, so no need to check */ |
| bi->gcPrioritise = 0; |
| yaffs_ClearChunkBits(dev, blockNo); |
| |
| T(YAFFS_TRACE_ERASE, |
| (TSTR("Erased block %d" TENDSTR), blockNo)); |
| } else { |
| dev->nFreeChunks -= dev->nChunksPerBlock; /* We lost a block of free space */ |
| |
| yaffs_RetireBlock(dev, blockNo); |
| T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, |
| (TSTR("**>> Block %d retired" TENDSTR), blockNo)); |
| } |
| } |
| |
| static int yaffs_FindBlockForAllocation(yaffs_Device * dev) |
| { |
| int i; |
| |
| yaffs_BlockInfo *bi; |
| |
| if (dev->nErasedBlocks < 1) { |
		/* Hoosterman, we've got a problem.
		 * Can't get space to gc.
		 */
		T(YAFFS_TRACE_ERROR,
		  (TSTR("yaffs tragedy: no more erased blocks" TENDSTR)));
| |
| return -1; |
| } |
| |
| /* Find an empty block. */ |
| |
| for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) { |
| dev->allocationBlockFinder++; |
| if (dev->allocationBlockFinder < dev->internalStartBlock |
| || dev->allocationBlockFinder > dev->internalEndBlock) { |
| dev->allocationBlockFinder = dev->internalStartBlock; |
| } |
| |
| bi = yaffs_GetBlockInfo(dev, dev->allocationBlockFinder); |
| |
| if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY) { |
| bi->blockState = YAFFS_BLOCK_STATE_ALLOCATING; |
| dev->sequenceNumber++; |
| bi->sequenceNumber = dev->sequenceNumber; |
| dev->nErasedBlocks--; |
| T(YAFFS_TRACE_ALLOCATE, |
| (TSTR("Allocated block %d, seq %d, %d left" TENDSTR), |
| dev->allocationBlockFinder, dev->sequenceNumber, |
| dev->nErasedBlocks)); |
| return dev->allocationBlockFinder; |
| } |
| } |
| |
| T(YAFFS_TRACE_ALWAYS, |
| (TSTR |
| ("yaffs tragedy: no more eraased blocks, but there should have been %d" |
| TENDSTR), dev->nErasedBlocks)); |
| |
| return -1; |
| } |
| |
| |
| // Check if there's space to allocate... |
// Thinks.... do we need to make this the same as yaffs_GetFreeChunks()?
| static int yaffs_CheckSpaceForAllocation(yaffs_Device * dev) |
| { |
| int reservedChunks; |
| int reservedBlocks = dev->nReservedBlocks; |
| int checkpointBlocks; |
| |
| checkpointBlocks = dev->nCheckpointReservedBlocks - dev->blocksInCheckpoint; |
| if(checkpointBlocks < 0) |
| checkpointBlocks = 0; |
| |
| reservedChunks = ((reservedBlocks + checkpointBlocks) * dev->nChunksPerBlock); |
| |
| return (dev->nFreeChunks > reservedChunks); |
| } |
| |
| static int yaffs_AllocateChunk(yaffs_Device * dev, int useReserve, yaffs_BlockInfo **blockUsedPtr) |
| { |
| int retVal; |
| yaffs_BlockInfo *bi; |
| |
| if (dev->allocationBlock < 0) { |
| /* Get next block to allocate off */ |
| dev->allocationBlock = yaffs_FindBlockForAllocation(dev); |
| dev->allocationPage = 0; |
| } |
| |
| if (!useReserve && !yaffs_CheckSpaceForAllocation(dev)) { |
| /* Not enough space to allocate unless we're allowed to use the reserve. */ |
| return -1; |
| } |
| |
| if (dev->nErasedBlocks < dev->nReservedBlocks |
| && dev->allocationPage == 0) { |
| T(YAFFS_TRACE_ALLOCATE, (TSTR("Allocating reserve" TENDSTR))); |
| } |
| |
| /* Next page please.... */ |
| if (dev->allocationBlock >= 0) { |
| bi = yaffs_GetBlockInfo(dev, dev->allocationBlock); |
| |
| retVal = (dev->allocationBlock * dev->nChunksPerBlock) + |
| dev->allocationPage; |
| bi->pagesInUse++; |
| yaffs_SetChunkBit(dev, dev->allocationBlock, |
| dev->allocationPage); |
| |
| dev->allocationPage++; |
| |
| dev->nFreeChunks--; |
| |
| /* If the block is full set the state to full */ |
| if (dev->allocationPage >= dev->nChunksPerBlock) { |
| bi->blockState = YAFFS_BLOCK_STATE_FULL; |
| dev->allocationBlock = -1; |
| } |
| |
| if(blockUsedPtr) |
| *blockUsedPtr = bi; |
| |
| return retVal; |
| } |
| |
| T(YAFFS_TRACE_ERROR, |
| (TSTR("!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" TENDSTR))); |
| |
| return -1; |
| } |
| |
| static int yaffs_GetErasedChunks(yaffs_Device * dev) |
| { |
| int n; |
| |
| n = dev->nErasedBlocks * dev->nChunksPerBlock; |
| |
| if (dev->allocationBlock > 0) { |
| n += (dev->nChunksPerBlock - dev->allocationPage); |
| } |
| |
| return n; |
| |
| } |
| |
| static int yaffs_GarbageCollectBlock(yaffs_Device * dev, int block) |
| { |
| int oldChunk; |
| int newChunk; |
| int chunkInBlock; |
| int markNAND; |
| int retVal = YAFFS_OK; |
| int cleanups = 0; |
| int i; |
| int isCheckpointBlock; |
| int matchingChunk; |
| |
| int chunksBefore = yaffs_GetErasedChunks(dev); |
| int chunksAfter; |
| |
| yaffs_ExtendedTags tags; |
| |
| yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, block); |
| |
| yaffs_Object *object; |
| |
| isCheckpointBlock = (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT); |
| |
| bi->blockState = YAFFS_BLOCK_STATE_COLLECTING; |
| |
| T(YAFFS_TRACE_TRACING, |
| (TSTR("Collecting block %d, in use %d, shrink %d, " TENDSTR), block, |
| bi->pagesInUse, bi->hasShrinkHeader)); |
| |
| /*yaffs_VerifyFreeChunks(dev); */ |
| |
| bi->hasShrinkHeader = 0; /* clear the flag so that the block can erase */ |
| |
| /* Take off the number of soft deleted entries because |
| * they're going to get really deleted during GC. |
| */ |
| dev->nFreeChunks -= bi->softDeletions; |
| |
| dev->isDoingGC = 1; |
| |
| if (isCheckpointBlock || |
| !yaffs_StillSomeChunkBits(dev, block)) { |
| T(YAFFS_TRACE_TRACING, |
| (TSTR |
| ("Collecting block %d that has no chunks in use" TENDSTR), |
| block)); |
| yaffs_BlockBecameDirty(dev, block); |
| } else { |
| |
| __u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__); |
| |
| yaffs_VerifyBlock(dev,bi,block); |
| |
| for (chunkInBlock = 0, oldChunk = block * dev->nChunksPerBlock; |
| chunkInBlock < dev->nChunksPerBlock |
| && yaffs_StillSomeChunkBits(dev, block); |
| chunkInBlock++, oldChunk++) { |
| if (yaffs_CheckChunkBit(dev, block, chunkInBlock)) { |
| |
| /* This page is in use and might need to be copied off */ |
| |
| markNAND = 1; |
| |
| yaffs_InitialiseTags(&tags); |
| |
| yaffs_ReadChunkWithTagsFromNAND(dev, oldChunk, |
| buffer, &tags); |
| |
| object = |
| yaffs_FindObjectByNumber(dev, |
| tags.objectId); |
| |
| T(YAFFS_TRACE_GC_DETAIL, |
| (TSTR |
| ("Collecting page %d, %d %d %d " TENDSTR), |
| chunkInBlock, tags.objectId, tags.chunkId, |
| tags.byteCount)); |
| |
| if(object && !yaffs_SkipVerification(dev)){ |
| if(tags.chunkId == 0) |
| matchingChunk = object->chunkId; |
| else if(object->softDeleted) |
| matchingChunk = oldChunk; /* Defeat the test */ |
| else |
| matchingChunk = yaffs_FindChunkInFile(object,tags.chunkId,NULL); |
| |
| if(oldChunk != matchingChunk) |
| T(YAFFS_TRACE_ERROR, |
| (TSTR("gc: page in gc mismatch: %d %d %d %d"TENDSTR), |
| oldChunk,matchingChunk,tags.objectId, tags.chunkId)); |
| |
| } |
| |
| if (!object) { |
| T(YAFFS_TRACE_ERROR, |
| (TSTR |
| ("page %d in gc has no object: %d %d %d " |
| TENDSTR), oldChunk, |
| tags.objectId, tags.chunkId, tags.byteCount)); |
| } |
| |
| if (object && object->deleted |
| && tags.chunkId != 0) { |
					/* Data chunk in a deleted file: it has been
					 * soft deleted, so there is no need to copy it;
					 * just forget about it and fix up the object.
					 */
| |
| object->nDataChunks--; |
| |
| if (object->nDataChunks <= 0) { |
						/* remember to clean up the object */
| dev->gcCleanupList[cleanups] = |
| tags.objectId; |
| cleanups++; |
| } |
| markNAND = 0; |
| } else if (0 |
| /* Todo object && object->deleted && object->nDataChunks == 0 */ |
| ) { |
| /* Deleted object header with no data chunks. |
| * Can be discarded and the file deleted. |
| */ |
| object->chunkId = 0; |
| yaffs_FreeTnode(object->myDev, |
| object->variant. |
| fileVariant.top); |
| object->variant.fileVariant.top = NULL; |
| yaffs_DoGenericObjectDeletion(object); |
| |
| } else if (object) { |
					/* It's either a data chunk in a live file or
					 * an ObjectHeader, so we're interested in it.
					 * NB We need to keep the ObjectHeaders of deleted
					 * files until the whole file has been deleted.
					 */
| tags.serialNumber++; |
| |
| dev->nGCCopies++; |
| |
| if (tags.chunkId == 0) { |
						/* It is an object header.
						 * We need to clear the shrinkHeader flag first,
						 * since its work is done and, if left in place,
						 * it will mess up scanning.
						 * Also, clear out any shadowing stuff.
						 */
| |
| yaffs_ObjectHeader *oh; |
| oh = (yaffs_ObjectHeader *)buffer; |
| oh->isShrink = 0; |
| oh->shadowsObject = -1; |
| tags.extraShadows = 0; |
| tags.extraIsShrinkHeader = 0; |
| |
| yaffs_VerifyObjectHeader(object,oh,&tags,1); |
| } |
| |
| newChunk = |
| yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &tags, 1); |
| |
| if (newChunk < 0) { |
| retVal = YAFFS_FAIL; |
| } else { |
| |
| /* Ok, now fix up the Tnodes etc. */ |
| |
| if (tags.chunkId == 0) { |
| /* It's a header */ |
| object->chunkId = newChunk; |
| object->serial = tags.serialNumber; |
| } else { |
| /* It's a data chunk */ |
| yaffs_PutChunkIntoFile |
| (object, |
| tags.chunkId, |
| newChunk, 0); |
| } |
| } |
| } |
| |
| yaffs_DeleteChunk(dev, oldChunk, markNAND, __LINE__); |
| |
| } |
| } |
| |
| yaffs_ReleaseTempBuffer(dev, buffer, __LINE__); |
| |
| |
| /* Do any required cleanups */ |
| for (i = 0; i < cleanups; i++) { |
| /* Time to delete the file too */ |
| object = |
| yaffs_FindObjectByNumber(dev, |
| dev->gcCleanupList[i]); |
| if (object) { |
| yaffs_FreeTnode(dev, |
| object->variant.fileVariant. |
| top); |
| object->variant.fileVariant.top = NULL; |
| T(YAFFS_TRACE_GC, |
| (TSTR |
| ("yaffs: About to finally delete object %d" |
| TENDSTR), object->objectId)); |
| yaffs_DoGenericObjectDeletion(object); |
| object->myDev->nDeletedFiles--; |
| } |
| |
| } |
| |
| } |
| |
| yaffs_VerifyCollectedBlock(dev,bi,block); |
| |
| if (chunksBefore >= (chunksAfter = yaffs_GetErasedChunks(dev))) { |
| T(YAFFS_TRACE_GC, |
| (TSTR |
| ("gc did not increase free chunks before %d after %d" |
| TENDSTR), chunksBefore, chunksAfter)); |
| } |
| |
| dev->isDoingGC = 0; |
| |
| return retVal; |
| } |
| |
/* New garbage collector.
 * If we're very low on erased blocks then we do aggressive garbage collection,
 * otherwise we do "leisurely" garbage collection.
 * Aggressive gc looks further (the whole array) and will accept less-dirty blocks.
 * Passive gc only inspects smaller areas and will only accept dirtier blocks.
 *
 * The idea is to help clear out space in a more spread-out manner.
 * Dunno if it really does anything useful.
 */
| static int yaffs_CheckGarbageCollection(yaffs_Device * dev) |
| { |
| int block; |
| int aggressive; |
| int gcOk = YAFFS_OK; |
| int maxTries = 0; |
| |
| int checkpointBlockAdjust; |
| |
| if (dev->isDoingGC) { |
| /* Bail out so we don't get recursive gc */ |
| return YAFFS_OK; |
| } |
| |
| /* This loop should pass the first time. |
| * We'll only see looping here if the erase of the collected block fails. |
| */ |
| |
| do { |
| maxTries++; |
| |
| checkpointBlockAdjust = (dev->nCheckpointReservedBlocks - dev->blocksInCheckpoint); |
| if(checkpointBlockAdjust < 0) |
| checkpointBlockAdjust = 0; |
| |
| if (dev->nErasedBlocks < (dev->nReservedBlocks + checkpointBlockAdjust + 2)) { |
| /* We need a block soon...*/ |
| aggressive = 1; |
| } else { |
| /* We're in no hurry */ |
| aggressive = 0; |
| } |
| |
| block = yaffs_FindBlockForGarbageCollection(dev, aggressive); |
| |
| if (block > 0) { |
| dev->garbageCollections++; |
| if (!aggressive) { |
| dev->passiveGarbageCollections++; |
| } |
| |
| T(YAFFS_TRACE_GC, |
| (TSTR |
| ("yaffs: GC erasedBlocks %d aggressive %d" TENDSTR), |
| dev->nErasedBlocks, aggressive)); |
| |
| gcOk = yaffs_GarbageCollectBlock(dev, block); |
| } |
| |
| if (dev->nErasedBlocks < (dev->nReservedBlocks) && block > 0) { |
| T(YAFFS_TRACE_GC, |
| (TSTR |
| ("yaffs: GC !!!no reclaim!!! erasedBlocks %d after try %d block %d" |
| TENDSTR), dev->nErasedBlocks, maxTries, block)); |
| } |
| } while ((dev->nErasedBlocks < dev->nReservedBlocks) && (block > 0) |
| && (maxTries < 2)); |
| |
| return aggressive ? gcOk : YAFFS_OK; |
| } |
| |
| /*------------------------- TAGS --------------------------------*/ |
| |
| static int yaffs_TagsMatch(const yaffs_ExtendedTags * tags, int objectId, |
| int chunkInObject) |
| { |
| return (tags->chunkId == chunkInObject && |
| tags->objectId == objectId && !tags->chunkDeleted) ? 1 : 0; |
| |
| } |
| |
| |
| /*-------------------- Data file manipulation -----------------*/ |
| |
| static int yaffs_FindChunkInFile(yaffs_Object * in, int chunkInInode, |
| yaffs_ExtendedTags * tags) |
| { |
	/* Get the Tnode, then get the level 0 chunk offset. */
| yaffs_Tnode *tn; |
| int theChunk = -1; |
| yaffs_ExtendedTags localTags; |
| int retVal = -1; |
| |
| yaffs_Device *dev = in->myDev; |
| |
| if (!tags) { |
| /* Passed a NULL, so use our own tags space */ |
| tags = &localTags; |
| } |
| |
| tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode); |
| |
| if (tn) { |
| theChunk = yaffs_GetChunkGroupBase(dev,tn,chunkInInode); |
| |
| retVal = |
| yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId, |
| chunkInInode); |
| } |
| return retVal; |
| } |
| |
| static int yaffs_FindAndDeleteChunkInFile(yaffs_Object * in, int chunkInInode, |
| yaffs_ExtendedTags * tags) |
| { |
	/* Get the Tnode, then get the level 0 chunk offset. */
| yaffs_Tnode *tn; |
| int theChunk = -1; |
| yaffs_ExtendedTags localTags; |
| |
| yaffs_Device *dev = in->myDev; |
| int retVal = -1; |
| |
| if (!tags) { |
| /* Passed a NULL, so use our own tags space */ |
| tags = &localTags; |
| } |
| |
| tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode); |
| |
| if (tn) { |
| |
| theChunk = yaffs_GetChunkGroupBase(dev,tn,chunkInInode); |
| |
| retVal = |
| yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId, |
| chunkInInode); |
| |
| /* Delete the entry in the filestructure (if found) */ |
| if (retVal != -1) { |
| yaffs_PutLevel0Tnode(dev,tn,chunkInInode,0); |
| } |
| } else { |
| /*T(("No level 0 found for %d\n", chunkInInode)); */ |
| } |
| |
| if (retVal == -1) { |
| /* T(("Could not find %d to delete\n",chunkInInode)); */ |
| } |
| return retVal; |
| } |
| |
| #ifdef YAFFS_PARANOID |
| |
| static int yaffs_CheckFileSanity(yaffs_Object * in) |
| { |
| int chunk; |
| int nChunks; |
| int fSize; |
| int failed = 0; |
| int objId; |
| yaffs_Tnode *tn; |
| yaffs_Tags localTags; |
| yaffs_Tags *tags = &localTags; |
| int theChunk; |
| int chunkDeleted; |
| |
| if (in->variantType != YAFFS_OBJECT_TYPE_FILE) { |
| /* T(("Object not a file\n")); */ |
| return YAFFS_FAIL; |
| } |
| |
| objId = in->objectId; |
| fSize = in->variant.fileVariant.fileSize; |
| nChunks = |
| (fSize + in->myDev->nDataBytesPerChunk - 1) / in->myDev->nDataBytesPerChunk; |
| |
| for (chunk = 1; chunk <= nChunks; chunk++) { |
| tn = yaffs_FindLevel0Tnode(in->myDev, &in->variant.fileVariant, |
| chunk); |
| |
| if (tn) { |
| |
| theChunk = yaffs_GetChunkGroupBase(dev,tn,chunk); |
| |
| if (yaffs_CheckChunkBits |
| (dev, theChunk / dev->nChunksPerBlock, |
| theChunk % dev->nChunksPerBlock)) { |
| |
| yaffs_ReadChunkTagsFromNAND(in->myDev, theChunk, |
| tags, |
| &chunkDeleted); |
| if (yaffs_TagsMatch |
| (tags, in->objectId, chunk, chunkDeleted)) { |
| /* found it; */ |
| |
| } |
| } else { |
| |
| failed = 1; |
| } |
| |
| } else { |
| /* T(("No level 0 found for %d\n", chunk)); */ |
| } |
| } |
| |
| return failed ? YAFFS_FAIL : YAFFS_OK; |
| } |
| |
| #endif |
| |
| static int yaffs_PutChunkIntoFile(yaffs_Object * in, int chunkInInode, |
| int chunkInNAND, int inScan) |
| { |
| /* NB inScan is zero unless scanning. |
| * For forward scanning, inScan is > 0; |
| * for backward scanning inScan is < 0 |
| */ |
| |
| yaffs_Tnode *tn; |
| yaffs_Device *dev = in->myDev; |
| int existingChunk; |
| yaffs_ExtendedTags existingTags; |
| yaffs_ExtendedTags newTags; |
| unsigned existingSerial, newSerial; |
| |
| if (in->variantType != YAFFS_OBJECT_TYPE_FILE) { |
| /* Just ignore an attempt at putting a chunk into a non-file during scanning |
| * If it is not during Scanning then something went wrong! |
| */ |
| if (!inScan) { |
| T(YAFFS_TRACE_ERROR, |
| (TSTR |
| ("yaffs tragedy:attempt to put data chunk into a non-file" |
| TENDSTR))); |
| YBUG(); |
| } |