Commit
cb6eafadc4
|
@ -1,2 +1,2 @@
|
|||
add_library(common abstractfile.c base64.c ../includes/abstractfile.h ../includes/common.h)
|
||||
add_library(common abstractfile.c ../includes/abstractfile.h ../includes/common.h)
|
||||
|
||||
|
|
|
@ -232,7 +232,6 @@ size_t memFileWrite(AbstractFile* file, const void* data, size_t len) {
|
|||
}
|
||||
|
||||
if((info->offset + (size_t)len) > (*(info->bufferSize))) {
|
||||
memset(((uint8_t*)(*(info->buffer))) + *(info->bufferSize), 0, (info->offset + (size_t)len) - *(info->bufferSize));
|
||||
*(info->bufferSize) = info->offset + (size_t)len;
|
||||
}
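Note: the hunk above keeps memFileWrite's zero-fill-and-extend behaviour — a write past the current end of the in-memory buffer zeroes the gap and bumps the recorded size. A minimal stand-alone sketch of that pattern (illustrative names only; the real code tracks the buffer through the AbstractFile info struct and grows the allocation elsewhere):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Sketch: make [offset, offset+len) addressable, zero-filling the gap
 * between the old end of the buffer and the write position. */
static int ensure_capacity(uint8_t** buf, size_t* bufSize, size_t offset, size_t len) {
	if((offset + len) > *bufSize) {
		uint8_t* p = (uint8_t*) realloc(*buf, offset + len);
		if(p == NULL) return -1;
		memset(p + *bufSize, 0, (offset + len) - *bufSize);	/* zero the gap */
		*buf = p;
		*bufSize = offset + len;
	}
	return 0;
}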
|
||||
|
||||
|
|
|
@ -16,7 +16,7 @@ link_directories(${BZIP2_LIBRARIES})
|
|||
|
||||
link_directories(${PROJECT_BINARY_DIR}/common ${PROJECT_BINARY_DIR}/hfs)
|
||||
|
||||
add_library(dmg adc.c checksum.c dmgfile.c dmglib.c filevault.c io.c partition.c resources.c udif.c ../includes/dmg/adc.h ../includes/dmg/dmg.h ../includes/dmg/dmgfile.h ../includes/dmg/dmglib.h ../includes/dmg/filevault.h)
|
||||
add_library(dmg adc.c base64.c checksum.c dmgfile.c dmglib.c filevault.c io.c partition.c resources.c udif.c ../includes/dmg/adc.h ../includes/dmg/dmg.h ../includes/dmg/dmgfile.h ../includes/dmg/dmglib.h ../includes/dmg/filevault.h)
|
||||
|
||||
IF(OPENSSL_FOUND)
|
||||
add_definitions(-DHAVE_CRYPT)
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
#include <dmg/dmg.h>
|
||||
#include <dmg/adc.h>
|
||||
|
||||
size_t adc_decompress(size_t in_size, unsigned char *input, size_t avail_size, unsigned char *output, size_t *bytes_written)
|
||||
int adc_decompress(int in_size, unsigned char *input, int avail_size, unsigned char *output, size_t *bytes_written)
|
||||
{
|
||||
if (in_size == 0)
|
||||
return 0;
|
||||
|
|
|
@ -208,17 +208,16 @@ A million repetitions of "a"
|
|||
34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F
|
||||
*/
|
||||
|
||||
#define SHA1HANDSOFF
|
||||
|
||||
void SHA1Transform(uint32_t state[5], const uint8_t buffer[64]);
|
||||
#define SHA1HANDSOFF * Copies data before messing with it.
|
||||
|
||||
#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
|
||||
|
||||
/* blk0() and blk() perform the initial expand. */
|
||||
/* I got the idea of expanding during the round function from SSLeay */
|
||||
/* FIXME: can we do this in an endian-proof way? */
|
||||
|
||||
#define blk0(i) ((endianness == IS_LITTLE_ENDIAN) ? (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \
|
||||
|(rol(block->l[i],8)&0x00FF00FF)) : block->l[i])
|
||||
|(rol(block->l[i],8)&0x00FF00FF)) : block->l[i])
|
||||
|
||||
#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \
|
||||
^block->l[(i+2)&15]^block->l[i&15],1))
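Note: rol() above is a plain 32-bit rotate-left, and blk0() uses two such rotates plus masks to byte-swap each input word on little-endian hosts, since SHA-1 consumes the message as big-endian 32-bit words. A tiny stand-alone check of the rotate (not part of the project):

#include <stdint.h>
#include <stdio.h>

#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))

int main(void) {
	uint32_t x = 0x80000001u;
	printf("%08x\n", (unsigned) rol(x, 1));	/* prints 00000003: the high bit wraps around to bit 0 */
	return 0;
}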
|
||||
|
||||
|
@ -229,31 +228,30 @@ void SHA1Transform(uint32_t state[5], const uint8_t buffer[64]);
|
|||
#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30);
|
||||
#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
|
||||
|
||||
/* Hash a single 512-bit block. This is the core of the algorithm. */
|
||||
void SHA1Transform(uint32_t state[5], const uint8_t buffer[64])
|
||||
{
|
||||
uint32_t a, b, c, d, e;
|
||||
typedef union {
|
||||
uint8_t c[64];
|
||||
uint32_t l[16];
|
||||
} CHAR64LONG16;
|
||||
CHAR64LONG16* block;
|
||||
|
||||
/* Hash a single 512-bit block. This is the core of the algorithm. */
|
||||
|
||||
void SHA1Transform(unsigned long state[5], const unsigned char buffer[64])
|
||||
{
|
||||
unsigned long a, b, c, d, e;
|
||||
typedef union {
|
||||
unsigned char c[64];
|
||||
unsigned long l[16];
|
||||
} CHAR64LONG16;
|
||||
CHAR64LONG16* block;
|
||||
#ifdef SHA1HANDSOFF
|
||||
static uint8_t workspace[64];
|
||||
static unsigned char workspace[64];
|
||||
block = (CHAR64LONG16*)workspace;
|
||||
memcpy(block, buffer, 64);
|
||||
#else
|
||||
block = (CHAR64LONG16*)buffer;
|
||||
#endif
|
||||
|
||||
/* Copy context->state[] to working vars */
|
||||
a = state[0];
|
||||
b = state[1];
|
||||
c = state[2];
|
||||
d = state[3];
|
||||
e = state[4];
|
||||
|
||||
/* 4 rounds of 20 operations each. Loop unrolled. */
|
||||
R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
|
||||
R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
|
||||
|
@ -275,20 +273,19 @@ void SHA1Transform(uint32_t state[5], const uint8_t buffer[64])
|
|||
R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
|
||||
R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
|
||||
R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
|
||||
|
||||
/* Add the working vars back into context.state[] */
|
||||
state[0] += a;
|
||||
state[1] += b;
|
||||
state[2] += c;
|
||||
state[3] += d;
|
||||
state[4] += e;
|
||||
|
||||
/* Wipe variables */
|
||||
a = b = c = d = e = 0;
|
||||
}
|
||||
|
||||
|
||||
/* SHA1Init - Initialize new context */
|
||||
|
||||
void SHA1Init(SHA1_CTX* context)
|
||||
{
|
||||
/* SHA1 initialization constants */
|
||||
|
@ -302,9 +299,10 @@ void SHA1Init(SHA1_CTX* context)
|
|||
|
||||
|
||||
/* Run your data through this. */
|
||||
void SHA1Update(SHA1_CTX* context, const uint8_t* data, const size_t len)
|
||||
|
||||
void SHA1Update(SHA1_CTX* context, const unsigned char* data, unsigned int len)
|
||||
{
|
||||
size_t i, j;
|
||||
unsigned int i, j;
|
||||
|
||||
j = (context->count[0] >> 3) & 63;
|
||||
if ((context->count[0] += len << 3) < (len << 3)) context->count[1]++;
|
||||
|
@ -313,7 +311,7 @@ void SHA1Update(SHA1_CTX* context, const uint8_t* data, const size_t len)
|
|||
memcpy(&context->buffer[j], data, (i = 64-j));
|
||||
SHA1Transform(context->state, context->buffer);
|
||||
for ( ; i + 63 < len; i += 64) {
|
||||
SHA1Transform(context->state, data + i);
|
||||
SHA1Transform(context->state, &data[i]);
|
||||
}
|
||||
j = 0;
|
||||
}
|
||||
|
@ -323,33 +321,33 @@ void SHA1Update(SHA1_CTX* context, const uint8_t* data, const size_t len)
|
|||
|
||||
|
||||
/* Add padding and return the message digest. */
|
||||
void SHA1Final(uint8_t digest[SHA1_DIGEST_SIZE], SHA1_CTX* context)
|
||||
|
||||
void SHA1Final(unsigned char digest[20], SHA1_CTX* context)
|
||||
{
|
||||
uint32_t i;
|
||||
uint8_t finalcount[8];
|
||||
unsigned long i, j;
|
||||
unsigned char finalcount[8];
|
||||
|
||||
for (i = 0; i < 8; i++) {
|
||||
finalcount[i] = (unsigned char)((context->count[(i >= 4 ? 0 : 1)]
|
||||
>> ((3-(i & 3)) * 8) ) & 255); /* Endian independent */
|
||||
}
|
||||
SHA1Update(context, (uint8_t *)"\200", 1);
|
||||
SHA1Update(context, (unsigned char *)"\200", 1);
|
||||
while ((context->count[0] & 504) != 448) {
|
||||
SHA1Update(context, (uint8_t *)"\0", 1);
|
||||
SHA1Update(context, (unsigned char *)"\0", 1);
|
||||
}
|
||||
SHA1Update(context, finalcount, 8); /* Should cause a SHA1Transform() */
|
||||
for (i = 0; i < SHA1_DIGEST_SIZE; i++) {
|
||||
digest[i] = (uint8_t)
|
||||
for (i = 0; i < 20; i++) {
|
||||
digest[i] = (unsigned char)
|
||||
((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255);
|
||||
}
|
||||
|
||||
/* Wipe variables */
|
||||
i = 0;
|
||||
i = j = 0;
|
||||
memset(context->buffer, 0, 64);
|
||||
memset(context->state, 0, 20);
|
||||
memset(context->count, 0, 8);
|
||||
memset(finalcount, 0, 8); /* SWR */
|
||||
|
||||
#ifdef SHA1HANDSOFF /* make SHA1Transform overwrite its own static vars */
|
||||
memset(&finalcount, 0, 8);
|
||||
#ifdef SHA1HANDSOFF /* make SHA1Transform overwrite it's own static vars */
|
||||
SHA1Transform(context->state, context->buffer);
|
||||
#endif
|
||||
}
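Note: a usage sketch for the SHA-1 interface changed in this hunk. Both prototype variants (uint8_t/size_t and unsigned char/unsigned int) accept this call pattern; the header name sha1.h is an assumption.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "sha1.h"	/* assumed header exposing SHA1_CTX, SHA1Init, SHA1Update, SHA1Final */

int main(void) {
	SHA1_CTX ctx;
	uint8_t digest[20];
	const char* msg = "abc";

	SHA1Init(&ctx);
	SHA1Update(&ctx, (const uint8_t*) msg, strlen(msg));
	SHA1Final(digest, &ctx);

	/* expected: a9993e364706816aba3e25717850c26c9cd0d89d (the standard "abc" test vector) */
	for(int i = 0; i < 20; i++) printf("%02x", digest[i]);
	printf("\n");
	return 0;
}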
|
||||
|
||||
|
|
|
@ -41,7 +41,7 @@ int main(int argc, char* argv[]) {
|
|||
TestByteOrder();
|
||||
|
||||
if(argc < 4) {
|
||||
printf("usage: %s [extract|build|build2048|res|iso|dmg] <in> <out> (-k <key>) (partition)\n", argv[0]);
|
||||
printf("usage: %s [extract|build|iso|dmg] <in> <out> (-k <key>) (partition)\n", argv[0]);
|
||||
return 0;
|
||||
}
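Note: with the trimmed usage string, typical invocations look like the following (assuming the binary is invoked as "dmg"; the actual name comes from argv[0]):

	dmg extract image.dmg image.img 	# decompress a DMG into a raw image
	dmg build image.img image.dmg   	# wrap a raw HFS+ image back into a UDIF DMG
	dmg iso image.dmg image.iso     	# convert a DMG to an ISO
	dmg dmg image.iso image.dmg     	# convert an ISO/raw image to a DMG

The optional "-k <key>" and partition arguments follow <in> and <out>, per the usage string above.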
|
||||
|
||||
|
@ -72,11 +72,7 @@ int main(int argc, char* argv[]) {
|
|||
}
|
||||
extractDmg(in, out, partNum);
|
||||
} else if(strcmp(argv[1], "build") == 0) {
|
||||
buildDmg(in, out, SECTOR_SIZE);
|
||||
} else if(strcmp(argv[1], "build2048") == 0) {
|
||||
buildDmg(in, out, 2048);
|
||||
} else if(strcmp(argv[1], "res") == 0) {
|
||||
outResources(in, out);
|
||||
buildDmg(in, out);
|
||||
} else if(strcmp(argv[1], "iso") == 0) {
|
||||
convertToISO(in, out);
|
||||
} else if(strcmp(argv[1], "dmg") == 0) {
|
||||
|
|
|
@ -19,43 +19,41 @@ static void cacheRun(DMG* dmg, BLKXTable* blkx, int run) {
|
|||
void* inBuffer;
|
||||
int ret;
|
||||
size_t have;
|
||||
|
||||
int bufferRead;
|
||||
|
||||
if(dmg->runData) {
|
||||
free(dmg->runData);
|
||||
}
|
||||
|
||||
|
||||
bufferSize = SECTOR_SIZE * blkx->runs[run].sectorCount;
|
||||
|
||||
|
||||
dmg->runData = (void*) malloc(bufferSize);
|
||||
inBuffer = (void*) malloc(bufferSize);
|
||||
memset(dmg->runData, 0, bufferSize);
|
||||
|
||||
|
||||
ASSERT(dmg->dmg->seek(dmg->dmg, blkx->dataStart + blkx->runs[run].compOffset) == 0, "fseeko");
|
||||
|
||||
|
||||
switch(blkx->runs[run].type) {
|
||||
case BLOCK_ADC:
|
||||
{
|
||||
size_t bufferRead = 0;
|
||||
do {
|
||||
strm.avail_in = dmg->dmg->read(dmg->dmg, inBuffer, blkx->runs[run].compLength);
|
||||
strm.avail_out = adc_decompress(strm.avail_in, inBuffer, bufferSize, dmg->runData, &have);
|
||||
bufferRead+=strm.avail_out;
|
||||
} while (bufferRead < blkx->runs[run].compLength);
|
||||
break;
|
||||
}
|
||||
|
||||
case BLOCK_ADC:
|
||||
bufferRead = 0;
|
||||
do {
|
||||
strm.avail_in = dmg->dmg->read(dmg->dmg, inBuffer, blkx->runs[run].compLength);
|
||||
strm.avail_out = adc_decompress(strm.avail_in, inBuffer, bufferSize, dmg->runData, &have);
|
||||
bufferRead+=strm.avail_out;
|
||||
} while (bufferRead < blkx->runs[run].compLength);
|
||||
break;
|
||||
case BLOCK_ZLIB:
|
||||
strm.zalloc = Z_NULL;
|
||||
strm.zfree = Z_NULL;
|
||||
strm.opaque = Z_NULL;
|
||||
strm.avail_in = 0;
|
||||
strm.next_in = Z_NULL;
|
||||
|
||||
|
||||
ASSERT(inflateInit(&strm) == Z_OK, "inflateInit");
|
||||
|
||||
|
||||
ASSERT((strm.avail_in = dmg->dmg->read(dmg->dmg, inBuffer, blkx->runs[run].compLength)) == blkx->runs[run].compLength, "fread");
|
||||
strm.next_in = (unsigned char*) inBuffer;
|
||||
|
||||
|
||||
do {
|
||||
strm.avail_out = bufferSize;
|
||||
strm.next_out = (unsigned char*) dmg->runData;
|
||||
|
@ -65,7 +63,7 @@ static void cacheRun(DMG* dmg, BLKXTable* blkx, int run) {
|
|||
}
|
||||
have = bufferSize - strm.avail_out;
|
||||
} while (strm.avail_out == 0);
|
||||
|
||||
|
||||
ASSERT(inflateEnd(&strm) == Z_OK, "inflateEnd");
|
||||
break;
|
||||
case BLOCK_RAW:
|
||||
|
@ -102,7 +100,7 @@ static void cacheRun(DMG* dmg, BLKXTable* blkx, int run) {
|
|||
break;
|
||||
}
|
||||
free(inBuffer);
|
||||
|
||||
|
||||
dmg->runType = blkx->runs[run].type;
|
||||
dmg->runStart = (blkx->runs[run].sectorStart + blkx->firstSectorNumber) * SECTOR_SIZE;
|
||||
dmg->runEnd = dmg->runStart + (blkx->runs[run].sectorCount * SECTOR_SIZE);
|
||||
|
@ -112,7 +110,7 @@ static void cacheOffset(DMG* dmg, off_t location) {
|
|||
int i;
|
||||
int j;
|
||||
uint64_t sector;
|
||||
|
||||
|
||||
sector = (uint64_t)(location / SECTOR_SIZE);
|
||||
|
||||
for(i = 0; i < dmg->numBLKX; i++) {
|
||||
|
@ -134,7 +132,7 @@ static int dmgFileRead(io_func* io, off_t location, size_t size, void *buffer) {
|
|||
dmg = (DMG*) io->data;
|
||||
|
||||
location += dmg->offset;
|
||||
|
||||
|
||||
if(size == 0) {
|
||||
return TRUE;
|
||||
}
|
||||
|
@ -142,18 +140,18 @@ static int dmgFileRead(io_func* io, off_t location, size_t size, void *buffer) {
|
|||
if(dmg->runType == BLOCK_TERMINATOR || location < dmg->runStart || location >= dmg->runEnd) {
|
||||
cacheOffset(dmg, location);
|
||||
}
|
||||
|
||||
|
||||
if((location + size) > dmg->runEnd) {
|
||||
toRead = dmg->runEnd - location;
|
||||
} else {
|
||||
toRead = size;
|
||||
}
|
||||
|
||||
|
||||
memcpy(buffer, (void*)((uint8_t*)dmg->runData + (uint32_t)(location - dmg->runStart)), toRead);
|
||||
size -= toRead;
|
||||
location += toRead;
|
||||
buffer = (void*)((uint8_t*)buffer + toRead);
|
||||
|
||||
|
||||
if(size > 0) {
|
||||
return dmgFileRead(io, location - dmg->offset, size, buffer);
|
||||
} else {
|
||||
|
@ -170,13 +168,13 @@ static int dmgFileWrite(io_func* io, off_t location, size_t size, void *buffer)
|
|||
|
||||
static void closeDmgFile(io_func* io) {
|
||||
DMG* dmg;
|
||||
|
||||
|
||||
dmg = (DMG*) io->data;
|
||||
|
||||
|
||||
if(dmg->runData) {
|
||||
free(dmg->runData);
|
||||
}
|
||||
|
||||
|
||||
free(dmg->blkx);
|
||||
releaseResources(dmg->resources);
|
||||
free(dmg->resourceXML);
|
||||
|
@ -187,10 +185,10 @@ static void closeDmgFile(io_func* io) {
|
|||
|
||||
io_func* openDmgFile(AbstractFile* abstractIn) {
|
||||
off_t fileLength;
|
||||
DMG* dmg;
|
||||
DMG* dmg;
|
||||
ResourceData* blkx;
|
||||
ResourceData* curData;
|
||||
|
||||
|
||||
io_func* toReturn;
|
||||
|
||||
if(abstractIn == NULL) {
|
||||
|
@ -203,7 +201,7 @@ io_func* openDmgFile(AbstractFile* abstractIn) {
|
|||
fileLength = abstractIn->getLength(abstractIn);
|
||||
abstractIn->seek(abstractIn, fileLength - sizeof(UDIFResourceFile));
|
||||
readUDIFResourceFile(abstractIn, &dmg->resourceFile);
|
||||
|
||||
|
||||
dmg->resourceXML = malloc(dmg->resourceFile.fUDIFXMLLength + 1);
|
||||
ASSERT( abstractIn->seek(abstractIn, (off_t)(dmg->resourceFile.fUDIFXMLOffset)) == 0, "fseeko" );
|
||||
ASSERT( abstractIn->read(abstractIn, dmg->resourceXML, (size_t)dmg->resourceFile.fUDIFXMLLength) == (size_t)dmg->resourceFile.fUDIFXMLLength, "fread" );
|
||||
|
@ -211,17 +209,17 @@ io_func* openDmgFile(AbstractFile* abstractIn) {
|
|||
|
||||
dmg->resources = readResources(dmg->resourceXML, dmg->resourceFile.fUDIFXMLLength);
|
||||
dmg->numBLKX = 0;
|
||||
|
||||
|
||||
blkx = (getResourceByKey(dmg->resources, "blkx"))->data;
|
||||
|
||||
|
||||
curData = blkx;
|
||||
while(curData != NULL) {
|
||||
dmg->numBLKX++;
|
||||
curData = curData->next;
|
||||
curData = curData->next;
|
||||
}
|
||||
|
||||
|
||||
dmg->blkx = (BLKXTable**) malloc(sizeof(BLKXTable*) * dmg->numBLKX);
|
||||
|
||||
|
||||
int i = 0;
|
||||
while(blkx != NULL) {
|
||||
dmg->blkx[i] = (BLKXTable*)(blkx->data);
|
||||
|
@ -234,12 +232,12 @@ io_func* openDmgFile(AbstractFile* abstractIn) {
|
|||
dmg->runData = NULL;
|
||||
|
||||
toReturn = (io_func*) malloc(sizeof(io_func));
|
||||
|
||||
|
||||
toReturn->data = dmg;
|
||||
toReturn->read = &dmgFileRead;
|
||||
toReturn->write = &dmgFileWrite;
|
||||
toReturn->close = &closeDmgFile;
|
||||
|
||||
|
||||
return toReturn;
|
||||
}
|
||||
|
||||
|
@ -271,7 +269,7 @@ io_func* seekDmgPartition(io_func* toReturn, int partition) {
|
|||
flipPartitionMultiple(partitions, FALSE, FALSE, BlockSize);
|
||||
numPartitions = partitions->pmMapBlkCnt;
|
||||
partitions = (Partition*) realloc(partitions, numPartitions * BlockSize);
|
||||
toReturn->read(toReturn, BlockSize, numPartitions * BlockSize, partitions);
|
||||
toReturn->read(toReturn, BlockSize, numPartitions * BlockSize, partitions);
|
||||
flipPartition(partitions, FALSE, BlockSize);
|
||||
|
||||
if(partition >= 0) {
|
||||
|
|
dmg/dmglib.c (51 lines changed)
|
@ -87,7 +87,7 @@ uint32_t calculateMasterChecksum(ResourceKey* resources) {
|
|||
return result;
|
||||
}
|
||||
|
||||
int buildDmg(AbstractFile* abstractIn, AbstractFile* abstractOut, unsigned int BlockSize) {
|
||||
int buildDmg(AbstractFile* abstractIn, AbstractFile* abstractOut) {
|
||||
io_func* io;
|
||||
Volume* volume;
|
||||
|
||||
|
@ -126,20 +126,18 @@ int buildDmg(AbstractFile* abstractIn, AbstractFile* abstractOut, unsigned int B
|
|||
nsiz = NULL;
|
||||
|
||||
memset(&dataForkToken, 0, sizeof(ChecksumToken));
|
||||
memset(koly.fUDIFMasterChecksum.data, 0, sizeof(koly.fUDIFMasterChecksum.data));
|
||||
memset(koly.fUDIFDataForkChecksum.data, 0, sizeof(koly.fUDIFDataForkChecksum.data));
|
||||
|
||||
printf("Creating and writing DDM and partition map...\n"); fflush(stdout);
|
||||
|
||||
DDM = createDriverDescriptorMap((volumeHeader->totalBlocks * volumeHeader->blockSize)/SECTOR_SIZE, BlockSize);
|
||||
DDM = createDriverDescriptorMap((volumeHeader->totalBlocks * volumeHeader->blockSize)/SECTOR_SIZE);
|
||||
|
||||
partitions = createApplePartitionMap((volumeHeader->totalBlocks * volumeHeader->blockSize)/SECTOR_SIZE, HFSX_VOLUME_TYPE, BlockSize);
|
||||
partitions = createApplePartitionMap((volumeHeader->totalBlocks * volumeHeader->blockSize)/SECTOR_SIZE, HFSX_VOLUME_TYPE);
|
||||
|
||||
int pNum = writeDriverDescriptorMap(-1, abstractOut, DDM, BlockSize, &CRCProxy, (void*) (&dataForkToken), &resources);
|
||||
writeDriverDescriptorMap(abstractOut, DDM, &CRCProxy, (void*) (&dataForkToken), &resources);
|
||||
free(DDM);
|
||||
pNum = writeApplePartitionMap(pNum, abstractOut, partitions, BlockSize, &CRCProxy, (void*) (&dataForkToken), &resources, &nsiz);
|
||||
writeApplePartitionMap(abstractOut, partitions, &CRCProxy, (void*) (&dataForkToken), &resources, &nsiz);
|
||||
free(partitions);
|
||||
pNum = writeATAPI(pNum, abstractOut, BlockSize, &CRCProxy, (void*) (&dataForkToken), &resources, &nsiz);
|
||||
writeATAPI(abstractOut, &CRCProxy, (void*) (&dataForkToken), &resources, &nsiz);
|
||||
|
||||
memset(&uncompressedToken, 0, sizeof(uncompressedToken));
|
||||
SHA1Init(&(uncompressedToken.sha1));
|
||||
|
@ -148,16 +146,15 @@ int buildDmg(AbstractFile* abstractIn, AbstractFile* abstractOut, unsigned int B
|
|||
|
||||
abstractIn->seek(abstractIn, 0);
|
||||
blkx = insertBLKX(abstractOut, abstractIn, USER_OFFSET, (volumeHeader->totalBlocks * volumeHeader->blockSize)/SECTOR_SIZE,
|
||||
pNum, CHECKSUM_UDIF_CRC32, &BlockSHA1CRC, &uncompressedToken, &CRCProxy, &dataForkToken, volume, 1);
|
||||
2, CHECKSUM_UDIF_CRC32, &BlockSHA1CRC, &uncompressedToken, &CRCProxy, &dataForkToken, volume);
|
||||
|
||||
blkx->checksum.data[0] = uncompressedToken.crc;
|
||||
printf("Inserting main blkx...\n"); fflush(stdout);
|
||||
|
||||
char pName[100];
|
||||
sprintf(pName, "Mac_OS_X (Apple_HFSX : %d)", pNum + 1);
|
||||
resources = insertData(resources, "blkx", pNum, pName, (const char*) blkx, sizeof(BLKXTable) + (blkx->blocksRunCount * sizeof(BLKXRun)), ATTRIBUTE_HDIUTIL);
|
||||
|
||||
resources = insertData(resources, "blkx", 2, "Mac_OS_X (Apple_HFSX : 3)", (const char*) blkx, sizeof(BLKXTable) + (blkx->blocksRunCount * sizeof(BLKXRun)), ATTRIBUTE_HDIUTIL);
|
||||
free(blkx);
|
||||
|
||||
|
||||
|
||||
printf("Inserting cSum data...\n"); fflush(stdout);
|
||||
|
||||
csum.version = 1;
|
||||
|
@ -183,20 +180,13 @@ int buildDmg(AbstractFile* abstractIn, AbstractFile* abstractOut, unsigned int B
|
|||
if(nsiz == NULL) {
|
||||
nsiz = myNSiz;
|
||||
} else {
|
||||
NSizResource* curNsiz = nsiz;
|
||||
while(curNsiz->next != NULL)
|
||||
{
|
||||
curNsiz = curNsiz->next;
|
||||
}
|
||||
curNsiz->next = myNSiz;
|
||||
myNSiz->next = nsiz->next;
|
||||
nsiz->next = myNSiz;
|
||||
}
|
||||
|
||||
pNum++;
|
||||
|
||||
printf("Writing free partition...\n"); fflush(stdout);
|
||||
|
||||
pNum = writeFreePartition(pNum, abstractOut, USER_OFFSET + (volumeHeader->totalBlocks * volumeHeader->blockSize)/SECTOR_SIZE,
|
||||
(FREE_SIZE + (BlockSize / SECTOR_SIZE / 2)) / (BlockSize / SECTOR_SIZE) * (BlockSize / SECTOR_SIZE), &resources);
|
||||
writeFreePartition(abstractOut, (volumeHeader->totalBlocks * volumeHeader->blockSize)/SECTOR_SIZE, &resources);
|
||||
|
||||
dataForkChecksum = dataForkToken.crc;
|
||||
|
||||
|
@ -204,7 +194,7 @@ int buildDmg(AbstractFile* abstractIn, AbstractFile* abstractOut, unsigned int B
|
|||
curResource = resources;
|
||||
while(curResource->next != NULL)
|
||||
curResource = curResource->next;
|
||||
|
||||
|
||||
curResource->next = writeNSiz(nsiz);
|
||||
curResource = curResource->next;
|
||||
releaseNSiz(nsiz);
|
||||
|
@ -250,8 +240,7 @@ int buildDmg(AbstractFile* abstractIn, AbstractFile* abstractOut, unsigned int B
|
|||
printf("Master checksum: %x\n", koly.fUDIFMasterChecksum.data[0]); fflush(stdout);
|
||||
|
||||
koly.fUDIFImageVariant = kUDIFDeviceImageType;
|
||||
koly.fUDIFSectorCount = (volumeHeader->totalBlocks * volumeHeader->blockSize)/SECTOR_SIZE
|
||||
+ ((EXTRA_SIZE + (BlockSize / SECTOR_SIZE / 2)) / (BlockSize / SECTOR_SIZE) * (BlockSize / SECTOR_SIZE));
|
||||
koly.fUDIFSectorCount = EXTRA_SIZE + (volumeHeader->totalBlocks * volumeHeader->blockSize)/SECTOR_SIZE;
|
||||
koly.reserved2 = 0;
|
||||
koly.reserved3 = 0;
|
||||
koly.reserved4 = 0;
|
||||
|
@ -309,8 +298,6 @@ int convertToDMG(AbstractFile* abstractIn, AbstractFile* abstractOut) {
|
|||
nsiz = NULL;
|
||||
myNSiz = NULL;
|
||||
memset(&dataForkToken, 0, sizeof(ChecksumToken));
|
||||
memset(koly.fUDIFMasterChecksum.data, 0, sizeof(koly.fUDIFMasterChecksum.data));
|
||||
memset(koly.fUDIFDataForkChecksum.data, 0, sizeof(koly.fUDIFDataForkChecksum.data));
|
||||
|
||||
partitions = (Partition*) malloc(SECTOR_SIZE);
|
||||
|
||||
|
@ -322,7 +309,7 @@ int convertToDMG(AbstractFile* abstractIn, AbstractFile* abstractOut) {
|
|||
|
||||
if(DDM->sbSig == DRIVER_DESCRIPTOR_SIGNATURE) {
|
||||
BlockSize = DDM->sbBlkSize;
|
||||
int pNum = writeDriverDescriptorMap(-1, abstractOut, DDM, BlockSize, &CRCProxy, (void*) (&dataForkToken), &resources);
|
||||
writeDriverDescriptorMap(abstractOut, DDM, &CRCProxy, (void*) (&dataForkToken), &resources);
|
||||
free(DDM);
|
||||
|
||||
printf("Processing partition map...\n"); fflush(stdout);
|
||||
|
@ -353,7 +340,7 @@ int convertToDMG(AbstractFile* abstractIn, AbstractFile* abstractOut) {
|
|||
|
||||
abstractIn->seek(abstractIn, partitions[i].pmPyPartStart * BlockSize);
|
||||
blkx = insertBLKX(abstractOut, abstractIn, partitions[i].pmPyPartStart, partitions[i].pmPartBlkCnt, i, CHECKSUM_UDIF_CRC32,
|
||||
&BlockCRC, &uncompressedToken, &CRCProxy, &dataForkToken, NULL, 1);
|
||||
&BlockCRC, &uncompressedToken, &CRCProxy, &dataForkToken, NULL);
|
||||
|
||||
blkx->checksum.data[0] = uncompressedToken.crc;
|
||||
resources = insertData(resources, "blkx", i, partitionName, (const char*) blkx, sizeof(BLKXTable) + (blkx->blocksRunCount * sizeof(BLKXRun)), ATTRIBUTE_HDIUTIL);
|
||||
|
@ -394,7 +381,7 @@ int convertToDMG(AbstractFile* abstractIn, AbstractFile* abstractOut) {
|
|||
|
||||
abstractIn->seek(abstractIn, 0);
|
||||
blkx = insertBLKX(abstractOut, abstractIn, 0, fileLength/SECTOR_SIZE, ENTIRE_DEVICE_DESCRIPTOR, CHECKSUM_UDIF_CRC32,
|
||||
&BlockCRC, &uncompressedToken, &CRCProxy, &dataForkToken, NULL, 1);
|
||||
&BlockCRC, &uncompressedToken, &CRCProxy, &dataForkToken, NULL);
|
||||
blkx->checksum.data[0] = uncompressedToken.crc;
|
||||
resources = insertData(resources, "blkx", 0, "whole disk (unknown partition : 0)", (const char*) blkx, sizeof(BLKXTable) + (blkx->blocksRunCount * sizeof(BLKXRun)), ATTRIBUTE_HDIUTIL);
|
||||
free(blkx);
|
||||
|
|
dmg/io.c (226 lines changed)
|
@ -10,40 +10,27 @@
|
|||
|
||||
#define SECTORS_AT_A_TIME 0x200
|
||||
|
||||
// Okay, this value sucks. You shouldn't touch it because it affects how many ignore sections get added to the blkx list
|
||||
// If the blkx list gets too fragmented with ignore sections, then the copy list in certain versions of the iPhone's
|
||||
// asr becomes too big. Due to Apple's BUGGY CODE, this causes asr to segfault! This is because the copy list becomes
|
||||
// too large for the initial buffer allocated, and realloc is called by asr. Unfortunately, after the realloc, the initial
|
||||
// pointer is still used by asr for a little while! Frakking noob mistake.
|
||||
|
||||
// The only reason why it works at all is their really idiotic algorithm to determine where to put ignore blocks. It's
|
||||
// certainly nothing reasonable like "put in an ignore block if you encounter more than X blank sectors" (like mine)
|
||||
// There's always a large-ish one at the end, and a tiny 2 sector one at the end too, to take care of the space after
|
||||
// the backup volume header. No frakking clue how they go about determining how to do that.
|
||||
|
||||
BLKXTable* insertBLKX(AbstractFile* out, AbstractFile* in, uint32_t firstSectorNumber, uint32_t numSectors, uint32_t blocksDescriptor,
|
||||
uint32_t checksumType, ChecksumFunc uncompressedChk, void* uncompressedChkToken, ChecksumFunc compressedChk,
|
||||
void* compressedChkToken, Volume* volume, int addComment) {
|
||||
void* compressedChkToken, Volume* volume) {
|
||||
BLKXTable* blkx;
|
||||
|
||||
|
||||
uint32_t roomForRuns;
|
||||
uint32_t curRun;
|
||||
uint64_t curSector;
|
||||
|
||||
|
||||
unsigned char* inBuffer;
|
||||
unsigned char* outBuffer;
|
||||
size_t bufferSize;
|
||||
size_t have;
|
||||
int ret;
|
||||
|
||||
int IGNORE_THRESHOLD = 100000;
|
||||
|
||||
|
||||
bz_stream strm;
|
||||
|
||||
|
||||
blkx = (BLKXTable*) malloc(sizeof(BLKXTable) + (2 * sizeof(BLKXRun)));
|
||||
roomForRuns = 2;
|
||||
memset(blkx, 0, sizeof(BLKXTable) + (roomForRuns * sizeof(BLKXRun)));
|
||||
|
||||
|
||||
blkx->fUDIFBlocksSignature = UDIF_BLOCK_SIGNATURE;
|
||||
blkx->infoVersion = 1;
|
||||
blkx->firstSectorNumber = firstSectorNumber;
|
||||
|
@ -61,39 +48,21 @@ BLKXTable* insertBLKX(AbstractFile* out, AbstractFile* in, uint32_t firstSectorN
|
|||
blkx->checksum.type = checksumType;
|
||||
blkx->checksum.bitness = checksumBitness(checksumType);
|
||||
blkx->blocksRunCount = 0;
|
||||
|
||||
|
||||
bufferSize = SECTOR_SIZE * blkx->decompressBufferRequested;
|
||||
|
||||
|
||||
ASSERT(inBuffer = (unsigned char*) malloc(bufferSize), "malloc");
|
||||
ASSERT(outBuffer = (unsigned char*) malloc(bufferSize), "malloc");
|
||||
|
||||
|
||||
curRun = 0;
|
||||
curSector = 0;
|
||||
|
||||
uint64_t startOff = in->tell(in);
|
||||
|
||||
if(addComment)
|
||||
{
|
||||
blkx->runs[curRun].type = BLOCK_COMMENT;
|
||||
blkx->runs[curRun].reserved = 0x2B626567;
|
||||
blkx->runs[curRun].sectorStart = curSector;
|
||||
blkx->runs[curRun].sectorCount = 0;
|
||||
blkx->runs[curRun].compOffset = out->tell(out) - blkx->dataStart;
|
||||
blkx->runs[curRun].compLength = 0;
|
||||
curRun++;
|
||||
|
||||
if(curRun >= roomForRuns) {
|
||||
roomForRuns <<= 1;
|
||||
blkx = (BLKXTable*) realloc(blkx, sizeof(BLKXTable) + (roomForRuns * sizeof(BLKXRun)));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
while(numSectors > 0) {
|
||||
if(curRun >= roomForRuns) {
|
||||
roomForRuns <<= 1;
|
||||
blkx = (BLKXTable*) realloc(blkx, sizeof(BLKXTable) + (roomForRuns * sizeof(BLKXRun)));
|
||||
}
|
||||
|
||||
|
||||
blkx->runs[curRun].type = BLOCK_BZIP2;
|
||||
blkx->runs[curRun].reserved = 0;
|
||||
blkx->runs[curRun].sectorStart = curSector;
|
||||
|
@ -103,121 +72,30 @@ BLKXTable* insertBLKX(AbstractFile* out, AbstractFile* in, uint32_t firstSectorN
|
|||
strm.bzalloc = Z_NULL;
|
||||
strm.bzfree = Z_NULL;
|
||||
strm.opaque = Z_NULL;
|
||||
|
||||
int amountRead;
|
||||
{
|
||||
size_t sectorsToSkip = 0;
|
||||
size_t processed = 0;
|
||||
|
||||
while(processed < numSectors)
|
||||
{
|
||||
blkx->runs[curRun].sectorCount = ((numSectors - processed) > SECTORS_AT_A_TIME) ? SECTORS_AT_A_TIME : (numSectors - processed);
|
||||
|
||||
//printf("Currently at %" PRId64 "\n", curOff);
|
||||
in->seek(in, startOff + (blkx->sectorCount - numSectors + processed) * SECTOR_SIZE);
|
||||
ASSERT((amountRead = in->read(in, inBuffer, blkx->runs[curRun].sectorCount * SECTOR_SIZE)) == (blkx->runs[curRun].sectorCount * SECTOR_SIZE), "mRead");
|
||||
|
||||
if(!addComment)
|
||||
break;
|
||||
|
||||
processed += amountRead / SECTOR_SIZE;
|
||||
|
||||
size_t* checkBuffer = (size_t*) inBuffer;
|
||||
size_t counter;
|
||||
size_t counter_max = amountRead / sizeof(size_t);
|
||||
for(counter = 0; counter < counter_max; counter++)
|
||||
{
|
||||
if(checkBuffer[counter] != 0) {
|
||||
//printf("Not empty at %" PRId64 " / %" PRId64 "\n", (int64_t)(counter * sizeof(size_t)) + curOff, (int64_t)((counter * sizeof(size_t)) / SECTOR_SIZE + sectorsToSkip + blkx->runs[curRun].sectorStart));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
size_t skipInBuffer = (counter * sizeof(size_t)) / SECTOR_SIZE;
|
||||
sectorsToSkip += skipInBuffer;
|
||||
|
||||
//printf("sectorsToSkip: %d\n", sectorsToSkip);
|
||||
|
||||
if(counter < counter_max)
|
||||
{
|
||||
if(sectorsToSkip > IGNORE_THRESHOLD)
|
||||
{
|
||||
//printf("Seeking back to %" PRId64 "\n", curOff + (skipInBuffer * SECTOR_SIZE));
|
||||
//in->seek(in, curOff + (skipInBuffer * SECTOR_SIZE));
|
||||
} else {
|
||||
//printf("Breaking out: %d / %d\n", (size_t) counter, (size_t) counter_max);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(sectorsToSkip > IGNORE_THRESHOLD)
|
||||
{
|
||||
int remainder = sectorsToSkip & 0xf;
|
||||
|
||||
if(sectorsToSkip != remainder)
|
||||
{
|
||||
blkx->runs[curRun].type = BLOCK_IGNORE;
|
||||
blkx->runs[curRun].reserved = 0;
|
||||
blkx->runs[curRun].sectorStart = curSector;
|
||||
blkx->runs[curRun].sectorCount = sectorsToSkip - remainder;
|
||||
blkx->runs[curRun].compOffset = out->tell(out) - blkx->dataStart;
|
||||
blkx->runs[curRun].compLength = 0;
|
||||
|
||||
printf("run %d: skipping sectors=%" PRId64 ", left=%d\n", curRun, (int64_t) sectorsToSkip, numSectors);
|
||||
|
||||
curSector += blkx->runs[curRun].sectorCount;
|
||||
numSectors -= blkx->runs[curRun].sectorCount;
|
||||
|
||||
curRun++;
|
||||
}
|
||||
|
||||
if(remainder > 0)
|
||||
{
|
||||
blkx->runs[curRun].type = BLOCK_IGNORE;
|
||||
blkx->runs[curRun].reserved = 0;
|
||||
blkx->runs[curRun].sectorStart = curSector;
|
||||
blkx->runs[curRun].sectorCount = remainder;
|
||||
blkx->runs[curRun].compOffset = out->tell(out) - blkx->dataStart;
|
||||
blkx->runs[curRun].compLength = 0;
|
||||
|
||||
printf("run %d: skipping sectors=%" PRId64 ", left=%d\n", curRun, (int64_t) sectorsToSkip, numSectors);
|
||||
|
||||
curSector += blkx->runs[curRun].sectorCount;
|
||||
numSectors -= blkx->runs[curRun].sectorCount;
|
||||
|
||||
curRun++;
|
||||
}
|
||||
|
||||
IGNORE_THRESHOLD = 0;
|
||||
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
printf("run %d: sectors=%" PRId64 ", left=%d\n", curRun, blkx->runs[curRun].sectorCount, numSectors);
|
||||
|
||||
|
||||
ASSERT(BZ2_bzCompressInit(&strm, 9, 0, 0) == BZ_OK, "BZ2_bzCompressInit");
|
||||
|
||||
strm.avail_in = amountRead;
|
||||
|
||||
ASSERT((strm.avail_in = in->read(in, inBuffer, blkx->runs[curRun].sectorCount * SECTOR_SIZE)) == (blkx->runs[curRun].sectorCount * SECTOR_SIZE), "mRead");
|
||||
strm.next_in = (char*)inBuffer;
|
||||
|
||||
|
||||
if(uncompressedChk)
|
||||
(*uncompressedChk)(uncompressedChkToken, inBuffer, blkx->runs[curRun].sectorCount * SECTOR_SIZE);
|
||||
|
||||
|
||||
blkx->runs[curRun].compOffset = out->tell(out) - blkx->dataStart;
|
||||
blkx->runs[curRun].compLength = 0;
|
||||
|
||||
strm.avail_out = bufferSize;
|
||||
strm.next_out = (char*)outBuffer;
|
||||
|
||||
|
||||
ASSERT((ret = BZ2_bzCompress(&strm, BZ_FINISH)) != BZ_SEQUENCE_ERROR, "BZ2_bzCompress/BZ_SEQUENCE_ERROR");
|
||||
if(ret != BZ_STREAM_END) {
|
||||
ASSERT(FALSE, "BZ2_bzCompress");
|
||||
}
|
||||
have = bufferSize - strm.avail_out;
|
||||
|
||||
if((have / SECTOR_SIZE) >= (blkx->runs[curRun].sectorCount - 15)) {
|
||||
|
||||
if((have / SECTOR_SIZE) > blkx->runs[curRun].sectorCount) {
|
||||
blkx->runs[curRun].type = BLOCK_RAW;
|
||||
ASSERT(out->write(out, inBuffer, blkx->runs[curRun].sectorCount * SECTOR_SIZE) == (blkx->runs[curRun].sectorCount * SECTOR_SIZE), "fwrite");
|
||||
blkx->runs[curRun].compLength += blkx->runs[curRun].sectorCount * SECTOR_SIZE;
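Note: the large block removed in this hunk is the skip-detection logic — it scans each batch of sectors read from the input and, when it finds a long run of all-zero sectors, emits a BLOCK_IGNORE run instead of bzip2-compressing them. The core test it performs is equivalent to this stand-alone sketch (byte-wise here for clarity; the removed code scans size_t words, and SECTOR_SIZE = 512 is assumed to match the project's definition):

#include <stddef.h>
#include <stdint.h>

#define SECTOR_SIZE 512

/* Count how many leading sectors of buf are entirely zero. */
static size_t leading_zero_sectors(const uint8_t* buf, size_t len) {
	size_t sectors = 0;
	for(size_t off = 0; off + SECTOR_SIZE <= len; off += SECTOR_SIZE) {
		for(size_t i = 0; i < SECTOR_SIZE; i++) {
			if(buf[off + i] != 0)
				return sectors;
		}
		sectors++;
	}
	return sectors;
}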
|
||||
|
@ -233,35 +111,19 @@ BLKXTable* insertBLKX(AbstractFile* out, AbstractFile* in, uint32_t firstSectorN
|
|||
|
||||
blkx->runs[curRun].compLength += have;
|
||||
}
|
||||
|
||||
|
||||
BZ2_bzCompressEnd(&strm);
|
||||
|
||||
curSector += blkx->runs[curRun].sectorCount;
|
||||
numSectors -= blkx->runs[curRun].sectorCount;
|
||||
curRun++;
|
||||
}
|
||||
|
||||
|
||||
if(curRun >= roomForRuns) {
|
||||
roomForRuns <<= 1;
|
||||
blkx = (BLKXTable*) realloc(blkx, sizeof(BLKXTable) + (roomForRuns * sizeof(BLKXRun)));
|
||||
}
|
||||
|
||||
if(addComment)
|
||||
{
|
||||
blkx->runs[curRun].type = BLOCK_COMMENT;
|
||||
blkx->runs[curRun].reserved = 0x2B656E64;
|
||||
blkx->runs[curRun].sectorStart = curSector;
|
||||
blkx->runs[curRun].sectorCount = 0;
|
||||
blkx->runs[curRun].compOffset = out->tell(out) - blkx->dataStart;
|
||||
blkx->runs[curRun].compLength = 0;
|
||||
curRun++;
|
||||
|
||||
if(curRun >= roomForRuns) {
|
||||
roomForRuns <<= 1;
|
||||
blkx = (BLKXTable*) realloc(blkx, sizeof(BLKXTable) + (roomForRuns * sizeof(BLKXRun)));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
blkx->runs[curRun].type = BLOCK_TERMINATOR;
|
||||
blkx->runs[curRun].reserved = 0;
|
||||
blkx->runs[curRun].sectorStart = curSector;
|
||||
|
@ -269,10 +131,10 @@ BLKXTable* insertBLKX(AbstractFile* out, AbstractFile* in, uint32_t firstSectorN
|
|||
blkx->runs[curRun].compOffset = out->tell(out) - blkx->dataStart;
|
||||
blkx->runs[curRun].compLength = 0;
|
||||
blkx->blocksRunCount = curRun + 1;
|
||||
|
||||
|
||||
free(inBuffer);
|
||||
free(outBuffer);
|
||||
|
||||
|
||||
return blkx;
|
||||
}
|
||||
|
||||
|
@ -287,44 +149,44 @@ void extractBLKX(AbstractFile* in, AbstractFile* out, BLKXTable* blkx) {
|
|||
off_t initialOffset;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
int bufferRead;
|
||||
|
||||
z_stream strm;
|
||||
bz_stream bzstrm;
|
||||
|
||||
|
||||
bufferSize = SECTOR_SIZE * blkx->decompressBufferRequested;
|
||||
|
||||
|
||||
ASSERT(inBuffer = (unsigned char*) malloc(bufferSize), "malloc");
|
||||
ASSERT(outBuffer = (unsigned char*) malloc(bufferSize), "malloc");
|
||||
|
||||
|
||||
initialOffset = out->tell(out);
|
||||
ASSERT(initialOffset != -1, "ftello");
|
||||
|
||||
|
||||
zero = 0;
|
||||
|
||||
|
||||
for(i = 0; i < blkx->blocksRunCount; i++) {
|
||||
ASSERT(in->seek(in, blkx->dataStart + blkx->runs[i].compOffset) == 0, "fseeko");
|
||||
ASSERT(out->seek(out, initialOffset + (blkx->runs[i].sectorStart * SECTOR_SIZE)) == 0, "mSeek");
|
||||
|
||||
|
||||
if(blkx->runs[i].sectorCount > 0) {
|
||||
ASSERT(out->seek(out, initialOffset + (blkx->runs[i].sectorStart + blkx->runs[i].sectorCount) * SECTOR_SIZE - 1) == 0, "mSeek");
|
||||
ASSERT(out->write(out, &zero, 1) == 1, "mWrite");
|
||||
ASSERT(out->seek(out, initialOffset + (blkx->runs[i].sectorStart * SECTOR_SIZE)) == 0, "mSeek");
|
||||
}
|
||||
|
||||
|
||||
if(blkx->runs[i].type == BLOCK_TERMINATOR) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
if( blkx->runs[i].compLength == 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
printf("run %d: start=%" PRId64 " sectors=%" PRId64 ", length=%" PRId64 ", fileOffset=0x%" PRIx64 "\n", i, initialOffset + (blkx->runs[i].sectorStart * SECTOR_SIZE), blkx->runs[i].sectorCount, blkx->runs[i].compLength, blkx->runs[i].compOffset);
|
||||
|
||||
|
||||
switch(blkx->runs[i].type) {
|
||||
case BLOCK_ADC:
|
||||
{
|
||||
size_t bufferRead = 0;
|
||||
bufferRead = 0;
|
||||
do {
|
||||
ASSERT((strm.avail_in = in->read(in, inBuffer, blkx->runs[i].compLength)) == blkx->runs[i].compLength, "fread");
|
||||
strm.avail_out = adc_decompress(strm.avail_in, inBuffer, bufferSize, outBuffer, &have);
|
||||
|
@ -332,20 +194,18 @@ void extractBLKX(AbstractFile* in, AbstractFile* out, BLKXTable* blkx) {
|
|||
bufferRead+=strm.avail_out;
|
||||
} while (bufferRead < blkx->runs[i].compLength);
|
||||
break;
|
||||
}
|
||||
|
||||
case BLOCK_ZLIB:
|
||||
strm.zalloc = Z_NULL;
|
||||
strm.zfree = Z_NULL;
|
||||
strm.opaque = Z_NULL;
|
||||
strm.avail_in = 0;
|
||||
strm.next_in = Z_NULL;
|
||||
|
||||
|
||||
ASSERT(inflateInit(&strm) == Z_OK, "inflateInit");
|
||||
|
||||
|
||||
ASSERT((strm.avail_in = in->read(in, inBuffer, blkx->runs[i].compLength)) == blkx->runs[i].compLength, "fread");
|
||||
strm.next_in = inBuffer;
|
||||
|
||||
|
||||
do {
|
||||
strm.avail_out = bufferSize;
|
||||
strm.next_out = outBuffer;
|
||||
|
@ -356,7 +216,7 @@ void extractBLKX(AbstractFile* in, AbstractFile* out, BLKXTable* blkx) {
|
|||
have = bufferSize - strm.avail_out;
|
||||
ASSERT(out->write(out, outBuffer, have) == have, "mWrite");
|
||||
} while (strm.avail_out == 0);
|
||||
|
||||
|
||||
ASSERT(inflateEnd(&strm) == Z_OK, "inflateEnd");
|
||||
break;
|
||||
case BLOCK_RAW:
|
||||
|
@ -408,7 +268,7 @@ void extractBLKX(AbstractFile* in, AbstractFile* out, BLKXTable* blkx) {
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
free(inBuffer);
|
||||
free(outBuffer);
|
||||
}
|
||||
|
|
dmg/partition.c (246 lines changed)
|
@ -421,7 +421,7 @@ void flipPartitionMultiple(Partition* partition, char multiple, char out, unsign
|
|||
FLIPENDIAN(partition->pmBootEntry);
|
||||
FLIPENDIAN(partition->pmBootEntry2);
|
||||
FLIPENDIAN(partition->pmBootCksum);
|
||||
FLIPENDIANLE(partition->bootCode);
|
||||
FLIPENDIAN(partition->bootCode);
|
||||
|
||||
if(!multiple) {
|
||||
break;
|
||||
|
@ -472,87 +472,71 @@ void readDriverDescriptorMap(AbstractFile* file, ResourceKey* resources) {
|
|||
free(buffer);
|
||||
}
|
||||
|
||||
DriverDescriptorRecord* createDriverDescriptorMap(uint32_t numSectors, unsigned int BlockSize) {
|
||||
DriverDescriptorRecord* createDriverDescriptorMap(uint32_t numSectors) {
|
||||
DriverDescriptorRecord* toReturn;
|
||||
|
||||
toReturn = (DriverDescriptorRecord*) malloc(BlockSize);
|
||||
memset(toReturn, 0, BlockSize);
|
||||
toReturn = (DriverDescriptorRecord*) malloc(SECTOR_SIZE);
|
||||
memset(toReturn, 0, SECTOR_SIZE);
|
||||
|
||||
toReturn->sbSig = DRIVER_DESCRIPTOR_SIGNATURE;
|
||||
toReturn->sbBlkSize = BlockSize;
|
||||
toReturn->sbBlkCount = (numSectors + EXTRA_SIZE + (BlockSize / SECTOR_SIZE / 2)) / (BlockSize / SECTOR_SIZE);
|
||||
toReturn->sbBlkSize = SECTOR_SIZE;
|
||||
toReturn->sbBlkCount = numSectors + EXTRA_SIZE;
|
||||
toReturn->sbDevType = 0;
|
||||
toReturn->sbDevId = 0;
|
||||
toReturn->sbData = 0;
|
||||
/*
|
||||
toReturn->sbDrvrCount = 1;
|
||||
toReturn->ddBlock = (ATAPI_OFFSET * SECTOR_SIZE) / BlockSize;
|
||||
toReturn->ddBlock = ATAPI_OFFSET;
|
||||
toReturn->ddSize = 0x4;
|
||||
toReturn->ddType = 0x701;
|
||||
*/
|
||||
toReturn->sbDrvrCount = 0;
|
||||
toReturn->ddBlock = 0;
|
||||
toReturn->ddSize = 0;
|
||||
toReturn->ddType = 0;
|
||||
|
||||
return toReturn;
|
||||
}
|
||||
|
||||
int writeDriverDescriptorMap(int pNum, AbstractFile* file, DriverDescriptorRecord* DDM, unsigned int BlockSize, ChecksumFunc dataForkChecksum, void* dataForkToken,
|
||||
void writeDriverDescriptorMap(AbstractFile* file, DriverDescriptorRecord* DDM, ChecksumFunc dataForkChecksum, void* dataForkToken,
|
||||
ResourceKey **resources) {
|
||||
AbstractFile* bufferFile;
|
||||
BLKXTable* blkx;
|
||||
ChecksumToken uncompressedToken;
|
||||
DriverDescriptorRecord* buffer;
|
||||
|
||||
buffer = (DriverDescriptorRecord*) malloc(DDM_SIZE * BlockSize);
|
||||
memcpy(buffer, DDM, DDM_SIZE * BlockSize);
|
||||
buffer = (DriverDescriptorRecord*) malloc(DDM_SIZE * SECTOR_SIZE);
|
||||
memcpy(buffer, DDM, DDM_SIZE * SECTOR_SIZE);
|
||||
memset(&uncompressedToken, 0, sizeof(uncompressedToken));
|
||||
|
||||
flipDriverDescriptorRecord(buffer, TRUE);
|
||||
|
||||
bufferFile = createAbstractFileFromMemory((void**)&buffer, DDM_SIZE * BlockSize);
|
||||
bufferFile = createAbstractFileFromMemory((void**)&buffer, DDM_SIZE * SECTOR_SIZE);
|
||||
|
||||
blkx = insertBLKX(file, bufferFile, DDM_OFFSET, DDM_SIZE, DDM_DESCRIPTOR, CHECKSUM_UDIF_CRC32, &CRCProxy, &uncompressedToken,
|
||||
dataForkChecksum, dataForkToken, NULL, 0);
|
||||
dataForkChecksum, dataForkToken, NULL);
|
||||
|
||||
blkx->checksum.data[0] = uncompressedToken.crc;
|
||||
|
||||
char pName[100];
|
||||
sprintf(pName, "Driver Descriptor Map (DDM : %d)", pNum + 1);
|
||||
*resources = insertData(*resources, "blkx", pNum, pName, (const char*) blkx, sizeof(BLKXTable) + (blkx->blocksRunCount * sizeof(BLKXRun)), ATTRIBUTE_HDIUTIL);
|
||||
*resources = insertData(*resources, "blkx", -1, "Driver Descriptor Map (DDM : 0)", (const char*) blkx, sizeof(BLKXTable) + (blkx->blocksRunCount * sizeof(BLKXRun)), ATTRIBUTE_HDIUTIL);
|
||||
|
||||
free(buffer);
|
||||
bufferFile->close(bufferFile);
|
||||
free(blkx);
|
||||
|
||||
pNum++;
|
||||
|
||||
if((DDM_SIZE * BlockSize / SECTOR_SIZE) - DDM_SIZE > 0)
|
||||
pNum = writeFreePartition(pNum, file, DDM_SIZE, (DDM_SIZE * BlockSize / SECTOR_SIZE) - DDM_SIZE, resources);
|
||||
|
||||
return pNum;
|
||||
}
|
||||
|
||||
int writeApplePartitionMap(int pNum, AbstractFile* file, Partition* partitions, unsigned int BlockSize, ChecksumFunc dataForkChecksum, void* dataForkToken, ResourceKey **resources, NSizResource** nsizIn) {
|
||||
void writeApplePartitionMap(AbstractFile* file, Partition* partitions, ChecksumFunc dataForkChecksum, void* dataForkToken, ResourceKey **resources, NSizResource** nsizIn) {
|
||||
AbstractFile* bufferFile;
|
||||
BLKXTable* blkx;
|
||||
ChecksumToken uncompressedToken;
|
||||
Partition* buffer;
|
||||
NSizResource* nsiz;
|
||||
CSumResource csum;
|
||||
|
||||
size_t realPartitionSize = (PARTITION_SIZE * SECTOR_SIZE) / BlockSize * BlockSize;
|
||||
buffer = (Partition*) malloc(realPartitionSize);
|
||||
memcpy(buffer, partitions, realPartitionSize);
|
||||
|
||||
buffer = (Partition*) malloc(PARTITION_SIZE * SECTOR_SIZE);
|
||||
memcpy(buffer, partitions, PARTITION_SIZE * SECTOR_SIZE);
|
||||
memset(&uncompressedToken, 0, sizeof(uncompressedToken));
|
||||
|
||||
flipPartition(buffer, TRUE, BlockSize);
|
||||
flipPartition(buffer, TRUE, SECTOR_SIZE);
|
||||
|
||||
bufferFile = createAbstractFileFromMemory((void**)&buffer, realPartitionSize);
|
||||
bufferFile = createAbstractFileFromMemory((void**)&buffer, PARTITION_SIZE * SECTOR_SIZE);
|
||||
|
||||
blkx = insertBLKX(file, bufferFile, PARTITION_OFFSET * BlockSize / SECTOR_SIZE, realPartitionSize / SECTOR_SIZE, pNum, CHECKSUM_UDIF_CRC32,
|
||||
&BlockCRC, &uncompressedToken, dataForkChecksum, dataForkToken, NULL, 0);
|
||||
blkx = insertBLKX(file, bufferFile, PARTITION_OFFSET, PARTITION_SIZE, 0, CHECKSUM_UDIF_CRC32,
|
||||
&BlockCRC, &uncompressedToken, dataForkChecksum, dataForkToken, NULL);
|
||||
|
||||
bufferFile->close(bufferFile);
|
||||
|
||||
|
@ -562,9 +546,7 @@ int writeApplePartitionMap(int pNum, AbstractFile* file, Partition* partitions,
|
|||
csum.type = CHECKSUM_MKBLOCK;
|
||||
csum.checksum = uncompressedToken.block;
|
||||
|
||||
char pName[100];
|
||||
sprintf(pName, "Apple (Apple_partition_map : %d)", pNum + 1);
|
||||
*resources = insertData(*resources, "blkx", pNum, pName, (const char*) blkx, sizeof(BLKXTable) + (blkx->blocksRunCount * sizeof(BLKXRun)), ATTRIBUTE_HDIUTIL);
|
||||
*resources = insertData(*resources, "blkx", 0, "Apple (Apple_partition_map : 1)", (const char*) blkx, sizeof(BLKXTable) + (blkx->blocksRunCount * sizeof(BLKXRun)), ATTRIBUTE_HDIUTIL);
|
||||
*resources = insertData(*resources, "cSum", 0, "", (const char*) (&csum), sizeof(csum), 0);
|
||||
|
||||
nsiz = (NSizResource*) malloc(sizeof(NSizResource));
|
||||
|
@ -584,11 +566,9 @@ int writeApplePartitionMap(int pNum, AbstractFile* file, Partition* partitions,
|
|||
|
||||
free(buffer);
|
||||
free(blkx);
|
||||
|
||||
return pNum + 1;
|
||||
}
|
||||
|
||||
int writeATAPI(int pNum, AbstractFile* file, unsigned int BlockSize, ChecksumFunc dataForkChecksum, void* dataForkToken, ResourceKey **resources, NSizResource** nsizIn) {
|
||||
void writeATAPI(AbstractFile* file, ChecksumFunc dataForkChecksum, void* dataForkToken, ResourceKey **resources, NSizResource** nsizIn) {
|
||||
AbstractFile* bufferFile;
|
||||
BLKXTable* blkx;
|
||||
ChecksumToken uncompressedToken;
|
||||
|
@ -603,16 +583,8 @@ int writeATAPI(int pNum, AbstractFile* file, unsigned int BlockSize, ChecksumFun
|
|||
memcpy(atapi, atapi_data, ATAPI_SIZE * SECTOR_SIZE);
|
||||
bufferFile = createAbstractFileFromMemory((void**)&atapi, ATAPI_SIZE * SECTOR_SIZE);
|
||||
|
||||
if(BlockSize != SECTOR_SIZE)
|
||||
{
|
||||
blkx = insertBLKX(file, bufferFile, ATAPI_OFFSET, BlockSize / SECTOR_SIZE, pNum, CHECKSUM_UDIF_CRC32,
|
||||
&BlockCRC, &uncompressedToken, dataForkChecksum, dataForkToken, NULL, 0);
|
||||
}
|
||||
else
|
||||
{
|
||||
blkx = insertBLKX(file, bufferFile, ATAPI_OFFSET, ATAPI_SIZE, pNum, CHECKSUM_UDIF_CRC32,
|
||||
&BlockCRC, &uncompressedToken, dataForkChecksum, dataForkToken, NULL, 0);
|
||||
}
|
||||
blkx = insertBLKX(file, bufferFile, ATAPI_OFFSET, ATAPI_SIZE, 1, CHECKSUM_UDIF_CRC32,
|
||||
&BlockCRC, &uncompressedToken, dataForkChecksum, dataForkToken, NULL);
|
||||
|
||||
bufferFile->close(bufferFile);
|
||||
free(atapi);
|
||||
|
@ -623,9 +595,7 @@ int writeATAPI(int pNum, AbstractFile* file, unsigned int BlockSize, ChecksumFun
|
|||
csum.type = CHECKSUM_MKBLOCK;
|
||||
csum.checksum = uncompressedToken.block;
|
||||
|
||||
char pName[100];
|
||||
sprintf(pName, "Macintosh (Apple_Driver_ATAPI : %d)", pNum + 1);
|
||||
*resources = insertData(*resources, "blkx", pNum, pName, (const char*) blkx, sizeof(BLKXTable) + (blkx->blocksRunCount * sizeof(BLKXRun)), ATTRIBUTE_HDIUTIL);
|
||||
*resources = insertData(*resources, "blkx", 1, "Macintosh (Apple_Driver_ATAPI : 2)", (const char*) blkx, sizeof(BLKXTable) + (blkx->blocksRunCount * sizeof(BLKXRun)), ATTRIBUTE_HDIUTIL);
|
||||
*resources = insertData(*resources, "cSum", 1, "", (const char*) (&csum), sizeof(csum), 0);
|
||||
|
||||
nsiz = (NSizResource*) malloc(sizeof(NSizResource));
|
||||
|
@ -644,13 +614,6 @@ int writeATAPI(int pNum, AbstractFile* file, unsigned int BlockSize, ChecksumFun
|
|||
}
|
||||
|
||||
free(blkx);
|
||||
|
||||
pNum++;
|
||||
|
||||
if(BlockSize != SECTOR_SIZE && (USER_OFFSET - (ATAPI_OFFSET + (BlockSize / SECTOR_SIZE))) > 0)
|
||||
pNum = writeFreePartition(pNum, file, ATAPI_OFFSET + (BlockSize / SECTOR_SIZE), USER_OFFSET - (ATAPI_OFFSET + (BlockSize / SECTOR_SIZE)), resources);
|
||||
|
||||
return pNum;
|
||||
}
|
||||
|
||||
|
||||
|
@ -697,13 +660,11 @@ void readApplePartitionMap(AbstractFile* file, ResourceKey* resources, unsigned
|
|||
free(partition);
|
||||
}
|
||||
|
||||
Partition* createApplePartitionMap(uint32_t numSectors, const char* volumeType, unsigned int BlockSize) {
|
||||
Partition* createApplePartitionMap(uint32_t numSectors, const char* volumeType) {
|
||||
Partition* partition;
|
||||
Partition* orig;
|
||||
|
||||
size_t realPartitionSize = (PARTITION_SIZE * SECTOR_SIZE) / BlockSize * BlockSize;
|
||||
orig = partition = (Partition*) malloc(realPartitionSize);
|
||||
memset(partition, 0, realPartitionSize);
|
||||
partition = (Partition*) malloc(SECTOR_SIZE * PARTITION_SIZE);
|
||||
memset(partition, 0, SECTOR_SIZE * PARTITION_SIZE);
|
||||
|
||||
partition[0].pmSig = APPLE_PARTITION_MAP_SIGNATURE;
|
||||
partition[0].pmSigPad = 0;
|
||||
|
@ -711,9 +672,9 @@ Partition* createApplePartitionMap(uint32_t numSectors, const char* volumeType,
|
|||
strcpy((char*)partition[0].pmPartName, "Apple");
|
||||
strcpy((char*)partition[0].pmParType, "Apple_partition_map");
|
||||
partition[0].pmPyPartStart = PARTITION_OFFSET;
|
||||
partition[0].pmPartBlkCnt = PARTITION_SIZE / (BlockSize / SECTOR_SIZE);
|
||||
partition[0].pmPartBlkCnt = PARTITION_SIZE;
|
||||
partition[0].pmLgDataStart = 0;
|
||||
partition[0].pmDataCnt = partition[0].pmPartBlkCnt;
|
||||
partition[0].pmDataCnt = PARTITION_SIZE;
|
||||
partition[0].pmPartStatus = 0x3;
|
||||
partition[0].pmLgBootStart = 0x0;
|
||||
partition[0].pmBootSize = 0x0;
|
||||
|
@ -724,92 +685,82 @@ Partition* createApplePartitionMap(uint32_t numSectors, const char* volumeType,
|
|||
partition[0].pmBootCksum = 0x0;
|
||||
partition[0].pmProcessor[0] = '\0';
|
||||
partition[0].bootCode = 0;
|
||||
|
||||
partition = (Partition*)(((char*) partition) + BlockSize);
|
||||
partition[0].pmSig = APPLE_PARTITION_MAP_SIGNATURE;
|
||||
partition[0].pmSigPad = 0;
|
||||
partition[0].pmMapBlkCnt = 0x4;
|
||||
strcpy((char*)partition[0].pmPartName, "Macintosh");
|
||||
strcpy((char*)partition[0].pmParType, "Apple_Driver_ATAPI");
|
||||
partition[0].pmPyPartStart = ATAPI_OFFSET / (BlockSize / SECTOR_SIZE);
|
||||
if(BlockSize != SECTOR_SIZE)
|
||||
{
|
||||
partition[0].pmPartBlkCnt = 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
partition[0].pmPartBlkCnt = ATAPI_SIZE;
|
||||
}
|
||||
partition[0].pmLgDataStart = 0;
|
||||
partition[0].pmDataCnt = partition[0].pmPartBlkCnt;
|
||||
partition[0].pmPartStatus = 0x303;
|
||||
partition[0].pmLgBootStart = 0x0;
|
||||
partition[0].pmBootSize = 0x800;
|
||||
partition[0].pmBootAddr = 0x0;
|
||||
partition[0].pmBootAddr2 = 0x0;
|
||||
partition[0].pmBootEntry = 0x0;
|
||||
partition[0].pmBootEntry2 = 0x0;
|
||||
partition[0].pmBootCksum = 0xffff;
|
||||
partition[0].pmProcessor[0] = '\0';
|
||||
partition[0].bootCode = BOOTCODE_DMMY;
|
||||
|
||||
partition = (Partition*)(((char*) partition) + BlockSize);
|
||||
partition[0].pmSig = APPLE_PARTITION_MAP_SIGNATURE;
|
||||
partition[0].pmSigPad = 0;
|
||||
partition[0].pmMapBlkCnt = 0x4;
|
||||
strcpy((char*)partition[0].pmPartName, "Mac_OS_X");
|
||||
strcpy((char*)partition[0].pmParType, volumeType);
|
||||
partition[0].pmPyPartStart = USER_OFFSET / (BlockSize / SECTOR_SIZE);
|
||||
partition[0].pmPartBlkCnt = numSectors / (BlockSize / SECTOR_SIZE);
|
||||
partition[0].pmLgDataStart = 0;
|
||||
partition[0].pmDataCnt = partition[0].pmPartBlkCnt;
|
||||
partition[0].pmPartStatus = 0x40000033;
|
||||
partition[0].pmLgBootStart = 0x0;
|
||||
partition[0].pmBootSize = 0x0;
|
||||
partition[0].pmBootAddr = 0x0;
|
||||
partition[0].pmBootAddr2 = 0x0;
|
||||
partition[0].pmBootEntry = 0x0;
|
||||
partition[0].pmBootEntry2 = 0x0;
|
||||
partition[0].pmBootCksum = 0x0;
|
||||
partition[0].pmProcessor[0] = '\0';
|
||||
partition[0].bootCode = 0;
|
||||
partition[1].pmSig = APPLE_PARTITION_MAP_SIGNATURE;
|
||||
partition[1].pmSigPad = 0;
|
||||
partition[1].pmMapBlkCnt = 0x4;
|
||||
strcpy((char*)partition[1].pmPartName, "Macintosh");
|
||||
strcpy((char*)partition[1].pmParType, "Apple_Driver_ATAPI");
|
||||
partition[1].pmPyPartStart = ATAPI_OFFSET;
|
||||
partition[1].pmPartBlkCnt = ATAPI_SIZE;
|
||||
partition[1].pmLgDataStart = 0;
|
||||
partition[1].pmDataCnt = 0x04;
|
||||
partition[1].pmPartStatus = 0x303;
|
||||
partition[1].pmLgBootStart = 0x0;
|
||||
partition[1].pmBootSize = 0x800;
|
||||
partition[1].pmBootAddr = 0x0;
|
||||
partition[1].pmBootAddr2 = 0x0;
|
||||
partition[1].pmBootEntry = 0x0;
|
||||
partition[1].pmBootEntry2 = 0x0;
|
||||
partition[1].pmBootCksum = 0xffff;
|
||||
partition[1].pmProcessor[0] = '\0';
|
||||
partition[1].bootCode = BOOTCODE_DMMY;
|
||||
|
||||
partition = (Partition*)(((char*) partition) + BlockSize);
|
||||
partition[0].pmSig = APPLE_PARTITION_MAP_SIGNATURE;
|
||||
partition[0].pmSigPad = 0;
|
||||
partition[0].pmMapBlkCnt = 0x4;
|
||||
partition[0].pmPartName[0] = '\0';
|
||||
strcpy((char*)partition[0].pmParType, "Apple_Free");
|
||||
partition[0].pmPyPartStart = (USER_OFFSET + numSectors) / (BlockSize / SECTOR_SIZE);
|
||||
partition[0].pmPartBlkCnt = (FREE_SIZE + (BlockSize / SECTOR_SIZE / 2)) / (BlockSize / SECTOR_SIZE);
|
||||
partition[0].pmLgDataStart = 0;
|
||||
partition[0].pmDataCnt = 0x0;
|
||||
partition[0].pmPartStatus = 0x0;
|
||||
partition[0].pmLgBootStart = 0x0;
|
||||
partition[0].pmBootSize = 0x0;
|
||||
partition[0].pmBootAddr = 0x0;
|
||||
partition[0].pmBootAddr2 = 0x0;
|
||||
partition[0].pmBootEntry = 0x0;
|
||||
partition[0].pmBootEntry2 = 0x0;
|
||||
partition[0].pmBootCksum = 0x0;
|
||||
partition[0].pmProcessor[0] = '\0';
|
||||
partition[0].bootCode = 0;
|
||||
partition[2].pmSig = APPLE_PARTITION_MAP_SIGNATURE;
|
||||
partition[2].pmSigPad = 0;
|
||||
partition[2].pmMapBlkCnt = 0x4;
|
||||
strcpy((char*)partition[2].pmPartName, "Mac_OS_X");
|
||||
strcpy((char*)partition[2].pmParType, volumeType);
|
||||
partition[2].pmPyPartStart = USER_OFFSET;
|
||||
partition[2].pmPartBlkCnt = numSectors;
|
||||
partition[2].pmLgDataStart = 0;
|
||||
partition[2].pmDataCnt = numSectors;
|
||||
partition[2].pmPartStatus = 0x40000033;
|
||||
partition[2].pmLgBootStart = 0x0;
|
||||
partition[2].pmBootSize = 0x0;
|
||||
partition[2].pmBootAddr = 0x0;
|
||||
partition[2].pmBootAddr2 = 0x0;
|
||||
partition[2].pmBootEntry = 0x0;
|
||||
partition[2].pmBootEntry2 = 0x0;
|
||||
partition[2].pmBootCksum = 0x0;
|
||||
partition[2].pmProcessor[0] = '\0';
|
||||
partition[2].bootCode = BOOTCODE_GOON;
|
||||
|
||||
return orig;
|
||||
partition[3].pmSig = APPLE_PARTITION_MAP_SIGNATURE;
|
||||
partition[3].pmSigPad = 0;
|
||||
partition[3].pmMapBlkCnt = 0x4;
|
||||
partition[3].pmPartName[0] = '\0';
|
||||
strcpy((char*)partition[3].pmParType, "Apple_Free");
|
||||
partition[3].pmPyPartStart = USER_OFFSET + numSectors;
|
||||
partition[3].pmPartBlkCnt = FREE_SIZE;
|
||||
partition[3].pmLgDataStart = 0;
|
||||
partition[3].pmDataCnt = 0x0;
|
||||
partition[3].pmPartStatus = 0x0;
|
||||
partition[3].pmLgBootStart = 0x0;
|
||||
partition[3].pmBootSize = 0x0;
|
||||
partition[3].pmBootAddr = 0x0;
|
||||
partition[3].pmBootAddr2 = 0x0;
|
||||
partition[3].pmBootEntry = 0x0;
|
||||
partition[3].pmBootEntry2 = 0x0;
|
||||
partition[3].pmBootCksum = 0x0;
|
||||
partition[3].pmProcessor[0] = '\0';
|
||||
partition[3].bootCode = 0;
|
||||
|
||||
return partition;
|
||||
}
|
||||
|
||||
int writeFreePartition(int pNum, AbstractFile* outFile, uint32_t offset, uint32_t numSectors, ResourceKey** resources) {
|
||||
void writeFreePartition(AbstractFile* outFile, uint32_t numSectors, ResourceKey** resources) {
|
||||
BLKXTable* blkx;
|
||||
|
||||
blkx = (BLKXTable*) malloc(sizeof(BLKXTable) + (2 * sizeof(BLKXRun)));
|
||||
|
||||
blkx->fUDIFBlocksSignature = UDIF_BLOCK_SIGNATURE;
|
||||
blkx->infoVersion = 1;
|
||||
blkx->firstSectorNumber = offset;
|
||||
blkx->sectorCount = numSectors;
|
||||
blkx->firstSectorNumber = USER_OFFSET + numSectors;
|
||||
blkx->sectorCount = FREE_SIZE;
|
||||
blkx->dataStart = 0;
|
||||
blkx->decompressBufferRequested = 0;
|
||||
blkx->blocksDescriptor = pNum;
|
||||
blkx->blocksDescriptor = 3;
|
||||
blkx->reserved1 = 0;
|
||||
blkx->reserved2 = 0;
|
||||
blkx->reserved3 = 0;
|
||||
|
@ -823,20 +774,17 @@ int writeFreePartition(int pNum, AbstractFile* outFile, uint32_t offset, uint32_
|
|||
blkx->runs[0].type = BLOCK_IGNORE;
|
||||
blkx->runs[0].reserved = 0;
|
||||
blkx->runs[0].sectorStart = 0;
|
||||
blkx->runs[0].sectorCount = numSectors;
|
||||
blkx->runs[0].sectorCount = FREE_SIZE;
|
||||
blkx->runs[0].compOffset = outFile->tell(outFile);
|
||||
blkx->runs[0].compLength = 0;
|
||||
blkx->runs[1].type = BLOCK_TERMINATOR;
|
||||
blkx->runs[1].reserved = 0;
|
||||
blkx->runs[1].sectorStart = numSectors;
|
||||
blkx->runs[1].sectorStart = FREE_SIZE;
|
||||
blkx->runs[1].sectorCount = 0;
|
||||
blkx->runs[1].compOffset = blkx->runs[0].compOffset;
|
||||
blkx->runs[1].compLength = 0;
|
||||
|
||||
char pName[100];
|
||||
sprintf(pName, " (Apple_Free : %d)", pNum + 1);
|
||||
*resources = insertData(*resources, "blkx", pNum, pName, (const char*) blkx, sizeof(BLKXTable) + (blkx->blocksRunCount * sizeof(BLKXRun)), ATTRIBUTE_HDIUTIL);
|
||||
*resources = insertData(*resources, "blkx", 3, " (Apple_Free : 4)", (const char*) blkx, sizeof(BLKXTable) + (blkx->blocksRunCount * sizeof(BLKXRun)), ATTRIBUTE_HDIUTIL);
|
||||
|
||||
free(blkx);
|
||||
return pNum + 1;
|
||||
}
|
||||
|
|
|
@ -95,7 +95,7 @@ static char plstData[1032] = {
|
|||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
|
||||
};
|
||||
|
||||
const char* plistHeader = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n";
|
||||
const char* plistHeader = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n";
|
||||
const char* plistFooter = "</dict>\n</plist>\n";
|
||||
|
||||
static void flipSizeResource(unsigned char* data, char out) {
|
||||
|
@ -103,23 +103,24 @@ static void flipSizeResource(unsigned char* data, char out) {
|
|||
|
||||
size = (SizeResource*) data;
|
||||
|
||||
FLIPENDIANLE(size->version);
|
||||
FLIPENDIANLE(size->isHFS);
|
||||
FLIPENDIANLE(size->unknown2);
|
||||
FLIPENDIANLE(size->unknown3);
|
||||
FLIPENDIANLE(size->volumeModified);
|
||||
FLIPENDIANLE(size->unknown4);
|
||||
FLIPENDIANLE(size->volumeSignature);
|
||||
FLIPENDIANLE(size->sizePresent);
|
||||
FLIPENDIAN(size->version);
|
||||
FLIPENDIAN(size->isHFS);
|
||||
FLIPENDIAN(size->unknown2);
|
||||
FLIPENDIAN(size->unknown3);
|
||||
FLIPENDIAN(size->volumeModified);
|
||||
FLIPENDIAN(size->unknown4);
|
||||
FLIPENDIAN(size->volumeSignature);
|
||||
FLIPENDIAN(size->sizePresent);
|
||||
}
|
||||
|
||||
static void flipCSumResource(unsigned char* data, char out) {
|
||||
CSumResource* cSum;
|
||||
|
||||
cSum = (CSumResource*) data;
|
||||
|
||||
FLIPENDIANLE(cSum->version);
|
||||
FLIPENDIANLE(cSum->type);
|
||||
FLIPENDIANLE(cSum->checksum);
|
||||
FLIPENDIAN(cSum->version);
|
||||
FLIPENDIAN(cSum->type);
|
||||
FLIPENDIAN(cSum->checksum);
|
||||
}
|
||||
|
||||
|
||||
|
@ -169,22 +170,17 @@ static void flipBLKX(unsigned char* data, char out) {
|
|||
FLIPENDIAN(blkx->blocksRunCount);
|
||||
for(i = 0; i < blkx->blocksRunCount; i++) {
|
||||
flipBLKXRun(&(blkx->runs[i]));
|
||||
}
|
||||
}
|
||||
/*
|
||||
printf("fUDIFBlocksSignature: 0x%x\n", blkx->fUDIFBlocksSignature);
|
||||
}
|
||||
|
||||
/*printf("fUDIFBlocksSignature: 0x%x\n", blkx->fUDIFBlocksSignature);
|
||||
printf("infoVersion: 0x%x\n", blkx->infoVersion);
|
||||
printf("firstSectorNumber: 0x%llx\n", blkx->firstSectorNumber);
|
||||
printf("sectorCount: 0x%llx\n", blkx->sectorCount);
|
||||
printf("dataStart: 0x%llx\n", blkx->dataStart);
|
||||
printf("decompressBufferRequested: 0x%x\n", blkx->decompressBufferRequested);
|
||||
printf("blocksDescriptor: 0x%x\n", blkx->blocksDescriptor);
|
||||
printf("blocksRunCount: 0x%x\n", blkx->blocksRunCount);
|
||||
|
||||
for(i = 0; i < 0x20; i++)
|
||||
{
|
||||
printf("checksum[%d]: %x\n", i, blkx->checksum.data[i]);
|
||||
}*/
|
||||
printf("blocksRunCount: 0x%x\n", blkx->blocksRunCount);*/
|
||||
}
|
||||
}
|
||||
|
||||
static char* getXMLString(char** location) {
|
||||
|
@ -425,7 +421,7 @@ static void writeNSizResource(NSizResource* data, char* buffer) {
|
|||
sprintf(itemBuffer, "\t<key>version</key>\n\t<integer>%d</integer>\n", (int32_t)(data->version));
|
||||
strcat(buffer, itemBuffer);
|
||||
if(data->isVolume) {
|
||||
sprintf(itemBuffer, "\t<key>volume-signature</key>\n\t<integer>%d</integer>\n", (int32_t)(data->volumeSignature));
|
||||
sprintf(itemBuffer, "\t<key>bytes</key>\n\t<integer>%d</integer>\n", (int32_t)(data->volumeSignature));
|
||||
strcat(buffer, itemBuffer);
|
||||
}
|
||||
strcat(buffer, plistFooter);
|
||||
|
@ -534,25 +530,8 @@ void releaseNSiz(NSizResource* nSiz) {
|
|||
}
|
||||
}
|
||||
|
||||
void outResources(AbstractFile* file, AbstractFile* out)
|
||||
ResourceKey* readResources(char* xml, size_t length)
|
||||
{
|
||||
char* xml;
|
||||
UDIFResourceFile resourceFile;
|
||||
off_t fileLength;
|
||||
|
||||
fileLength = file->getLength(file);
|
||||
file->seek(file, fileLength - sizeof(UDIFResourceFile));
|
||||
readUDIFResourceFile(file, &resourceFile);
|
||||
xml = (char*) malloc((size_t)resourceFile.fUDIFXMLLength);
|
||||
file->seek(file, (off_t)(resourceFile.fUDIFXMLOffset));
|
||||
ASSERT(file->read(file, xml, (size_t)resourceFile.fUDIFXMLLength) == (size_t)resourceFile.fUDIFXMLLength, "fread");
|
||||
ASSERT(out->write(out, xml, (size_t)resourceFile.fUDIFXMLLength) == (size_t)resourceFile.fUDIFXMLLength, "fwrite");
|
||||
|
||||
file->close(file);
|
||||
out->close(out);
|
||||
}
|
||||
|
||||
ResourceKey* readResources(char* xml, size_t length) {
|
||||
char* curLoc;
|
||||
char* tagEnd;
|
||||
size_t strLen;
|
||||
|
@ -646,7 +625,7 @@ ResourceKey* readResources(char* xml, size_t length) {
|
|||
return toReturn;
|
||||
}
|
||||
|
||||
static void writeResourceData(AbstractFile* file, ResourceData* data, ResourceKey* curResource, FlipDataFunc flipData, int tabLength) {
|
||||
static void writeResourceData(AbstractFile* file, ResourceData* data, FlipDataFunc flipData, int tabLength) {
|
||||
unsigned char* dataBuf;
|
||||
char* tabs;
|
||||
int i;
|
||||
|
@ -659,10 +638,6 @@ static void writeResourceData(AbstractFile* file, ResourceData* data, ResourceKe
|
|||
|
||||
abstractFilePrint(file, "%s<dict>\n", tabs);
|
||||
abstractFilePrint(file, "%s\t<key>Attributes</key>\n%s\t<string>0x%04x</string>\n", tabs, tabs, data->attributes);
|
||||
|
||||
if(strcmp((char*) curResource->key, "blkx") == 0)
|
||||
abstractFilePrint(file, "%s\t<key>CFName</key>\n%s\t<string>%s</string>\n", tabs, tabs, data->name);
|
||||
|
||||
abstractFilePrint(file, "%s\t<key>Data</key>\n%s\t<data>\n", tabs, tabs);
|
||||
|
||||
if(flipData) {
|
||||
|
@ -695,8 +670,8 @@ void writeResources(AbstractFile* file, ResourceKey* resources) {
|
|||
abstractFilePrint(file, "\t\t<key>%s</key>\n\t\t<array>\n", curResource->key);
|
||||
curData = curResource->data;
|
||||
while(curData != NULL) {
|
||||
writeResourceData(file, curData, curResource, curResource->flipData, 3);
|
||||
curData = curData->next;
|
||||
writeResourceData(file, curData, curResource->flipData, 3);
|
||||
curData = curData->next;
|
||||
}
|
||||
abstractFilePrint(file, "\t\t</array>\n", curResource->key);
|
||||
curResource = curResource->next;
|
||||
|
@ -863,7 +838,6 @@ ResourceKey* makeSize(HFSPlusVolumeHeader* volumeHeader) {
|
|||
memset(&size, 0, sizeof(SizeResource));
|
||||
size.version = 5;
|
||||
size.isHFS = 1;
|
||||
size.unknown1 = 0;
|
||||
size.unknown2 = 0;
|
||||
size.unknown3 = 0;
|
||||
size.volumeModified = volumeHeader->modifyDate;
|
||||
|
@ -872,6 +846,6 @@ ResourceKey* makeSize(HFSPlusVolumeHeader* volumeHeader) {
|
|||
size.sizePresent = 1;
|
||||
|
||||
printf("making size data\n");
|
||||
return insertData(NULL, "size", 2, "", (const char*)(&size), sizeof(SizeResource), 0);
|
||||
return insertData(NULL, "size", 0, "", (const char*)(&size), sizeof(SizeResource), 0);
|
||||
}
|
||||
|
||||
|
|
|
@ -1,15 +1,6 @@
|
|||
INCLUDE(FindZLIB)
|
||||
|
||||
IF(NOT ZLIB_FOUND)
|
||||
message(FATAL_ERROR "zlib is required for hfs!")
|
||||
ENDIF(NOT ZLIB_FOUND)
|
||||
|
||||
include_directories(${ZLIB_INCLUDE_DIR})
|
||||
link_directories(${ZLIB_LIBRARIES})
|
||||
|
||||
link_directories (${PROJECT_BINARY_DIR}/common)
|
||||
add_library(hfs btree.c catalog.c extents.c xattr.c fastunicodecompare.c flatfile.c hfslib.c rawfile.c utility.c volume.c hfscompress.c ../includes/hfs/hfslib.h ../includes/hfs/hfsplus.h ../includes/hfs/hfscompress.h)
|
||||
target_link_libraries(hfs common z)
|
||||
add_library(hfs btree.c catalog.c extents.c fastunicodecompare.c flatfile.c hfslib.c rawfile.c utility.c volume.c ../includes/hfs/hfslib.h ../includes/hfs/hfsplus.h)
|
||||
target_link_libraries(hfs common)
|
||||
|
||||
add_executable(hfsplus hfs.c)
|
||||
target_link_libraries (hfsplus hfs)
|
||||
|
|
33
hfs/btree.c
33
hfs/btree.c
|
@ -36,10 +36,8 @@ BTHeaderRec* readBTHeaderRec(io_func* io) {
|
|||
|
||||
headerRec = (BTHeaderRec*) malloc(sizeof(BTHeaderRec));
|
||||
|
||||
if(!READ(io, sizeof(BTNodeDescriptor), sizeof(BTHeaderRec), headerRec)) {
|
||||
free(headerRec);
|
||||
if(!READ(io, sizeof(BTNodeDescriptor), sizeof(BTHeaderRec), headerRec))
|
||||
return NULL;
|
||||
}
|
||||
|
||||
FLIPENDIAN(headerRec->treeDepth);
|
||||
FLIPENDIAN(headerRec->rootNode);
|
||||
|
@ -241,23 +239,11 @@ static void* searchNode(BTree* tree, uint32_t root, BTKey* searchKey, int *exact
|
|||
|
||||
free(descriptor);
|
||||
return READ_DATA(tree, lastRecordDataOffset, tree->io);
|
||||
} else if(descriptor->kind == kBTIndexNode) {
|
||||
} else {
|
||||
|
||||
free(descriptor);
|
||||
return searchNode(tree, getNodeNumberFromPointerRecord(lastRecordDataOffset, tree->io), searchKey, exact, nodeNumber, recordNumber);
|
||||
} else {
|
||||
if(nodeNumber != NULL)
|
||||
*nodeNumber = root;
|
||||
|
||||
if(recordNumber != NULL)
|
||||
*recordNumber = i;
|
||||
|
||||
if(exact != NULL)
|
||||
*exact = FALSE;
|
||||
|
||||
free(descriptor);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void* search(BTree* tree, BTKey* searchKey, int *exact, uint32_t *nodeNumber, int *recordNumber) {
|
||||
|
@ -480,7 +466,6 @@ static uint32_t traverseNode(uint32_t nodeNum, BTree* tree, unsigned char* map,
|
|||
printf("\n"); fflush(stdout);
|
||||
}
|
||||
free(previousKey);
|
||||
previousKey = NULL;
|
||||
}
|
||||
|
||||
if(displayTree) {
|
||||
|
@ -518,8 +503,6 @@ static uint32_t traverseNode(uint32_t nodeNum, BTree* tree, unsigned char* map,
|
|||
lastrecordDataOffset = recordDataOffset;
|
||||
}
|
||||
|
||||
if(previousKey != NULL) free(previousKey);
|
||||
|
||||
free(descriptor);
|
||||
|
||||
return count;
|
||||
|
@ -646,10 +629,7 @@ int debugBTree(BTree* tree, int displayTree) {
|
|||
} else {
|
||||
printf("Performing tree traversal...\n"); fflush(stdout);
|
||||
traverseCount = traverseNode(tree->headerRec->rootNode, tree, map, 0, &retFirstKey, &retLastKey, heightTable, &errorCount, displayTree);
|
||||
|
||||
free(retFirstKey);
|
||||
free(retLastKey);
|
||||
|
||||
|
||||
printf("Performing linear traversal...\n"); fflush(stdout);
|
||||
linearCount = linearCheck(heightTable, map, tree, &errorCount);
|
||||
}
|
||||
|
@ -785,7 +765,6 @@ static int growBTree(BTree* tree) {
|
|||
|
||||
if(byteNumber < (tree->headerRec->nodeSize - 256)) {
|
||||
ASSERT(writeBTHeaderRec(tree), "writeBTHeaderREc");
|
||||
free(buffer);
|
||||
return TRUE;
|
||||
} else {
|
||||
byteNumber -= tree->headerRec->nodeSize - 256;
|
||||
|
@ -1313,9 +1292,7 @@ static int increaseHeight(BTree* tree, uint32_t newNode) {
|
|||
|
||||
ASSERT(writeBTNodeDescriptor(&newDescriptor, tree->headerRec->rootNode, tree), "writeBTNodeDescriptor");
|
||||
ASSERT(writeBTHeaderRec(tree), "writeBTHeaderRec");
|
||||
|
||||
free(oldRootKey);
|
||||
free(newNodeKey);
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
|
|
328
hfs/catalog.c
328
hfs/catalog.c
|
@ -344,93 +344,76 @@ HFSPlusCatalogRecord* getRecordByCNID(HFSCatalogNodeID CNID, Volume* volume) {
|
|||
}
|
||||
|
||||
CatalogRecordList* getFolderContents(HFSCatalogNodeID CNID, Volume* volume) {
|
||||
BTree* tree;
|
||||
HFSPlusCatalogThread* record;
|
||||
HFSPlusCatalogKey key;
|
||||
uint32_t nodeNumber;
|
||||
int recordNumber;
|
||||
BTree* tree;
|
||||
HFSPlusCatalogThread* record;
|
||||
HFSPlusCatalogKey key;
|
||||
uint32_t nodeNumber;
|
||||
int recordNumber;
|
||||
|
||||
BTNodeDescriptor* descriptor;
|
||||
off_t recordOffset;
|
||||
off_t recordDataOffset;
|
||||
HFSPlusCatalogKey* currentKey;
|
||||
|
||||
CatalogRecordList* list;
|
||||
CatalogRecordList* lastItem;
|
||||
CatalogRecordList* item;
|
||||
|
||||
tree = volume->catalogTree;
|
||||
|
||||
key.keyLength = sizeof(key.parentID) + sizeof(key.nodeName.length);
|
||||
key.parentID = CNID;
|
||||
key.nodeName.length = 0;
|
||||
|
||||
list = NULL;
|
||||
|
||||
record = (HFSPlusCatalogThread*) search(tree, (BTKey*)(&key), NULL, &nodeNumber, &recordNumber);
|
||||
|
||||
if(record == NULL)
|
||||
return NULL;
|
||||
|
||||
free(record);
|
||||
|
||||
++recordNumber;
|
||||
|
||||
while(nodeNumber != 0) {
|
||||
descriptor = readBTNodeDescriptor(nodeNumber, tree);
|
||||
|
||||
while(recordNumber < descriptor->numRecords) {
|
||||
recordOffset = getRecordOffset(recordNumber, nodeNumber, tree);
|
||||
currentKey = (HFSPlusCatalogKey*) READ_KEY(tree, recordOffset, tree->io);
|
||||
recordDataOffset = recordOffset + currentKey->keyLength + sizeof(currentKey->keyLength);
|
||||
|
||||
if(currentKey->parentID == CNID) {
|
||||
item = (CatalogRecordList*) malloc(sizeof(CatalogRecordList));
|
||||
item->name = currentKey->nodeName;
|
||||
item->record = (HFSPlusCatalogRecord*) READ_DATA(tree, recordDataOffset, tree->io);
|
||||
item->next = NULL;
|
||||
|
||||
BTNodeDescriptor* descriptor;
|
||||
off_t recordOffset;
|
||||
off_t recordDataOffset;
|
||||
HFSPlusCatalogKey* currentKey;
|
||||
|
||||
CatalogRecordList* list;
|
||||
CatalogRecordList* lastItem;
|
||||
CatalogRecordList* item;
|
||||
|
||||
char pathBuffer[1024];
|
||||
HFSPlusCatalogRecord* toReturn;
|
||||
HFSPlusCatalogKey nkey;
|
||||
int exact;
|
||||
|
||||
tree = volume->catalogTree;
|
||||
|
||||
key.keyLength = sizeof(key.parentID) + sizeof(key.nodeName.length);
|
||||
key.parentID = CNID;
|
||||
key.nodeName.length = 0;
|
||||
|
||||
list = NULL;
|
||||
|
||||
record = (HFSPlusCatalogThread*) search(tree, (BTKey*)(&key), NULL, &nodeNumber, &recordNumber);
|
||||
|
||||
if(record == NULL)
|
||||
return NULL;
|
||||
|
||||
free(record);
|
||||
|
||||
++recordNumber;
|
||||
|
||||
while(nodeNumber != 0) {
|
||||
descriptor = readBTNodeDescriptor(nodeNumber, tree);
|
||||
|
||||
while(recordNumber < descriptor->numRecords) {
|
||||
recordOffset = getRecordOffset(recordNumber, nodeNumber, tree);
|
||||
currentKey = (HFSPlusCatalogKey*) READ_KEY(tree, recordOffset, tree->io);
|
||||
recordDataOffset = recordOffset + currentKey->keyLength + sizeof(currentKey->keyLength);
|
||||
|
||||
if(currentKey->parentID == CNID) {
|
||||
item = (CatalogRecordList*) malloc(sizeof(CatalogRecordList));
|
||||
item->name = currentKey->nodeName;
|
||||
item->record = (HFSPlusCatalogRecord*) READ_DATA(tree, recordDataOffset, tree->io);
|
||||
|
||||
if(item->record->recordType == kHFSPlusFileRecord && (((HFSPlusCatalogFile*)item->record)->userInfo.fileType) == kHardLinkFileType) {
|
||||
sprintf(pathBuffer, "iNode%d", ((HFSPlusCatalogFile*)item->record)->permissions.special.iNodeNum);
|
||||
nkey.parentID = volume->metadataDir;
|
||||
ASCIIToUnicode(pathBuffer, &nkey.nodeName);
|
||||
nkey.keyLength = sizeof(nkey.parentID) + sizeof(nkey.nodeName.length) + (sizeof(uint16_t) * nkey.nodeName.length);
|
||||
|
||||
toReturn = (HFSPlusCatalogRecord*) search(volume->catalogTree, (BTKey*)(&nkey), &exact, NULL, NULL);
|
||||
|
||||
free(item->record);
|
||||
item->record = toReturn;
|
||||
}
|
||||
item->next = NULL;
|
||||
|
||||
if(list == NULL) {
|
||||
list = item;
|
||||
} else {
|
||||
lastItem->next = item;
|
||||
}
|
||||
|
||||
lastItem = item;
|
||||
free(currentKey);
|
||||
} else {
|
||||
free(currentKey);
|
||||
free(descriptor);
|
||||
return list;
|
||||
}
|
||||
|
||||
recordNumber++;
|
||||
}
|
||||
|
||||
nodeNumber = descriptor->fLink;
|
||||
recordNumber = 0;
|
||||
|
||||
free(descriptor);
|
||||
}
|
||||
|
||||
return list;
|
||||
if(list == NULL) {
|
||||
list = item;
|
||||
} else {
|
||||
lastItem->next = item;
|
||||
}
|
||||
|
||||
lastItem = item;
|
||||
free(currentKey);
|
||||
} else {
|
||||
free(currentKey);
|
||||
free(descriptor);
|
||||
return list;
|
||||
}
|
||||
|
||||
recordNumber++;
|
||||
}
|
||||
|
||||
nodeNumber = descriptor->fLink;
|
||||
recordNumber = 0;
|
||||
|
||||
free(descriptor);
|
||||
}
|
||||
|
||||
return list;
|
||||
}
|
||||
|
||||
void releaseCatalogRecordList(CatalogRecordList* list) {
|
||||
|
@ -447,8 +430,6 @@ HFSPlusCatalogRecord* getLinkTarget(HFSPlusCatalogRecord* record, HFSCatalogNode
|
|||
io_func* io;
|
||||
char pathBuffer[1024];
|
||||
HFSPlusCatalogRecord* toReturn;
|
||||
HFSPlusCatalogKey nkey;
|
||||
int exact;
|
||||
|
||||
if(record->recordType == kHFSPlusFileRecord && (((HFSPlusCatalogFile*)record)->permissions.fileMode & S_IFLNK) == S_IFLNK) {
|
||||
io = openRawFile(((HFSPlusCatalogFile*)record)->fileID, &(((HFSPlusCatalogFile*)record)->dataFork), record, volume);
|
||||
|
@ -457,44 +438,12 @@ HFSPlusCatalogRecord* getLinkTarget(HFSPlusCatalogRecord* record, HFSCatalogNode
|
|||
pathBuffer[(((HFSPlusCatalogFile*)record)->dataFork).logicalSize] = '\0';
|
||||
toReturn = getRecordFromPath3(pathBuffer, volume, NULL, key, TRUE, TRUE, parentID);
|
||||
free(record);
|
||||
return toReturn;
|
||||
} else if(record->recordType == kHFSPlusFileRecord && (((HFSPlusCatalogFile*)record)->userInfo.fileType) == kHardLinkFileType) {
|
||||
sprintf(pathBuffer, "iNode%d", ((HFSPlusCatalogFile*)record)->permissions.special.iNodeNum);
|
||||
nkey.parentID = volume->metadataDir;
|
||||
ASCIIToUnicode(pathBuffer, &nkey.nodeName);
|
||||
nkey.keyLength = sizeof(nkey.parentID) + sizeof(nkey.nodeName.length) + (sizeof(uint16_t) * nkey.nodeName.length);
|
||||
|
||||
toReturn = (HFSPlusCatalogRecord*) search(volume->catalogTree, (BTKey*)(&nkey), &exact, NULL, NULL);
|
||||
|
||||
free(record);
|
||||
|
||||
return toReturn;
|
||||
} else {
|
||||
return record;
|
||||
}
|
||||
}
|
||||
|
||||
static const uint16_t METADATA_DIR[] = {0, 0, 0, 0, 'H', 'F', 'S', '+', ' ', 'P', 'r', 'i', 'v', 'a', 't', 'e', ' ', 'D', 'a', 't', 'a'};
|
||||
|
||||
HFSCatalogNodeID getMetadataDirectoryID(Volume* volume) {
|
||||
HFSPlusCatalogKey key;
|
||||
HFSPlusCatalogFolder* record;
|
||||
int exact;
|
||||
HFSCatalogNodeID id;
|
||||
|
||||
key.nodeName.length = sizeof(METADATA_DIR) / sizeof(uint16_t);
|
||||
key.keyLength = sizeof(key.parentID) + sizeof(key.nodeName.length) + sizeof(METADATA_DIR);
|
||||
key.parentID = kHFSRootFolderID;
|
||||
memcpy(key.nodeName.unicode, METADATA_DIR, sizeof(METADATA_DIR));
|
||||
|
||||
record = (HFSPlusCatalogFolder*) search(volume->catalogTree, (BTKey*)(&key), &exact, NULL, NULL);
|
||||
id = record->folderID;
|
||||
|
||||
free(record);
|
||||
|
||||
return id;
|
||||
}
|
||||
|
||||
HFSPlusCatalogRecord* getRecordFromPath(const char* path, Volume* volume, char **name, HFSPlusCatalogKey* retKey) {
|
||||
return getRecordFromPath2(path, volume, name, retKey, TRUE);
|
||||
}
|
||||
|
@ -805,92 +754,69 @@ int move(const char* source, const char* dest, Volume* volume) {
|
|||
}
|
||||
|
||||
int removeFile(const char* fileName, Volume* volume) {
|
||||
HFSPlusCatalogRecord* record;
|
||||
HFSPlusCatalogKey key;
|
||||
io_func* io;
|
||||
HFSPlusCatalogRecord* record;
|
||||
HFSPlusCatalogKey key;
|
||||
io_func* io;
|
||||
HFSPlusCatalogFolder* parentFolder = 0;
|
||||
|
||||
record = getRecordFromPath3(fileName, volume, NULL, &key, TRUE, FALSE, kHFSRootFolderID);
|
||||
if(record != NULL) {
|
||||
parentFolder = (HFSPlusCatalogFolder*) getRecordByCNID(key.parentID, volume);
|
||||
if(parentFolder != NULL) {
|
||||
if(parentFolder->recordType != kHFSPlusFolderRecord) {
|
||||
ASSERT(FALSE, "parent not folder");
|
||||
free(parentFolder);
|
||||
return FALSE;
|
||||
}
|
||||
} else {
|
||||
ASSERT(FALSE, "can't find parent");
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
if(record->recordType == kHFSPlusFileRecord) {
|
||||
io = openRawFile(((HFSPlusCatalogFile*)record)->fileID, &((HFSPlusCatalogFile*)record)->dataFork, record, volume);
|
||||
allocate((RawFile*)io->data, 0);
|
||||
CLOSE(io);
|
||||
|
||||
removeFromBTree(volume->catalogTree, (BTKey*)(&key));
|
||||
XAttrList* next;
|
||||
XAttrList* attrs = getAllExtendedAttributes(((HFSPlusCatalogFile*)record)->fileID, volume);
|
||||
if(attrs != NULL) {
|
||||
while(attrs != NULL) {
|
||||
next = attrs->next;
|
||||
unsetAttribute(volume, ((HFSPlusCatalogFile*)record)->fileID, attrs->name);
|
||||
free(attrs->name);
|
||||
free(attrs);
|
||||
attrs = next;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
key.nodeName.length = 0;
|
||||
key.parentID = ((HFSPlusCatalogFile*)record)->fileID;
|
||||
key.keyLength = sizeof(key.parentID) + sizeof(key.nodeName.length);
|
||||
removeFromBTree(volume->catalogTree, (BTKey*)(&key));
|
||||
|
||||
volume->volumeHeader->fileCount--;
|
||||
} else {
|
||||
if(((HFSPlusCatalogFolder*)record)->valence > 0) {
|
||||
free(record);
|
||||
free(parentFolder);
|
||||
ASSERT(FALSE, "folder not empty");
|
||||
return FALSE;
|
||||
} else {
|
||||
removeFromBTree(volume->catalogTree, (BTKey*)(&key));
|
||||
XAttrList* next;
|
||||
XAttrList* attrs = getAllExtendedAttributes(((HFSPlusCatalogFolder*)record)->folderID, volume);
|
||||
if(attrs != NULL) {
|
||||
while(attrs != NULL) {
|
||||
next = attrs->next;
|
||||
unsetAttribute(volume, ((HFSPlusCatalogFolder*)record)->folderID, attrs->name);
|
||||
free(attrs->name);
|
||||
free(attrs);
|
||||
attrs = next;
|
||||
}
|
||||
}
|
||||
|
||||
key.nodeName.length = 0;
|
||||
key.parentID = ((HFSPlusCatalogFolder*)record)->folderID;
|
||||
key.keyLength = sizeof(key.parentID) + sizeof(key.nodeName.length);
|
||||
removeFromBTree(volume->catalogTree, (BTKey*)(&key));
|
||||
}
|
||||
|
||||
parentFolder->folderCount--;
|
||||
volume->volumeHeader->folderCount--;
|
||||
}
|
||||
parentFolder->valence--;
|
||||
updateCatalog(volume, (HFSPlusCatalogRecord*) parentFolder);
|
||||
updateVolume(volume);
|
||||
|
||||
free(record);
|
||||
record = getRecordFromPath3(fileName, volume, NULL, &key, TRUE, FALSE, kHFSRootFolderID);
|
||||
if(record != NULL) {
|
||||
parentFolder = (HFSPlusCatalogFolder*) getRecordByCNID(key.parentID, volume);
|
||||
if(parentFolder != NULL) {
|
||||
if(parentFolder->recordType != kHFSPlusFolderRecord) {
|
||||
ASSERT(FALSE, "parent not folder");
|
||||
free(parentFolder);
|
||||
|
||||
return TRUE;
|
||||
} else {
|
||||
free(parentFolder);
|
||||
ASSERT(FALSE, "cannot find record");
|
||||
return FALSE;
|
||||
}
|
||||
} else {
|
||||
ASSERT(FALSE, "can't find parent");
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
if(record->recordType == kHFSPlusFileRecord) {
|
||||
io = openRawFile(((HFSPlusCatalogFile*)record)->fileID, &((HFSPlusCatalogFile*)record)->dataFork, record, volume);
|
||||
allocate((RawFile*)io->data, 0);
|
||||
CLOSE(io);
|
||||
|
||||
removeFromBTree(volume->catalogTree, (BTKey*)(&key));
|
||||
|
||||
key.nodeName.length = 0;
|
||||
key.parentID = ((HFSPlusCatalogFile*)record)->fileID;
|
||||
key.keyLength = sizeof(key.parentID) + sizeof(key.nodeName.length);
|
||||
removeFromBTree(volume->catalogTree, (BTKey*)(&key));
|
||||
|
||||
volume->volumeHeader->fileCount--;
|
||||
} else {
|
||||
if(((HFSPlusCatalogFolder*)record)->valence > 0) {
|
||||
free(record);
|
||||
free(parentFolder);
|
||||
ASSERT(FALSE, "folder not empty");
|
||||
return FALSE;
|
||||
} else {
|
||||
removeFromBTree(volume->catalogTree, (BTKey*)(&key));
|
||||
|
||||
key.nodeName.length = 0;
|
||||
key.parentID = ((HFSPlusCatalogFolder*)record)->folderID;
|
||||
key.keyLength = sizeof(key.parentID) + sizeof(key.nodeName.length);
|
||||
removeFromBTree(volume->catalogTree, (BTKey*)(&key));
|
||||
}
|
||||
|
||||
parentFolder->folderCount--;
|
||||
volume->volumeHeader->folderCount--;
|
||||
}
|
||||
parentFolder->valence--;
|
||||
updateCatalog(volume, (HFSPlusCatalogRecord*) parentFolder);
|
||||
updateVolume(volume);
|
||||
|
||||
free(record);
|
||||
free(parentFolder);
|
||||
|
||||
return TRUE;
|
||||
} else {
|
||||
free(parentFolder);
|
||||
ASSERT(FALSE, "cannot find record");
|
||||
return FALSE;
|
||||
}
|
||||
}
|
||||
|
||||
int makeSymlink(const char* pathName, const char* target, Volume* volume) {
|
||||
|
|
36
hfs/hfs.c
36
hfs/hfs.c
|
@ -232,40 +232,6 @@ void cmd_grow(Volume* volume, int argc, const char *argv[]) {
|
|||
printf("grew volume: %" PRId64 "\n", newSize);
|
||||
}
|
||||
|
||||
void cmd_getattr(Volume* volume, int argc, const char *argv[]) {
|
||||
HFSPlusCatalogRecord* record;
|
||||
|
||||
if(argc < 3) {
|
||||
printf("Not enough arguments");
|
||||
return;
|
||||
}
|
||||
|
||||
record = getRecordFromPath(argv[1], volume, NULL, NULL);
|
||||
|
||||
if(record != NULL) {
|
||||
HFSCatalogNodeID id;
|
||||
uint8_t* data;
|
||||
size_t size;
|
||||
if(record->recordType == kHFSPlusFileRecord)
|
||||
id = ((HFSPlusCatalogFile*)record)->fileID;
|
||||
else
|
||||
id = ((HFSPlusCatalogFolder*)record)->folderID;
|
||||
|
||||
size = getAttribute(volume, id, argv[2], &data);
|
||||
|
||||
if(size > 0) {
|
||||
fwrite(data, size, 1, stdout);
|
||||
free(data);
|
||||
} else {
|
||||
printf("No such attribute\n");
|
||||
}
|
||||
} else {
|
||||
printf("No such file or directory\n");
|
||||
}
|
||||
|
||||
free(record);
|
||||
}
|
||||
|
||||
void TestByteOrder()
|
||||
{
|
||||
short int word = 0x0001;
|
||||
|
@ -327,8 +293,6 @@ int main(int argc, const char *argv[]) {
|
|||
cmd_addall(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "grow") == 0) {
|
||||
cmd_grow(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "getattr") == 0) {
|
||||
cmd_getattr(volume, argc - 2, argv + 2);
|
||||
} else if(strcmp(argv[2], "debug") == 0) {
|
||||
if(argc > 3 && strcmp(argv[3], "verbose") == 0) {
|
||||
debugBTree(volume->catalogTree, TRUE);
|
||||
|
|
|
@ -1,301 +0,0 @@
|
|||
#include <zlib.h>
|
||||
#include "common.h"
|
||||
#include <hfs/hfsplus.h>
|
||||
#include <hfs/hfscompress.h>
|
||||
|
||||
void flipHFSPlusDecmpfs(HFSPlusDecmpfs* compressData) {
|
||||
FLIPENDIANLE(compressData->magic);
|
||||
FLIPENDIANLE(compressData->flags);
|
||||
FLIPENDIANLE(compressData->size);
|
||||
}
|
||||
|
||||
void flipRsrcHead(HFSPlusCmpfRsrcHead* data) {
|
||||
FLIPENDIAN(data->headerSize);
|
||||
FLIPENDIAN(data->totalSize);
|
||||
FLIPENDIAN(data->dataSize);
|
||||
FLIPENDIAN(data->flags);
|
||||
}
|
||||
|
||||
void flipRsrcBlockHead(HFSPlusCmpfRsrcBlockHead* data) {
|
||||
FLIPENDIAN(data->dataSize);
|
||||
FLIPENDIANLE(data->numBlocks);
|
||||
}
|
||||
|
||||
void flipRsrcBlock(HFSPlusCmpfRsrcBlock* data) {
|
||||
FLIPENDIANLE(data->offset);
|
||||
FLIPENDIANLE(data->size);
|
||||
}
|
||||
|
||||
void flipHFSPlusCmpfEnd(HFSPlusCmpfEnd* data) {
|
||||
FLIPENDIAN(data->unk1);
|
||||
FLIPENDIAN(data->unk2);
|
||||
FLIPENDIAN(data->unk3);
|
||||
FLIPENDIAN(data->magic);
|
||||
FLIPENDIAN(data->flags);
|
||||
FLIPENDIANLE(data->size);
|
||||
FLIPENDIANLE(data->unk4);
|
||||
}
|
||||
|
||||
static int compressedRead(io_func* io, off_t location, size_t size, void *buffer) {
|
||||
HFSPlusCompressed* data = (HFSPlusCompressed*) io->data;
|
||||
size_t toRead;
|
||||
|
||||
while(size > 0) {
|
||||
if(data->cached && location >= data->cachedStart && location < data->cachedEnd) {
|
||||
if((data->cachedEnd - location) < size)
|
||||
toRead = data->cachedEnd - location;
|
||||
else
|
||||
toRead = size;
|
||||
|
||||
memcpy(buffer, data->cached + (location - data->cachedStart), toRead);
|
||||
|
||||
size -= toRead;
|
||||
location += toRead;
|
||||
buffer = ((uint8_t*) buffer) + toRead;
|
||||
}
|
||||
|
||||
if(size == 0)
|
||||
break;
|
||||
|
||||
// Try to cache
|
||||
uLongf actualSize;
|
||||
uint32_t block = location / 0x10000;
|
||||
uint8_t* compressed = (uint8_t*) malloc(data->blocks->blocks[block].size);
|
||||
if(!READ(data->io, data->rsrcHead.headerSize + sizeof(uint32_t) + data->blocks->blocks[block].offset, data->blocks->blocks[block].size, compressed)) {
|
||||
hfs_panic("error reading");
|
||||
}
|
||||
|
||||
if(data->cached)
|
||||
free(data->cached);
|
||||
|
||||
data->cached = (uint8_t*) malloc(0x10000);
|
||||
actualSize = 0x10000;
|
||||
if(compressed[0] == 0xff) {
|
||||
actualSize = data->blocks->blocks[block].size - 1;
|
||||
memcpy(data->cached, compressed + 1, actualSize);
|
||||
} else {
|
||||
uncompress(data->cached, &actualSize, compressed, data->blocks->blocks[block].size);
|
||||
}
|
||||
data->cachedStart = block * 0x10000;
|
||||
data->cachedEnd = data->cachedStart + actualSize;
|
||||
free(compressed);
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static int compressedWrite(io_func* io, off_t location, size_t size, void *buffer) {
|
||||
HFSPlusCompressed* data = (HFSPlusCompressed*) io->data;
|
||||
|
||||
if(data->cachedStart != 0 || data->cachedEnd != data->decmpfs->size) {
|
||||
// Cache entire file
|
||||
uint8_t* newCache = (uint8_t*) malloc(data->decmpfs->size);
|
||||
compressedRead(io, 0, data->decmpfs->size, newCache);
|
||||
if(data->cached)
|
||||
free(data->cached);
|
||||
|
||||
data->cached = newCache;
|
||||
data->cachedStart = 0;
|
||||
data->cachedEnd = data->decmpfs->size;
|
||||
}
|
||||
|
||||
if((location + size) > data->decmpfs->size) {
|
||||
data->decmpfs->size = location + size;
|
||||
data->cached = (uint8_t*) realloc(data->cached, data->decmpfs->size);
|
||||
data->cachedEnd = data->decmpfs->size;
|
||||
}
|
||||
|
||||
memcpy(data->cached + location, buffer, size);
|
||||
|
||||
data->dirty = TRUE;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static void closeHFSPlusCompressed(io_func* io) {
|
||||
HFSPlusCompressed* data = (HFSPlusCompressed*) io->data;
|
||||
|
||||
if(data->io)
|
||||
CLOSE(data->io);
|
||||
|
||||
if(data->dirty) {
|
||||
if(data->blocks)
|
||||
free(data->blocks);
|
||||
|
||||
data->decmpfs->magic = CMPFS_MAGIC;
|
||||
data->decmpfs->flags = 0x4;
|
||||
data->decmpfsSize = sizeof(HFSPlusDecmpfs);
|
||||
|
||||
uint32_t numBlocks = (data->decmpfs->size + 0xFFFF) / 0x10000;
|
||||
uint32_t blocksSize = sizeof(HFSPlusCmpfRsrcBlockHead) + (numBlocks * sizeof(HFSPlusCmpfRsrcBlock));
|
||||
data->blocks = (HFSPlusCmpfRsrcBlockHead*) malloc(sizeof(HFSPlusCmpfRsrcBlockHead) + (numBlocks * sizeof(HFSPlusCmpfRsrcBlock)));
|
||||
data->blocks->numBlocks = numBlocks;
|
||||
data->blocks->dataSize = blocksSize - sizeof(uint32_t); // without the front dataSize in BlockHead.
|
||||
|
||||
data->rsrcHead.headerSize = 0x100;
|
||||
data->rsrcHead.dataSize = blocksSize;
|
||||
data->rsrcHead.totalSize = data->rsrcHead.headerSize + data->rsrcHead.dataSize;
|
||||
data->rsrcHead.flags = 0x32;
|
||||
|
||||
uint8_t* buffer = (uint8_t*) malloc((0x10000 * 1.1) + 12);
|
||||
uint32_t curFileOffset = data->blocks->dataSize;
|
||||
uint32_t i;
|
||||
for(i = 0; i < numBlocks; i++) {
|
||||
data->blocks->blocks[i].offset = curFileOffset;
|
||||
uLongf actualSize = (0x10000 * 1.1) + 12;
|
||||
compress(buffer, &actualSize, data->cached + (0x10000 * i),
|
||||
(data->decmpfs->size - (0x10000 * i)) > 0x10000 ? 0x10000 : (data->decmpfs->size - (0x10000 * i)));
|
||||
data->blocks->blocks[i].size = actualSize;
|
||||
|
||||
// check if we can fit the whole thing into an inline extended attribute
|
||||
// a little fudge factor here since sizeof(HFSPlusAttrKey) is bigger than it ought to be, since only 127 characters are strictly allowed
|
||||
if(numBlocks <= 1 && (actualSize + sizeof(HFSPlusDecmpfs) + sizeof(HFSPlusAttrKey)) <= 0x1000) {
|
||||
data->decmpfs->flags = 0x3;
|
||||
memcpy(data->decmpfs->data, buffer, actualSize);
|
||||
data->decmpfsSize = sizeof(HFSPlusDecmpfs) + actualSize;
|
||||
printf("inline data\n");
|
||||
break;
|
||||
} else {
|
||||
if(i == 0) {
|
||||
data->io = openRawFile(data->file->fileID, &data->file->resourceFork, (HFSPlusCatalogRecord*)data->file, data->volume);
|
||||
if(!data->io) {
|
||||
hfs_panic("error opening resource fork");
|
||||
}
|
||||
}
|
||||
|
||||
WRITE(data->io, data->rsrcHead.headerSize + sizeof(uint32_t) + data->blocks->blocks[i].offset, data->blocks->blocks[i].size, buffer);
|
||||
|
||||
curFileOffset += data->blocks->blocks[i].size;
|
||||
data->blocks->dataSize += data->blocks->blocks[i].size;
|
||||
data->rsrcHead.dataSize += data->blocks->blocks[i].size;
|
||||
data->rsrcHead.totalSize += data->blocks->blocks[i].size;
|
||||
}
|
||||
}
|
||||
|
||||
free(buffer);
|
||||
|
||||
if(data->decmpfs->flags == 0x4) {
|
||||
flipRsrcHead(&data->rsrcHead);
|
||||
WRITE(data->io, 0, sizeof(HFSPlusCmpfRsrcHead), &data->rsrcHead);
|
||||
flipRsrcHead(&data->rsrcHead);
|
||||
|
||||
for(i = 0; i < data->blocks->numBlocks; i++) {
|
||||
flipRsrcBlock(&data->blocks->blocks[i]);
|
||||
}
|
||||
flipRsrcBlockHead(data->blocks);
|
||||
WRITE(data->io, data->rsrcHead.headerSize, blocksSize, data->blocks);
|
||||
flipRsrcBlockHead(data->blocks);
|
||||
for(i = 0; i < data->blocks->numBlocks; i++) {
|
||||
flipRsrcBlock(&data->blocks->blocks[i]);
|
||||
}
|
||||
|
||||
HFSPlusCmpfEnd end;
|
||||
memset(&end, 0, sizeof(HFSPlusCmpfEnd));
|
||||
end.unk1 = 0x1C;
|
||||
end.unk2 = 0x32;
|
||||
end.unk3 = 0x0;
|
||||
end.magic = CMPFS_MAGIC;
|
||||
end.flags = 0xA;
|
||||
end.size = 0xFFFF01;
|
||||
end.unk4 = 0x0;
|
||||
|
||||
flipHFSPlusCmpfEnd(&end);
|
||||
WRITE(data->io, data->rsrcHead.totalSize, sizeof(HFSPlusCmpfEnd), &end);
|
||||
flipHFSPlusCmpfEnd(&end);
|
||||
|
||||
CLOSE(data->io);
|
||||
}
|
||||
|
||||
flipHFSPlusDecmpfs(data->decmpfs);
|
||||
setAttribute(data->volume, data->file->fileID, "com.apple.decmpfs", (uint8_t*)(data->decmpfs), data->decmpfsSize);
|
||||
flipHFSPlusDecmpfs(data->decmpfs);
|
||||
}
|
||||
|
||||
if(data->cached)
|
||||
free(data->cached);
|
||||
|
||||
if(data->blocks)
|
||||
free(data->blocks);
|
||||
|
||||
free(data->decmpfs);
|
||||
free(data);
|
||||
free(io);
|
||||
}
|
||||
|
||||
io_func* openHFSPlusCompressed(Volume* volume, HFSPlusCatalogFile* file) {
|
||||
io_func* io;
|
||||
HFSPlusCompressed* data;
|
||||
uLongf actualSize;
|
||||
|
||||
io = (io_func*) malloc(sizeof(io_func));
|
||||
data = (HFSPlusCompressed*) malloc(sizeof(HFSPlusCompressed));
|
||||
|
||||
data->volume = volume;
|
||||
data->file = file;
|
||||
|
||||
io->data = data;
|
||||
io->read = &compressedRead;
|
||||
io->write = &compressedWrite;
|
||||
io->close = &closeHFSPlusCompressed;
|
||||
|
||||
data->cached = NULL;
|
||||
data->cachedStart = 0;
|
||||
data->cachedEnd = 0;
|
||||
data->io = NULL;
|
||||
data->blocks = NULL;
|
||||
data->dirty = FALSE;
|
||||
|
||||
data->decmpfsSize = getAttribute(volume, file->fileID, "com.apple.decmpfs", (uint8_t**)(&data->decmpfs));
|
||||
if(data->decmpfsSize == 0) {
|
||||
data->decmpfs = (HFSPlusDecmpfs*) malloc(0x1000);
|
||||
data->decmpfs->size = 0;
|
||||
return io; // previously not compressed file
|
||||
}
|
||||
|
||||
flipHFSPlusDecmpfs(data->decmpfs);
|
||||
|
||||
if(data->decmpfs->flags == 0x3) {
|
||||
data->cached = (uint8_t*) malloc(data->decmpfs->size);
|
||||
actualSize = data->decmpfs->size;
|
||||
if(data->decmpfs->data[0] == 0xff) {
|
||||
memcpy(data->cached, data->decmpfs->data + 1, actualSize);
|
||||
} else {
|
||||
uncompress(data->cached, &actualSize, data->decmpfs->data, data->decmpfsSize - sizeof(HFSPlusDecmpfs));
|
||||
if(actualSize != data->decmpfs->size) {
|
||||
fprintf(stderr, "decmpfs: size mismatch\n");
|
||||
}
|
||||
}
|
||||
data->cachedStart = 0;
|
||||
data->cachedEnd = actualSize;
|
||||
} else {
|
||||
data->io = openRawFile(file->fileID, &file->resourceFork, (HFSPlusCatalogRecord*)file, volume);
|
||||
if(!data->io) {
|
||||
hfs_panic("error opening resource fork");
|
||||
}
|
||||
|
||||
if(!READ(data->io, 0, sizeof(HFSPlusCmpfRsrcHead), &data->rsrcHead)) {
|
||||
hfs_panic("error reading");
|
||||
}
|
||||
|
||||
flipRsrcHead(&data->rsrcHead);
|
||||
|
||||
data->blocks = (HFSPlusCmpfRsrcBlockHead*) malloc(sizeof(HFSPlusCmpfRsrcBlockHead));
|
||||
if(!READ(data->io, data->rsrcHead.headerSize, sizeof(HFSPlusCmpfRsrcBlockHead), data->blocks)) {
|
||||
hfs_panic("error reading");
|
||||
}
|
||||
|
||||
flipRsrcBlockHead(data->blocks);
|
||||
|
||||
data->blocks = (HFSPlusCmpfRsrcBlockHead*) realloc(data->blocks, sizeof(HFSPlusCmpfRsrcBlockHead) + (sizeof(HFSPlusCmpfRsrcBlock) * data->blocks->numBlocks));
|
||||
if(!READ(data->io, data->rsrcHead.headerSize + sizeof(HFSPlusCmpfRsrcBlockHead), sizeof(HFSPlusCmpfRsrcBlock) * data->blocks->numBlocks, data->blocks->blocks)) {
|
||||
hfs_panic("error reading");
|
||||
}
|
||||
|
||||
int i;
|
||||
for(i = 0; i < data->blocks->numBlocks; i++) {
|
||||
flipRsrcBlock(&data->blocks->blocks[i]);
|
||||
}
|
||||
}
|
||||
|
||||
return io;
|
||||
}
|
||||
|
152
hfs/hfslib.c
152
hfs/hfslib.c
|
@ -3,25 +3,14 @@
|
|||
#include <dirent.h>
|
||||
#include <time.h>
|
||||
#include <sys/types.h>
|
||||
#include <hfs/hfsplus.h>
|
||||
#include "abstractfile.h"
|
||||
#include <hfs/hfslib.h>
|
||||
#include <hfs/hfscompress.h>
|
||||
#include <sys/stat.h>
|
||||
#include <inttypes.h>
|
||||
#ifdef WIN32
|
||||
#include <sys/utime.h>
|
||||
#define lstat stat
|
||||
#else
|
||||
#include <utime.h>
|
||||
#endif
|
||||
|
||||
#define BUFSIZE 1024*1024
|
||||
|
||||
static int silence = 0;
|
||||
|
||||
void hfs_setsilence(int s) {
|
||||
silence = s;
|
||||
}
|
||||
|
||||
void writeToFile(HFSPlusCatalogFile* file, AbstractFile* output, Volume* volume) {
|
||||
unsigned char* buffer;
|
||||
io_func* io;
|
||||
|
@ -30,27 +19,16 @@ void writeToFile(HFSPlusCatalogFile* file, AbstractFile* output, Volume* volume)
|
|||
|
||||
buffer = (unsigned char*) malloc(BUFSIZE);
|
||||
|
||||
if(file->permissions.ownerFlags & UF_COMPRESSED) {
|
||||
io = openHFSPlusCompressed(volume, file);
|
||||
if(io == NULL) {
|
||||
hfs_panic("error opening file");
|
||||
free(buffer);
|
||||
return;
|
||||
}
|
||||
|
||||
curPosition = 0;
|
||||
bytesLeft = ((HFSPlusCompressed*) io->data)->decmpfs->size;
|
||||
} else {
|
||||
io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
|
||||
if(io == NULL) {
|
||||
hfs_panic("error opening file");
|
||||
free(buffer);
|
||||
return;
|
||||
}
|
||||
|
||||
curPosition = 0;
|
||||
bytesLeft = file->dataFork.logicalSize;
|
||||
}
|
||||
io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
|
||||
if(io == NULL) {
|
||||
hfs_panic("error opening file");
|
||||
free(buffer);
|
||||
return;
|
||||
}
|
||||
|
||||
curPosition = 0;
|
||||
bytesLeft = file->dataFork.logicalSize;
|
||||
|
||||
while(bytesLeft > 0) {
|
||||
if(bytesLeft > BUFSIZE) {
|
||||
if(!READ(io, curPosition, BUFSIZE, buffer)) {
|
||||
|
@ -87,24 +65,16 @@ void writeToHFSFile(HFSPlusCatalogFile* file, AbstractFile* input, Volume* volum
|
|||
|
||||
bytesLeft = input->getLength(input);
|
||||
|
||||
if(file->permissions.ownerFlags & UF_COMPRESSED) {
|
||||
io = openHFSPlusCompressed(volume, file);
|
||||
if(io == NULL) {
|
||||
hfs_panic("error opening file");
|
||||
free(buffer);
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
|
||||
if(io == NULL) {
|
||||
hfs_panic("error opening file");
|
||||
free(buffer);
|
||||
return;
|
||||
}
|
||||
allocate((RawFile*)io->data, bytesLeft);
|
||||
io = openRawFile(file->fileID, &file->dataFork, (HFSPlusCatalogRecord*)file, volume);
|
||||
if(io == NULL) {
|
||||
hfs_panic("error opening file");
|
||||
free(buffer);
|
||||
return;
|
||||
}
|
||||
|
||||
curPosition = 0;
|
||||
curPosition = 0;
|
||||
|
||||
allocate((RawFile*)io->data, bytesLeft);
|
||||
|
||||
while(bytesLeft > 0) {
|
||||
if(bytesLeft > BUFSIZE) {
|
||||
|
@ -427,7 +397,6 @@ static void extractOne(HFSCatalogNodeID folderID, char* name, HFSPlusCatalogReco
|
|||
#ifdef WIN32
|
||||
HFSPlusCatalogRecord* targetRecord;
|
||||
#endif
|
||||
struct utimbuf times;
|
||||
|
||||
if(strncmp(name, ".HFS+ Private Directory Data", sizeof(".HFS+ Private Directory Data") - 1) == 0 || name[0] == '\0') {
|
||||
return;
|
||||
|
@ -444,9 +413,6 @@ static void extractOne(HFSCatalogNodeID folderID, char* name, HFSPlusCatalogReco
|
|||
// TODO: chown . now that contents are extracted
|
||||
ASSERT(chdir(cwd) == 0, "chdir");
|
||||
chmod(name, folder->permissions.fileMode & 07777);
|
||||
times.actime = APPLE_TO_UNIX_TIME(folder->accessDate);
|
||||
times.modtime = APPLE_TO_UNIX_TIME(folder->contentModDate);
|
||||
utime(name, ×);
|
||||
} else if(record->recordType == kHFSPlusFileRecord) {
|
||||
file = (HFSPlusCatalogFile*)record;
|
||||
fileType = file->permissions.fileMode & S_IFMT;
|
||||
|
@ -489,9 +455,6 @@ static void extractOne(HFSCatalogNodeID folderID, char* name, HFSPlusCatalogReco
|
|||
#ifdef WIN32
|
||||
chmod(name, file->permissions.fileMode & 07777);
|
||||
#endif
|
||||
times.actime = APPLE_TO_UNIX_TIME(file->accessDate);
|
||||
times.modtime = APPLE_TO_UNIX_TIME(file->contentModDate);
|
||||
utime(name, ×);
|
||||
} else {
|
||||
printf("WARNING: cannot fopen %s\n", name);
|
||||
}
|
||||
|
@ -570,25 +533,12 @@ int copyAcrossVolumes(Volume* volume1, Volume* volume2, char* path1, char* path2
|
|||
bufferSize = 0;
|
||||
tmpFile = createAbstractFileFromMemoryFile((void**)&buffer, &bufferSize);
|
||||
|
||||
if(!silence)
|
||||
{
|
||||
printf("retrieving... "); fflush(stdout);
|
||||
}
|
||||
|
||||
printf("retrieving... "); fflush(stdout);
|
||||
get_hfs(volume1, path1, tmpFile);
|
||||
tmpFile->seek(tmpFile, 0);
|
||||
|
||||
if(!silence)
|
||||
{
|
||||
printf("writing (%ld)... ", (long) tmpFile->getLength(tmpFile)); fflush(stdout);
|
||||
}
|
||||
|
||||
printf("writing (%ld)... ", (long) tmpFile->getLength(tmpFile)); fflush(stdout);
|
||||
ret = add_hfs(volume2, tmpFile, path2);
|
||||
|
||||
if(!silence)
|
||||
{
|
||||
printf("done\n");
|
||||
}
|
||||
printf("done\n");
|
||||
|
||||
free(buffer);
|
||||
|
||||
|
@ -602,8 +552,6 @@ void displayFolder(HFSCatalogNodeID folderID, Volume* volume) {
|
|||
HFSPlusCatalogFile* file;
|
||||
time_t fileTime;
|
||||
struct tm *date;
|
||||
HFSPlusDecmpfs* compressData;
|
||||
size_t attrSize;
|
||||
|
||||
theList = list = getFolderContents(folderID, volume);
|
||||
|
||||
|
@ -620,22 +568,15 @@ void displayFolder(HFSCatalogNodeID folderID, Volume* volume) {
|
|||
printf("%06o ", file->permissions.fileMode);
|
||||
printf("%3d ", file->permissions.ownerID);
|
||||
printf("%3d ", file->permissions.groupID);
|
||||
if(file->permissions.ownerFlags & UF_COMPRESSED) {
|
||||
attrSize = getAttribute(volume, file->fileID, "com.apple.decmpfs", (uint8_t**)(&compressData));
|
||||
flipHFSPlusDecmpfs(compressData);
|
||||
printf("%12" PRId64 " ", compressData->size);
|
||||
free(compressData);
|
||||
} else {
|
||||
printf("%12" PRId64 " ", file->dataFork.logicalSize);
|
||||
}
|
||||
printf("%12" PRId64 " ", file->dataFork.logicalSize);
|
||||
fileTime = APPLE_TO_UNIX_TIME(file->contentModDate);
|
||||
}
|
||||
|
||||
date = localtime(&fileTime);
|
||||
if(date != NULL) {
|
||||
printf("%2d/%2d/%4d %02d:%02d ", date->tm_mon, date->tm_mday, date->tm_year + 1900, date->tm_hour, date->tm_min);
|
||||
printf("%2d/%2d/%4d %02d:%02d ", date->tm_mon, date->tm_mday, date->tm_year + 1900, date->tm_hour, date->tm_min);
|
||||
} else {
|
||||
printf(" ");
|
||||
printf(" ");
|
||||
}
|
||||
|
||||
printUnicode(&list->name);
|
||||
|
@ -647,24 +588,14 @@ void displayFolder(HFSCatalogNodeID folderID, Volume* volume) {
|
|||
releaseCatalogRecordList(theList);
|
||||
}
|
||||
|
||||
void displayFileLSLine(Volume* volume, HFSPlusCatalogFile* file, const char* name) {
|
||||
void displayFileLSLine(HFSPlusCatalogFile* file, const char* name) {
|
||||
time_t fileTime;
|
||||
struct tm *date;
|
||||
HFSPlusDecmpfs* compressData;
|
||||
|
||||
printf("%06o ", file->permissions.fileMode);
|
||||
printf("%3d ", file->permissions.ownerID);
|
||||
printf("%3d ", file->permissions.groupID);
|
||||
|
||||
if(file->permissions.ownerFlags & UF_COMPRESSED) {
|
||||
getAttribute(volume, file->fileID, "com.apple.decmpfs", (uint8_t**)(&compressData));
|
||||
flipHFSPlusDecmpfs(compressData);
|
||||
printf("%12" PRId64 " ", compressData->size);
|
||||
free(compressData);
|
||||
} else {
|
||||
printf("%12" PRId64 " ", file->dataFork.logicalSize);
|
||||
}
|
||||
|
||||
printf("%12" PRId64 " ", file->dataFork.logicalSize);
|
||||
fileTime = APPLE_TO_UNIX_TIME(file->contentModDate);
|
||||
date = localtime(&fileTime);
|
||||
if(date != NULL) {
|
||||
|
@ -673,19 +604,6 @@ void displayFileLSLine(Volume* volume, HFSPlusCatalogFile* file, const char* nam
|
|||
printf(" ");
|
||||
}
|
||||
printf("%s\n", name);
|
||||
|
||||
XAttrList* next;
|
||||
XAttrList* attrs = getAllExtendedAttributes(file->fileID, volume);
|
||||
if(attrs != NULL) {
|
||||
printf("Extended attributes\n");
|
||||
while(attrs != NULL) {
|
||||
next = attrs->next;
|
||||
printf("\t%s\n", attrs->name);
|
||||
free(attrs->name);
|
||||
free(attrs);
|
||||
attrs = next;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void hfs_ls(Volume* volume, const char* path) {
|
||||
|
@ -699,7 +617,7 @@ void hfs_ls(Volume* volume, const char* path) {
|
|||
if(record->recordType == kHFSPlusFolderRecord)
|
||||
displayFolder(((HFSPlusCatalogFolder*)record)->folderID, volume);
|
||||
else
|
||||
displayFileLSLine(volume, (HFSPlusCatalogFile*)record, name);
|
||||
displayFileLSLine((HFSPlusCatalogFile*)record, name);
|
||||
} else {
|
||||
printf("No such file or directory\n");
|
||||
}
|
||||
|
@ -750,8 +668,7 @@ void hfs_untar(Volume* volume, AbstractFile* tarFile) {
|
|||
HFSPlusCatalogRecord* record = getRecordFromPath3(fileName, volume, NULL, NULL, TRUE, FALSE, kHFSRootFolderID);
|
||||
if(record) {
|
||||
if(record->recordType == kHFSPlusFolderRecord || type == 5) {
|
||||
if(!silence)
|
||||
printf("ignoring %s, type = %d\n", fileName, type);
|
||||
printf("ignoring %s, type = %d\n", fileName, type);
|
||||
free(record);
|
||||
goto loop;
|
||||
} else {
|
||||
|
@ -762,8 +679,7 @@ void hfs_untar(Volume* volume, AbstractFile* tarFile) {
|
|||
}
|
||||
|
||||
if(type == 0) {
|
||||
if(!silence)
|
||||
printf("file: %s (%04o), size = %d\n", fileName, mode, size);
|
||||
printf("file: %s (%04o), size = %d\n", fileName, mode, size);
|
||||
void* buffer = malloc(size);
|
||||
tarFile->seek(tarFile, curRecord + 512);
|
||||
tarFile->read(tarFile, buffer, size);
|
||||
|
@ -771,12 +687,10 @@ void hfs_untar(Volume* volume, AbstractFile* tarFile) {
|
|||
add_hfs(volume, inFile, fileName);
|
||||
free(buffer);
|
||||
} else if(type == 5) {
|
||||
if(!silence)
|
||||
printf("directory: %s (%04o)\n", fileName, mode);
|
||||
printf("directory: %s (%04o)\n", fileName, mode);
|
||||
newFolder(fileName, volume);
|
||||
} else if(type == 2) {
|
||||
if(!silence)
|
||||
printf("symlink: %s (%04o) -> %s\n", fileName, mode, target);
|
||||
printf("symlink: %s (%04o) -> %s\n", fileName, mode, target);
|
||||
makeSymlink(fileName, target, volume);
|
||||
}
|
||||
|
||||
|
|
|
@ -174,9 +174,6 @@ static int rawFileRead(io_func* io,off_t location, size_t size, void *buffer) {
|
|||
volume = rawFile->volume;
|
||||
blockSize = volume->volumeHeader->blockSize;
|
||||
|
||||
if(!rawFile->extents)
|
||||
return FALSE;
|
||||
|
||||
extent = rawFile->extents;
|
||||
fileLoc = 0;
|
||||
|
||||
|
|
120
hfs/volume.c
120
hfs/volume.c
|
@ -98,76 +98,62 @@ int updateVolume(Volume* volume) {
|
|||
}
|
||||
|
||||
Volume* openVolume(io_func* io) {
|
||||
Volume* volume;
|
||||
io_func* file;
|
||||
|
||||
volume = (Volume*) malloc(sizeof(Volume));
|
||||
volume->image = io;
|
||||
volume->extentsTree = NULL;
|
||||
|
||||
volume->volumeHeader = readVolumeHeader(io, 1024);
|
||||
if(volume->volumeHeader == NULL) {
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
file = openRawFile(kHFSExtentsFileID, &volume->volumeHeader->extentsFile, NULL, volume);
|
||||
if(file == NULL) {
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
volume->extentsTree = openExtentsTree(file);
|
||||
if(volume->extentsTree == NULL) {
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
file = openRawFile(kHFSCatalogFileID, &volume->volumeHeader->catalogFile, NULL, volume);
|
||||
if(file == NULL) {
|
||||
closeBTree(volume->extentsTree);
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
volume->catalogTree = openCatalogTree(file);
|
||||
if(volume->catalogTree == NULL) {
|
||||
closeBTree(volume->extentsTree);
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
volume->allocationFile = openRawFile(kHFSAllocationFileID, &volume->volumeHeader->allocationFile, NULL, volume);
|
||||
if(volume->allocationFile == NULL) {
|
||||
closeBTree(volume->catalogTree);
|
||||
closeBTree(volume->extentsTree);
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
volume->attrTree = NULL;
|
||||
file = openRawFile(kHFSAttributesFileID, &volume->volumeHeader->attributesFile, NULL, volume);
|
||||
if(file != NULL) {
|
||||
volume->attrTree = openAttributesTree(file);
|
||||
if(!volume->attrTree) {
|
||||
CLOSE(file);
|
||||
}
|
||||
}
|
||||
|
||||
volume->metadataDir = getMetadataDirectoryID(volume);
|
||||
|
||||
return volume;
|
||||
Volume* volume;
|
||||
io_func* file;
|
||||
|
||||
volume = (Volume*) malloc(sizeof(Volume));
|
||||
volume->image = io;
|
||||
volume->extentsTree = NULL;
|
||||
|
||||
volume->volumeHeader = readVolumeHeader(io, 1024);
|
||||
if(volume->volumeHeader == NULL) {
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
file = openRawFile(kHFSExtentsFileID, &volume->volumeHeader->extentsFile, NULL, volume);
|
||||
if(file == NULL) {
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
volume->extentsTree = openExtentsTree(file);
|
||||
if(volume->extentsTree == NULL) {
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
file = openRawFile(kHFSCatalogFileID, &volume->volumeHeader->catalogFile, NULL, volume);
|
||||
if(file == NULL) {
|
||||
closeBTree(volume->extentsTree);
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
volume->catalogTree = openCatalogTree(file);
|
||||
if(volume->catalogTree == NULL) {
|
||||
closeBTree(volume->extentsTree);
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
volume->allocationFile = openRawFile(kHFSAllocationFileID, &volume->volumeHeader->allocationFile, NULL, volume);
|
||||
if(volume->catalogTree == NULL) {
|
||||
closeBTree(volume->catalogTree);
|
||||
closeBTree(volume->extentsTree);
|
||||
free(volume->volumeHeader);
|
||||
free(volume);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return volume;
|
||||
}
|
||||
|
||||
void closeVolume(Volume *volume) {
|
||||
if(volume->attrTree)
|
||||
closeBTree(volume->attrTree);
|
||||
|
||||
CLOSE(volume->allocationFile);
|
||||
closeBTree(volume->catalogTree);
|
||||
closeBTree(volume->extentsTree);
|
||||
|
|
367
hfs/xattr.c
367
hfs/xattr.c
|
@ -1,367 +0,0 @@
|
|||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <hfs/hfsplus.h>
|
||||
|
||||
static inline void flipAttrData(HFSPlusAttrData* data) {
|
||||
FLIPENDIAN(data->recordType);
|
||||
FLIPENDIAN(data->size);
|
||||
}
|
||||
|
||||
static inline void flipAttrForkData(HFSPlusAttrForkData* data) {
|
||||
FLIPENDIAN(data->recordType);
|
||||
flipForkData(&data->theFork);
|
||||
}
|
||||
|
||||
static inline void flipAttrExtents(HFSPlusAttrExtents* data) {
|
||||
FLIPENDIAN(data->recordType);
|
||||
flipExtentRecord(&data->extents);
|
||||
}
|
||||
|
||||
static int attrCompare(BTKey* vLeft, BTKey* vRight) {
|
||||
HFSPlusAttrKey* left;
|
||||
HFSPlusAttrKey* right;
|
||||
uint16_t i;
|
||||
|
||||
uint16_t cLeft;
|
||||
uint16_t cRight;
|
||||
|
||||
left = (HFSPlusAttrKey*) vLeft;
|
||||
right =(HFSPlusAttrKey*) vRight;
|
||||
|
||||
if(left->fileID < right->fileID) {
|
||||
return -1;
|
||||
} else if(left->fileID > right->fileID) {
|
||||
return 1;
|
||||
} else {
|
||||
for(i = 0; i < left->name.length; i++) {
|
||||
if(i >= right->name.length) {
|
||||
return 1;
|
||||
} else {
|
||||
cLeft = left->name.unicode[i];
|
||||
cRight = right->name.unicode[i];
|
||||
|
||||
if(cLeft < cRight)
|
||||
return -1;
|
||||
else if(cLeft > cRight)
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
if(i < right->name.length) {
|
||||
return -1;
|
||||
} else {
|
||||
/* do a safety check on key length. Otherwise, bad things may happen later on when we try to add or remove with this key */
|
||||
/*if(left->keyLength == right->keyLength) {
|
||||
return 0;
|
||||
} else if(left->keyLength < right->keyLength) {
|
||||
return -1;
|
||||
} else {
|
||||
return 1;
|
||||
}*/
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#define UNICODE_START (sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint16_t))
|
||||
|
||||
static BTKey* attrKeyRead(off_t offset, io_func* io) {
|
||||
int i;
|
||||
HFSPlusAttrKey* key;
|
||||
|
||||
key = (HFSPlusAttrKey*) malloc(sizeof(HFSPlusAttrKey));
|
||||
|
||||
if(!READ(io, offset, UNICODE_START, key))
|
||||
return NULL;
|
||||
|
||||
FLIPENDIAN(key->keyLength);
|
||||
FLIPENDIAN(key->fileID);
|
||||
FLIPENDIAN(key->startBlock);
|
||||
FLIPENDIAN(key->name.length);
|
||||
|
||||
if(!READ(io, offset + UNICODE_START, key->name.length * sizeof(uint16_t), ((unsigned char *)key) + UNICODE_START))
|
||||
return NULL;
|
||||
|
||||
for(i = 0; i < key->name.length; i++) {
|
||||
FLIPENDIAN(key->name.unicode[i]);
|
||||
}
|
||||
|
||||
return (BTKey*)key;
|
||||
}
|
||||
|
||||
static int attrKeyWrite(off_t offset, BTKey* toWrite, io_func* io) {
|
||||
HFSPlusAttrKey* key;
|
||||
uint16_t keyLength;
|
||||
uint16_t nodeNameLength;
|
||||
int i;
|
||||
|
||||
keyLength = toWrite->keyLength;
|
||||
key = (HFSPlusAttrKey*) malloc(keyLength);
|
||||
memcpy(key, toWrite, keyLength);
|
||||
|
||||
nodeNameLength = key->name.length;
|
||||
|
||||
FLIPENDIAN(key->keyLength);
|
||||
FLIPENDIAN(key->fileID);
|
||||
FLIPENDIAN(key->startBlock);
|
||||
FLIPENDIAN(key->name.length);
|
||||
|
||||
for(i = 0; i < nodeNameLength; i++) {
|
||||
FLIPENDIAN(key->name.unicode[i]);
|
||||
}
|
||||
|
||||
if(!WRITE(io, offset, keyLength, key))
|
||||
return FALSE;
|
||||
|
||||
free(key);
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static void attrKeyPrint(BTKey* toPrint) {
|
||||
HFSPlusAttrKey* key;
|
||||
|
||||
key = (HFSPlusAttrKey*)toPrint;
|
||||
|
||||
printf("attribute%d:%d:", key->fileID, key->startBlock);
|
||||
printUnicode(&key->name);
|
||||
}
|
||||
|
||||
static BTKey* attrDataRead(off_t offset, io_func* io) {
|
||||
HFSPlusAttrRecord* record;
|
||||
|
||||
record = (HFSPlusAttrRecord*) malloc(sizeof(HFSPlusAttrRecord));
|
||||
|
||||
if(!READ(io, offset, sizeof(uint32_t), record))
|
||||
return NULL;
|
||||
|
||||
FLIPENDIAN(record->recordType);
|
||||
switch(record->recordType)
|
||||
{
|
||||
case kHFSPlusAttrInlineData:
|
||||
if(!READ(io, offset, sizeof(HFSPlusAttrData), record))
|
||||
return NULL;
|
||||
|
||||
flipAttrData((HFSPlusAttrData*) record);
|
||||
|
||||
record = realloc(record, sizeof(HFSPlusAttrData) + ((HFSPlusAttrData*) record)->size);
|
||||
if(!READ(io, offset + sizeof(HFSPlusAttrData), ((HFSPlusAttrData*) record)->size, ((HFSPlusAttrData*) record)->data))
|
||||
return NULL;
|
||||
|
||||
break;
|
||||
|
||||
case kHFSPlusAttrForkData:
|
||||
if(!READ(io, offset, sizeof(HFSPlusAttrForkData), record))
|
||||
return NULL;
|
||||
|
||||
flipAttrForkData((HFSPlusAttrForkData*) record);
|
||||
|
||||
break;
|
||||
|
||||
case kHFSPlusAttrExtents:
|
||||
if(!READ(io, offset, sizeof(HFSPlusAttrExtents), record))
|
||||
return NULL;
|
||||
|
||||
flipAttrExtents((HFSPlusAttrExtents*) record);
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
return (BTKey*)record;
|
||||
}
|
||||
|
||||
static int updateAttributes(Volume* volume, HFSPlusAttrKey* skey, HFSPlusAttrRecord* srecord) {
|
||||
HFSPlusAttrKey key;
|
||||
HFSPlusAttrRecord* record;
|
||||
int ret, len;
|
||||
|
||||
memcpy(&key, skey, skey->keyLength);
|
||||
|
||||
switch(srecord->recordType) {
|
||||
case kHFSPlusAttrInlineData:
|
||||
len = srecord->attrData.size + sizeof(HFSPlusAttrData);
|
||||
record = (HFSPlusAttrRecord*) malloc(len);
|
||||
memcpy(record, srecord, len);
|
||||
flipAttrData((HFSPlusAttrData*) record);
|
||||
removeFromBTree(volume->attrTree, (BTKey*)(&key));
|
||||
ret = addToBTree(volume->attrTree, (BTKey*)(&key), len, (unsigned char *)record);
|
||||
free(record);
|
||||
break;
|
||||
case kHFSPlusAttrForkData:
|
||||
record = (HFSPlusAttrRecord*) malloc(sizeof(HFSPlusAttrForkData));
|
||||
memcpy(record, srecord, sizeof(HFSPlusAttrForkData));
|
||||
flipAttrForkData((HFSPlusAttrForkData*) record);
|
||||
removeFromBTree(volume->attrTree, (BTKey*)(&key));
|
||||
ret = addToBTree(volume->attrTree, (BTKey*)(&key), sizeof(HFSPlusAttrForkData), (unsigned char *)record);
|
||||
free(record);
|
||||
break;
|
||||
case kHFSPlusAttrExtents:
|
||||
record = (HFSPlusAttrRecord*) malloc(sizeof(HFSPlusAttrExtents));
|
||||
memcpy(record, srecord, sizeof(HFSPlusAttrExtents));
|
||||
flipAttrExtents((HFSPlusAttrExtents*) record);
|
||||
removeFromBTree(volume->attrTree, (BTKey*)(&key));
|
||||
ret = addToBTree(volume->attrTree, (BTKey*)(&key), sizeof(HFSPlusAttrExtents), (unsigned char *)record);
|
||||
free(record);
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
size_t getAttribute(Volume* volume, uint32_t fileID, const char* name, uint8_t** data) {
|
||||
HFSPlusAttrKey key;
|
||||
HFSPlusAttrRecord* record;
|
||||
size_t size;
|
||||
int exact;
|
||||
|
||||
if(!volume->attrTree)
|
||||
return FALSE;
|
||||
|
||||
memset(&key, 0 , sizeof(HFSPlusAttrKey));
|
||||
key.fileID = fileID;
|
||||
key.startBlock = 0;
|
||||
ASCIIToUnicode(name, &key.name);
|
||||
key.keyLength = sizeof(HFSPlusAttrKey) - sizeof(HFSUniStr255) + sizeof(key.name.length) + (sizeof(uint16_t) * key.name.length);
|
||||
|
||||
*data = NULL;
|
||||
|
||||
record = (HFSPlusAttrRecord*) search(volume->attrTree, (BTKey*)(&key), &exact, NULL, NULL);
|
||||
|
||||
if(exact == FALSE) {
|
||||
if(record)
|
||||
free(record);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
switch(record->recordType)
|
||||
{
|
||||
case kHFSPlusAttrInlineData:
|
||||
size = record->attrData.size;
|
||||
*data = (uint8_t*) malloc(size);
|
||||
memcpy(*data, record->attrData.data, size);
|
||||
free(record);
|
||||
return size;
|
||||
default:
|
||||
fprintf(stderr, "unsupported attribute node format\n");
|
||||
return 0;
|
||||
}
|
||||
}

int setAttribute(Volume* volume, uint32_t fileID, const char* name, uint8_t* data, size_t size) {
	HFSPlusAttrKey key;
	HFSPlusAttrData* record;
	int ret, exact;

	if(!volume->attrTree)
		return FALSE;

	memset(&key, 0 , sizeof(HFSPlusAttrKey));
	key.fileID = fileID;
	key.startBlock = 0;
	ASCIIToUnicode(name, &key.name);
	key.keyLength = sizeof(HFSPlusAttrKey) - sizeof(HFSUniStr255) + sizeof(key.name.length) + (sizeof(uint16_t) * key.name.length);

	record = (HFSPlusAttrData*) malloc(sizeof(HFSPlusAttrData) + size);
	memset(record, 0, sizeof(HFSPlusAttrData));

	record->recordType = kHFSPlusAttrInlineData;
	record->size = size;
	memcpy(record->data, data, size);

	ret = updateAttributes(volume, &key, (HFSPlusAttrRecord*) record);

	free(record);
	return ret;
}

int unsetAttribute(Volume* volume, uint32_t fileID, const char* name) {
	HFSPlusAttrKey key;

	if(!volume->attrTree)
		return FALSE;

	memset(&key, 0 , sizeof(HFSPlusAttrKey));
	key.fileID = fileID;
	key.startBlock = 0;
	ASCIIToUnicode(name, &key.name);
	key.keyLength = sizeof(HFSPlusAttrKey) - sizeof(HFSUniStr255) + sizeof(key.name.length) + (sizeof(uint16_t) * key.name.length);
	return removeFromBTree(volume->attrTree, (BTKey*)(&key));
}
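
Taken together, setAttribute/getAttribute/unsetAttribute form a small inline-data xattr API. A minimal usage sketch against an already-open Volume (the attribute name and value are illustrative, a non-zero return from setAttribute is assumed to mean success, and the surrounding file's includes are assumed):

/* Sketch: store, read back, and remove a small extended attribute on a CNID. */
void xattrRoundTrip(Volume* volume, uint32_t cnid) {
	uint8_t value[] = "hello";
	uint8_t* readBack = NULL;
	size_t len;

	if(setAttribute(volume, cnid, "com.example.test", value, sizeof(value))) {
		len = getAttribute(volume, cnid, "com.example.test", &readBack);
		if(len > 0) {
			/* readBack holds 'len' bytes of attribute data owned by the caller */
			free(readBack);
		}
		unsetAttribute(volume, cnid, "com.example.test");
	}
}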

XAttrList* getAllExtendedAttributes(HFSCatalogNodeID CNID, Volume* volume) {
	BTree* tree;
	HFSPlusAttrKey key;
	HFSPlusAttrRecord* record;
	uint32_t nodeNumber;
	int recordNumber;
	BTNodeDescriptor* descriptor;
	HFSPlusAttrKey* currentKey;
	off_t recordOffset;
	XAttrList* list = NULL;
	XAttrList* lastItem = NULL;
	XAttrList* item = NULL;

	if(!volume->attrTree)
		return NULL;

	memset(&key, 0 , sizeof(HFSPlusAttrKey));
	key.fileID = CNID;
	key.startBlock = 0;
	key.name.length = 0;
	key.keyLength = sizeof(HFSPlusAttrKey) - sizeof(HFSUniStr255) + sizeof(key.name.length) + (sizeof(uint16_t) * key.name.length);

	tree = volume->attrTree;
	record = (HFSPlusAttrRecord*) search(tree, (BTKey*)(&key), NULL, &nodeNumber, &recordNumber);
	if(record == NULL)
		return NULL;

	free(record);

	while(nodeNumber != 0) {
		descriptor = readBTNodeDescriptor(nodeNumber, tree);

		while(recordNumber < descriptor->numRecords) {
			recordOffset = getRecordOffset(recordNumber, nodeNumber, tree);
			currentKey = (HFSPlusAttrKey*) READ_KEY(tree, recordOffset, tree->io);

			if(currentKey->fileID == CNID) {
				item = (XAttrList*) malloc(sizeof(XAttrList));
				item->name = (char*) malloc(currentKey->name.length + 1);
				int i;
				for(i = 0; i < currentKey->name.length; i++) {
					item->name[i] = currentKey->name.unicode[i];
				}
				item->name[currentKey->name.length] = '\0';
				item->next = NULL;

				if(lastItem != NULL) {
					lastItem->next = item;
				} else {
					list = item;
				}

				lastItem = item;

				free(currentKey);
			} else {
				free(currentKey);
				free(descriptor);
				return list;
			}

			recordNumber++;
		}

		nodeNumber = descriptor->fLink;
		recordNumber = 0;

		free(descriptor);
	}
	return list;
}
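
The list returned above is heap-allocated node by node, so callers own both the nodes and the names. A minimal traversal and cleanup sketch (printf assumes stdio is available in the including file):

/* Sketch: print and release every attribute name reported for a CNID. */
void printAllXAttrs(Volume* volume, HFSCatalogNodeID cnid) {
	XAttrList* list = getAllExtendedAttributes(cnid, volume);

	while(list != NULL) {
		XAttrList* next = list->next;
		printf("%s\n", list->name);
		free(list->name);
		free(list);
		list = next;
	}
}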

BTree* openAttributesTree(io_func* file) {
	return openBTree(file, &attrCompare, &attrKeyRead, &attrKeyWrite, &attrKeyPrint, &attrDataRead);
}

@@ -8,7 +8,6 @@
#include <sys/types.h>

#ifdef WIN32
#include <unistd.h>
#define fseeko fseeko64
#define ftello ftello64
#define off_t off64_t

@@ -100,10 +99,4 @@ typedef struct io_func_struct {
	closeFunc close;
} io_func;

struct AbstractFile;

unsigned char* decodeBase64(char* toDecode, size_t* dataLength);
void writeBase64(struct AbstractFile* file, unsigned char* data, size_t dataLength, int tabLength, int width);
char* convertBase64(unsigned char* data, size_t dataLength, int tabLength, int width);

#endif

@@ -9,7 +9,7 @@
#define true 1
#define false 0

size_t adc_decompress(size_t in_size, unsigned char *input, size_t avail_size, unsigned char *output, size_t *bytes_written);
int adc_decompress(int in_size, unsigned char *input, int avail_size, unsigned char *output, size_t *bytes_written);
int adc_chunk_type(char _byte);
int adc_chunk_size(char _byte);
int adc_chunk_offset(unsigned char *chunk_start);
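
A small usage sketch for the reverted adc_decompress prototype, assuming a caller that already holds one compressed run in memory; buffer names are illustrative, and since the function body is not part of this hunk the sketch relies only on *bytes_written:

#include <stddef.h>
#include <dmg/adc.h>

/* Decompress one ADC run into 'out'; returns the decompressed length,
 * or 0 for an empty input. Illustrative helper, not part of the commit. */
static size_t decompress_run(unsigned char* in, int inLen, unsigned char* out, int outCap) {
	size_t written = 0;
	if(inLen <= 0)
		return 0;
	adc_decompress(inLen, in, outCap, out, &written);
	return written;
}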

@@ -38,12 +38,12 @@
#define PARTITION_SIZE 0x3f
#define ATAPI_SIZE 0x8
#define FREE_SIZE 0xa
#define EXTRA_SIZE (ATAPI_OFFSET + ATAPI_SIZE + FREE_SIZE)
#define EXTRA_SIZE (DDM_SIZE + PARTITION_SIZE + ATAPI_SIZE + FREE_SIZE)

#define DDM_OFFSET 0x0
#define PARTITION_OFFSET (DDM_SIZE)
#define ATAPI_OFFSET 64
#define USER_OFFSET (ATAPI_OFFSET + ATAPI_SIZE)
#define ATAPI_OFFSET (DDM_SIZE + PARTITION_SIZE)
#define USER_OFFSET (DDM_SIZE + PARTITION_SIZE + ATAPI_SIZE)

#define BOOTCODE_DMMY 0x444D4D59
#define BOOTCODE_GOON 0x676F6F6E
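
For orientation, the reworked macros place the partition map immediately after the DDM and the ATAPI region after the partition map. A standalone sketch of the resulting sector offsets; DDM_SIZE is defined elsewhere in the header and is assumed here to be one sector, so only PARTITION_SIZE, ATAPI_SIZE and FREE_SIZE are taken from this hunk:

#include <stdio.h>

#define DDM_SIZE       0x1   /* assumption for illustration; not shown in this hunk */
#define PARTITION_SIZE 0x3f
#define ATAPI_SIZE     0x8
#define FREE_SIZE      0xa

#define PARTITION_OFFSET (DDM_SIZE)
#define ATAPI_OFFSET     (DDM_SIZE + PARTITION_SIZE)
#define USER_OFFSET      (DDM_SIZE + PARTITION_SIZE + ATAPI_SIZE)
#define EXTRA_SIZE       (DDM_SIZE + PARTITION_SIZE + ATAPI_SIZE + FREE_SIZE)

int main(void) {
	/* With DDM_SIZE == 1: partition map at sector 0x1, ATAPI at 0x40 (the old
	 * hard-coded 64), user data at 0x48, 0x52 sectors of overhead in total. */
	printf("PARTITION_OFFSET=0x%x ATAPI_OFFSET=0x%x USER_OFFSET=0x%x EXTRA_SIZE=0x%x\n",
	       PARTITION_OFFSET, ATAPI_OFFSET, USER_OFFSET, EXTRA_SIZE);
	return 0;
}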

@@ -235,12 +235,10 @@ typedef struct ResourceKey {
	FlipDataFunc flipData;
} ResourceKey;

#define SHA1_DIGEST_SIZE 20

typedef struct {
	uint32_t state[5];
	uint32_t count[2];
	uint8_t buffer[64];
	unsigned long state[5];
	unsigned long count[2];
	unsigned char buffer[64];
} SHA1_CTX;

typedef struct {

@@ -263,7 +261,7 @@ static inline void writeUInt32(AbstractFile* file, uint32_t data) {
	ASSERT(file->write(file, &data, sizeof(data)) == sizeof(data), "fwrite");
}

static inline uint64_t readUInt64(AbstractFile* file) {
static inline uint32_t readUInt64(AbstractFile* file) {
	uint64_t data;

	ASSERT(file->read(file, &data, sizeof(data)) == sizeof(data), "fread");

@@ -280,7 +278,9 @@ static inline void writeUInt64(AbstractFile* file, uint64_t data) {
#ifdef __cplusplus
extern "C" {
#endif
void outResources(AbstractFile* file, AbstractFile* out);
unsigned char* decodeBase64(char* toDecode, size_t* dataLength);
void writeBase64(AbstractFile* file, unsigned char* data, size_t dataLength, int tabLength, int width);
char* convertBase64(unsigned char* data, size_t dataLength, int tabLength, int width);

uint32_t checksumBitness(uint32_t type);

@@ -293,9 +293,10 @@ extern "C" {
void CRCProxy(void* token, const unsigned char* data, size_t len);
void CRCZeroesProxy(void* token, size_t len);

void SHA1Transform(unsigned long state[5], const unsigned char buffer[64]);
void SHA1Init(SHA1_CTX* context);
void SHA1Update(SHA1_CTX* context, const uint8_t* data, const size_t len);
void SHA1Final(uint8_t digest[SHA1_DIGEST_SIZE], SHA1_CTX* context);
void SHA1Update(SHA1_CTX* context, const unsigned char* data, unsigned int len);
void SHA1Final(unsigned char digest[20], SHA1_CTX* context);

void flipUDIFChecksum(UDIFChecksum* o, char out);
void readUDIFChecksum(AbstractFile* file, UDIFChecksum* o);
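
The checksum helpers keep the classic three-call SHA-1 interface (init, update, final). A minimal hashing sketch against the declarations above; the include is an assumption about where these prototypes live:

#include <stdio.h>
#include <dmg/dmg.h>   /* assumed to provide SHA1_CTX and SHA1Init/Update/Final */

static void printSHA1(const unsigned char* buf, unsigned int len) {
	SHA1_CTX ctx;
	unsigned char digest[20];
	int i;

	SHA1Init(&ctx);
	SHA1Update(&ctx, buf, len);
	SHA1Final(digest, &ctx);

	for(i = 0; i < 20; i++)
		printf("%02x", digest[i]);
	printf("\n");   /* for "abc" this is the standard a9993e364706816aba3e25717850c26c9cd0d89d */
}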

@@ -327,22 +328,22 @@ extern "C" {
void flipPartitionMultiple(Partition* partition, char multiple, char out, unsigned int BlockSize);

void readDriverDescriptorMap(AbstractFile* file, ResourceKey* resources);
DriverDescriptorRecord* createDriverDescriptorMap(uint32_t numSectors, unsigned int BlockSize);
int writeDriverDescriptorMap(int pNum, AbstractFile* file, DriverDescriptorRecord* DDM, unsigned int BlockSize, ChecksumFunc dataForkChecksum, void* dataForkToken, ResourceKey **resources);
DriverDescriptorRecord* createDriverDescriptorMap(uint32_t numSectors);
void writeDriverDescriptorMap(AbstractFile* file, DriverDescriptorRecord* DDM, ChecksumFunc dataForkChecksum, void* dataForkToken, ResourceKey **resources);
void readApplePartitionMap(AbstractFile* file, ResourceKey* resources, unsigned int BlockSize);
Partition* createApplePartitionMap(uint32_t numSectors, const char* volumeType, unsigned int BlockSize);
int writeApplePartitionMap(int pNum, AbstractFile* file, Partition* partitions, unsigned int BlockSize, ChecksumFunc dataForkChecksum, void* dataForkToken, ResourceKey **resources, NSizResource** nsizIn);
int writeATAPI(int pNum, AbstractFile* file, unsigned int BlockSize, ChecksumFunc dataForkChecksum, void* dataForkToken, ResourceKey **resources, NSizResource** nsizIn);
int writeFreePartition(int pNum, AbstractFile* outFile, uint32_t offset, uint32_t numSectors, ResourceKey** resources);
Partition* createApplePartitionMap(uint32_t numSectors, const char* volumeType);
void writeApplePartitionMap(AbstractFile* file, Partition* partitions, ChecksumFunc dataForkChecksum, void* dataForkToken, ResourceKey **resources, NSizResource** nsizIn);
void writeATAPI(AbstractFile* file, ChecksumFunc dataForkChecksum, void* dataForkToken, ResourceKey **resources, NSizResource** nsizIn);
void writeFreePartition(AbstractFile* outFile, uint32_t numSectors, ResourceKey** resources);

void extractBLKX(AbstractFile* in, AbstractFile* out, BLKXTable* blkx);
BLKXTable* insertBLKX(AbstractFile* out, AbstractFile* in, uint32_t firstSectorNumber, uint32_t numSectors, uint32_t blocksDescriptor,
		uint32_t checksumType, ChecksumFunc uncompressedChk, void* uncompressedChkToken, ChecksumFunc compressedChk,
		void* compressedChkToken, Volume* volume, int addComment);
		void* compressedChkToken, Volume* volume);


int extractDmg(AbstractFile* abstractIn, AbstractFile* abstractOut, int partNum);
int buildDmg(AbstractFile* abstractIn, AbstractFile* abstractOut, unsigned int BlockSize);
int buildDmg(AbstractFile* abstractIn, AbstractFile* abstractOut);
int convertToISO(AbstractFile* abstractIn, AbstractFile* abstractOut);
int convertToDMG(AbstractFile* abstractIn, AbstractFile* abstractOut);
#ifdef __cplusplus

@@ -8,7 +8,7 @@
extern "C" {
#endif
int extractDmg(AbstractFile* abstractIn, AbstractFile* abstractOut, int partNum);
int buildDmg(AbstractFile* abstractIn, AbstractFile* abstractOut, unsigned int BlockSize);
int buildDmg(AbstractFile* abstractIn, AbstractFile* abstractOut);

int convertToDMG(AbstractFile* abstractIn, AbstractFile* abstractOut);
int convertToISO(AbstractFile* abstractIn, AbstractFile* abstractOut);

@@ -1,71 +0,0 @@
#ifndef HFSCOMPRESS_H
#define HFSCOMPRESS_H

#include <stdint.h>
#include "common.h"

#define CMPFS_MAGIC 0x636D7066

typedef struct HFSPlusDecmpfs {
	uint32_t magic;
	uint32_t flags;
	uint64_t size;
	uint8_t data[0];
} __attribute__ ((packed)) HFSPlusDecmpfs;

typedef struct HFSPlusCmpfRsrcHead {
	uint32_t headerSize;
	uint32_t totalSize;
	uint32_t dataSize;
	uint32_t flags;
} __attribute__ ((packed)) HFSPlusCmpfRsrcHead;

typedef struct HFSPlusCmpfRsrcBlock {
	uint32_t offset;
	uint32_t size;
} __attribute__ ((packed)) HFSPlusCmpfRsrcBlock;

typedef struct HFSPlusCmpfRsrcBlockHead {
	uint32_t dataSize;
	uint32_t numBlocks;
	HFSPlusCmpfRsrcBlock blocks[0];
} __attribute__ ((packed)) HFSPlusCmpfRsrcBlockHead;

typedef struct HFSPlusCmpfEnd {
	uint32_t pad[6];
	uint16_t unk1;
	uint16_t unk2;
	uint16_t unk3;
	uint32_t magic;
	uint32_t flags;
	uint64_t size;
	uint32_t unk4;
} __attribute__ ((packed)) HFSPlusCmpfEnd;

typedef struct HFSPlusCompressed {
	Volume* volume;
	HFSPlusCatalogFile* file;
	io_func* io;
	size_t decmpfsSize;
	HFSPlusDecmpfs* decmpfs;

	HFSPlusCmpfRsrcHead rsrcHead;
	HFSPlusCmpfRsrcBlockHead* blocks;

	int dirty;

	uint8_t* cached;
	uint32_t cachedStart;
	uint32_t cachedEnd;
} HFSPlusCompressed;

#ifdef __cplusplus
extern "C" {
#endif
	void flipHFSPlusDecmpfs(HFSPlusDecmpfs* compressData);
	io_func* openHFSPlusCompressed(Volume* volume, HFSPlusCatalogFile* file);
#ifdef __cplusplus
}
#endif

#endif
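
Since hfscompress.h is removed by this commit, the decmpfs structures above lose their in-tree consumer. For reference only, a sketch of how the removed interface could be driven from the attribute API earlier in this diff; the attribute name "com.apple.decmpfs" and the byte-swap step are assumptions, not shown in the hunk:

#include <stdlib.h>

/* Sketch: fetch the decmpfs xattr for a CNID and sanity-check its magic.
 * Assumes the HFSPlusDecmpfs layout and CMPFS_MAGIC from the removed header,
 * plus getAttribute()/flipHFSPlusDecmpfs() declared elsewhere in this diff. */
static int isCompressedFile(Volume* volume, uint32_t cnid) {
	uint8_t* data = NULL;
	size_t len = getAttribute(volume, cnid, "com.apple.decmpfs", &data);
	int compressed = 0;

	if(len >= sizeof(HFSPlusDecmpfs)) {
		HFSPlusDecmpfs* hdr = (HFSPlusDecmpfs*) data;
		flipHFSPlusDecmpfs(hdr);              /* byte-swap to host order (assumed) */
		compressed = (hdr->magic == CMPFS_MAGIC);
	}

	if(data)
		free(data);
	return compressed;
}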

@@ -18,7 +18,6 @@ extern "C" {

void hfs_untar(Volume* volume, AbstractFile* tarFile);
void hfs_ls(Volume* volume, const char* path);
void hfs_setsilence(int s);
#ifdef __cplusplus
}
#endif

@@ -28,10 +28,6 @@ typedef void (*keyPrintFunc)(BTKey* toPrint);
typedef int (*keyWriteFunc)(off_t offset, BTKey* toWrite, struct io_func_struct* io);
typedef int (*compareFunc)(BTKey* left, BTKey* right);

#define STR_SIZE(str) (sizeof(uint16_t) + (sizeof(uint16_t) * (str).length))

#ifndef __HFS_FORMAT__

typedef uint32_t HFSCatalogNodeID;

enum {

@@ -48,6 +44,8 @@ enum {
	kHFSFirstUserCatalogNodeID = 16
};

#define STR_SIZE(str) (sizeof(uint16_t) + (sizeof(uint16_t) * (str).length))

struct HFSUniStr255 {
	uint16_t length;
	uint16_t unicode[255];

@@ -263,8 +261,6 @@ struct ExtendedFolderInfo {
} __attribute__((__packed__));
typedef struct ExtendedFolderInfo ExtendedFolderInfo;

#ifndef _STAT_H_
#ifndef _SYS_STAT_H
#define S_ISUID 0004000 /* set user id on execution */
#define S_ISGID 0002000 /* set group id on execution */
#define S_ISTXT 0001000 /* sticky bit */

@@ -293,10 +289,6 @@ typedef struct ExtendedFolderInfo ExtendedFolderInfo;
#define S_IFLNK 0120000 /* symbolic link */
#define S_IFSOCK 0140000 /* socket */
#define S_IFWHT 0160000 /* whiteout */
#endif
#endif

#define UF_COMPRESSED 040

struct HFSPlusBSDInfo {
	uint32_t ownerID;

@@ -389,58 +381,6 @@ struct HFSPlusCatalogThread {
} __attribute__((__packed__));
typedef struct HFSPlusCatalogThread HFSPlusCatalogThread;

enum {
	kHFSPlusAttrInlineData = 0x10,
	kHFSPlusAttrForkData = 0x20,
	kHFSPlusAttrExtents = 0x30
};

struct HFSPlusAttrForkData {
	uint32_t recordType;
	uint32_t reserved;
	HFSPlusForkData theFork;
} __attribute__((__packed__));
typedef struct HFSPlusAttrForkData HFSPlusAttrForkData;

struct HFSPlusAttrExtents {
	uint32_t recordType;
	uint32_t reserved;
	HFSPlusExtentRecord extents;
};
typedef struct HFSPlusAttrExtents HFSPlusAttrExtents;

struct HFSPlusAttrData {
	uint32_t recordType;
	uint32_t reserved[2];
	uint32_t size;
	uint8_t data[0];
} __attribute__((__packed__));
typedef struct HFSPlusAttrData HFSPlusAttrData;

union HFSPlusAttrRecord {
	uint32_t recordType;
	HFSPlusAttrData attrData;
	HFSPlusAttrForkData forkData;
	HFSPlusAttrExtents overflowExtents;
};
typedef union HFSPlusAttrRecord HFSPlusAttrRecord;

struct HFSPlusAttrKey {
	uint16_t keyLength;
	uint16_t pad;
	uint32_t fileID;
	uint32_t startBlock;
	HFSUniStr255 name;
} __attribute__((__packed__));
typedef struct HFSPlusAttrKey HFSPlusAttrKey;
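
The keyLength formula used by the attribute functions earlier in this diff follows directly from the packed layout above. A self-contained sketch of the arithmetic, with the two structures copied from this hunk:

#include <stdint.h>
#include <stdio.h>

/* Standalone copies of the packed layouts above, for a size check only. */
struct HFSUniStr255 {
	uint16_t length;
	uint16_t unicode[255];
} __attribute__((__packed__));

struct HFSPlusAttrKey {
	uint16_t keyLength;
	uint16_t pad;
	uint32_t fileID;
	uint32_t startBlock;
	struct HFSUniStr255 name;
} __attribute__((__packed__));

int main(void) {
	unsigned nameLen = 17;  /* e.g. a 17-character attribute name */

	/* sizeof(HFSPlusAttrKey) = 2+2+4+4 + 512 = 524, sizeof(HFSUniStr255) = 512,
	 * so keyLength = 524 - 512 + 2 + 2*nameLen = 14 + 2*nameLen = 48 here. */
	unsigned keyLength = sizeof(struct HFSPlusAttrKey) - sizeof(struct HFSUniStr255)
		+ sizeof(uint16_t) + (sizeof(uint16_t) * nameLen);

	printf("keyLength for a %u-character name: %u bytes\n", nameLen, keyLength);
	return 0;
}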

enum {
	kHardLinkFileType = 0x686C6E6B, /* 'hlnk' */
	kHFSPlusCreator = 0x6866732B /* 'hfs+' */
};

#endif

struct HFSPlusCatalogRecord {
	int16_t recordType;
	unsigned char data[0];

@@ -454,12 +394,6 @@ struct CatalogRecordList {
};
typedef struct CatalogRecordList CatalogRecordList;

struct XAttrList {
	char* name;
	struct XAttrList* next;
};
typedef struct XAttrList XAttrList;

struct Extent {
	uint32_t startBlock;
	uint32_t blockCount;

@@ -484,9 +418,7 @@ typedef struct {

	BTree* extentsTree;
	BTree* catalogTree;
	BTree* attrTree;
	io_func* allocationFile;
	HFSCatalogNodeID metadataDir;
} Volume;


@@ -525,12 +457,6 @@ extern "C" {

io_func* openRawFile(HFSCatalogNodeID id, HFSPlusForkData* forkData, HFSPlusCatalogRecord* catalogRecord, Volume* volume);

BTree* openAttributesTree(io_func* file);
size_t getAttribute(Volume* volume, uint32_t fileID, const char* name, uint8_t** data);
int setAttribute(Volume* volume, uint32_t fileID, const char* name, uint8_t* data, size_t size);
int unsetAttribute(Volume* volume, uint32_t fileID, const char* name);
XAttrList* getAllExtendedAttributes(HFSCatalogNodeID CNID, Volume* volume);

void flipExtentRecord(HFSPlusExtentRecord* extentRecord);

BTree* openExtentsTree(io_func* file);

@@ -552,7 +478,6 @@ extern "C" {
int makeSymlink(const char* pathName, const char* target, Volume* volume);
int attrFile(const char* pathName, const char* flags, Volume* volume);

HFSCatalogNodeID getMetadataDirectoryID(Volume* volume);
HFSPlusCatalogRecord* getRecordByCNID(HFSCatalogNodeID CNID, Volume* volume);
HFSPlusCatalogRecord* getLinkTarget(HFSPlusCatalogRecord* record, HFSCatalogNodeID parentID, HFSPlusCatalogKey *key, Volume* volume);
CatalogRecordList* getFolderContents(HFSCatalogNodeID CNID, Volume* volume);