Back out 8941e2b7a0bf (bug 703660) for Windows PGO build failures

Phil Ringnalda 2011-12-07 08:41:02 -08:00
Parent 84471e527a
Commit ca22b9cadc
32 changed files with 204 additions and 6326 deletions

View file

@@ -142,6 +142,16 @@ IDBFactory::GetConnection(const nsAString& aDatabaseFilePath)
getter_AddRefs(connection));
NS_ENSURE_SUCCESS(rv, nsnull);
#ifdef DEBUG
{
// Check to make sure that the database schema is correct again.
PRInt32 schemaVersion;
NS_ASSERTION(NS_SUCCEEDED(connection->GetSchemaVersion(&schemaVersion)) &&
schemaVersion == DB_SCHEMA_VERSION,
"Wrong schema!");
}
#endif
// Turn on foreign key constraints!
rv = connection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
"PRAGMA foreign_keys = ON;"

View file

@@ -50,7 +50,6 @@
#include "nsJSUtils.h"
#include "nsServiceManagerUtils.h"
#include "nsThreadUtils.h"
#include "snappy/snappy.h"
#include "AsyncConnectionHelper.h"
#include "IDBCursor.h"
@@ -785,31 +784,12 @@ IDBObjectStore::GetStructuredCloneDataFromStatement(
}
#endif
const PRUint8* blobData;
PRUint32 blobDataLength;
nsresult rv = aStatement->GetSharedBlob(aIndex, &blobDataLength, &blobData);
const PRUint8* data;
PRUint32 dataLength;
nsresult rv = aStatement->GetSharedBlob(aIndex, &dataLength, &data);
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
const char* compressed = reinterpret_cast<const char*>(blobData);
size_t compressedLength = size_t(blobDataLength);
size_t uncompressedLength;
if (!snappy::GetUncompressedLength(compressed, compressedLength,
&uncompressedLength)) {
NS_WARNING("Snappy can't determine uncompressed length!");
return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
}
nsAutoArrayPtr<char> uncompressed(new char[uncompressedLength]);
if (!snappy::RawUncompress(compressed, compressedLength,
uncompressed.get())) {
NS_WARNING("Snappy can't determine uncompressed length!");
return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
}
return aBuffer.copy(reinterpret_cast<const uint64_t *>(uncompressed.get()),
uncompressedLength) ?
return aBuffer.copy(reinterpret_cast<const uint64_t *>(data), dataLength) ?
NS_OK :
NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
}
@@ -1773,41 +1753,11 @@ AddHelper::DoDatabaseWork(mozIStorageConnection* aConnection)
NS_ENSURE_SUCCESS(rv, rv);
}
NS_NAMED_LITERAL_CSTRING(data, "data");
const PRUint8* buffer = reinterpret_cast<const PRUint8*>(mCloneBuffer.data());
size_t bufferLength = mCloneBuffer.nbytes();
// This will hold our compressed data until the end of the method. The
// BindBlobByName function will copy it.
nsAutoArrayPtr<char> compressed;
// This points to the compressed buffer.
const PRUint8* dataBuffer = nsnull;
size_t dataBufferLength = 0;
// If we're going to modify the buffer later to add a key property on an
// autoIncrement objectStore then we will wait to compress our data until we
// have the appropriate key value.
if (autoIncrement && !mOverwrite && !keyPath.IsEmpty() && unsetKey) {
rv = stmt->BindInt32ByName(data, 0);
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
}
else {
// Compress the bytes before adding into the database.
const char* uncompressed =
reinterpret_cast<const char*>(mCloneBuffer.data());
size_t uncompressedLength = mCloneBuffer.nbytes();
size_t compressedLength = snappy::MaxCompressedLength(uncompressedLength);
compressed = new char[compressedLength];
snappy::RawCompress(uncompressed, uncompressedLength, compressed.get(),
&compressedLength);
dataBuffer = reinterpret_cast<const PRUint8*>(compressed.get());
dataBufferLength = compressedLength;
rv = stmt->BindBlobByName(data, dataBuffer, dataBufferLength);
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
}
rv = stmt->BindBlobByName(NS_LITERAL_CSTRING("data"), buffer, bufferLength);
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
rv = stmt->Execute();
if (NS_FAILED(rv)) {
@@ -1830,9 +1780,8 @@ AddHelper::DoDatabaseWork(mozIStorageConnection* aConnection)
rv = mKey.BindToStatement(stmt, keyValue);
NS_ENSURE_SUCCESS(rv, rv);
NS_ASSERTION(dataBuffer && dataBufferLength, "These should be set!");
rv = stmt->BindBlobByName(data, dataBuffer, dataBufferLength);
rv = stmt->BindBlobByName(NS_LITERAL_CSTRING("data"), buffer,
bufferLength);
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
rv = stmt->Execute();
@@ -1886,23 +1835,11 @@ AddHelper::DoDatabaseWork(mozIStorageConnection* aConnection)
rv = mKey.BindToStatement(stmt, keyValue);
NS_ENSURE_SUCCESS(rv, rv);
NS_ASSERTION(!dataBuffer && !dataBufferLength, "These should be unset!");
buffer = reinterpret_cast<const PRUint8*>(mCloneBuffer.data());
bufferLength = mCloneBuffer.nbytes();
const char* uncompressed =
reinterpret_cast<const char*>(mCloneBuffer.data());
size_t uncompressedLength = mCloneBuffer.nbytes();
size_t compressedLength =
snappy::MaxCompressedLength(uncompressedLength);
compressed = new char[compressedLength];
snappy::RawCompress(uncompressed, uncompressedLength, compressed.get(),
&compressedLength);
dataBuffer = reinterpret_cast<const PRUint8*>(compressed.get());
dataBufferLength = compressedLength;
rv = stmt->BindBlobByName(data, dataBuffer, dataBufferLength);
rv = stmt->BindBlobByName(NS_LITERAL_CSTRING("data"), buffer,
bufferLength);
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
rv = stmt->Execute();
@@ -2435,13 +2372,15 @@ CreateIndexHelper::InsertDataFromObjectStore(mozIStorageConnection* aConnection)
JSAutoRequest ar(cx);
do {
JSAutoStructuredCloneBuffer buffer;
rv = IDBObjectStore::GetStructuredCloneDataFromStatement(stmt, 1, buffer);
NS_ENSURE_SUCCESS(rv, rv);
const PRUint8* data;
PRUint32 dataLength;
rv = stmt->GetSharedBlob(1, &dataLength, &data);
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
jsval clone;
if (!buffer.read(cx, &clone)) {
NS_WARNING("Failed to deserialize structured clone data!");
if (!JS_ReadStructuredClone(cx, reinterpret_cast<const uint64*>(data),
dataLength, JS_STRUCTURED_CLONE_VERSION,
&clone, NULL, NULL)) {
return NS_ERROR_DOM_DATA_CLONE_ERR;
}
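For context on the IDBObjectStore hunks above: the backed-out change ran structured-clone data through Snappy before binding it as a blob, and reversed that on read. A minimal standalone round-trip sketch of that pattern, using only the snappy calls visible in this diff (the function and buffer names here are illustrative, not part of the patch):

#include <cstring>
#include <vector>
#include "snappy/snappy.h"

// Sketch only: compress a buffer and uncompress it again, mirroring the
// removed AddHelper / GetStructuredCloneDataFromStatement pattern.
bool RoundTrip(const char* input, size_t inputLength)
{
  // Worst-case output size; RawCompress writes the real size back.
  size_t compressedLength = snappy::MaxCompressedLength(inputLength);
  std::vector<char> compressed(compressedLength);
  snappy::RawCompress(input, inputLength, &compressed[0], &compressedLength);

  // The uncompressed size is stored inside the compressed stream, so it has
  // to be read out before the destination buffer can be allocated.
  size_t uncompressedLength;
  if (!snappy::GetUncompressedLength(&compressed[0], compressedLength,
                                     &uncompressedLength)) {
    return false;
  }
  std::vector<char> uncompressed(uncompressedLength);
  if (!snappy::RawUncompress(&compressed[0], compressedLength,
                             &uncompressed[0])) {
    return false;
  }
  return uncompressedLength == inputLength &&
         std::memcmp(&uncompressed[0], input, inputLength) == 0;
}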

View file

@@ -51,6 +51,8 @@
#include "nsStringGlue.h"
#include "nsTArray.h"
#define DB_SCHEMA_VERSION 8
#define BEGIN_INDEXEDDB_NAMESPACE \
namespace mozilla { namespace dom { namespace indexedDB {

View file

@@ -39,7 +39,7 @@
DEPTH = ../..
topsrcdir = @top_srcdir@
srcdir = @srcdir@
VPATH = @srcdir@
VPATH = @srcdir@
include $(DEPTH)/config/autoconf.mk

View file

@@ -37,63 +37,21 @@
* ***** END LICENSE BLOCK ***** */
#include "OpenDatabaseHelper.h"
#include "nsIFile.h"
#include "mozilla/storage.h"
#include "nsContentUtils.h"
#include "nsEscape.h"
#include "nsThreadUtils.h"
#include "snappy/snappy.h"
#include "IDBEvents.h"
#include "IDBFactory.h"
#include "IndexedDatabaseManager.h"
#include "mozilla/storage.h"
#include "nsIFile.h"
#include "nsContentUtils.h"
#include "nsEscape.h"
#include "nsThreadUtils.h"
USING_INDEXEDDB_NAMESPACE
namespace {
// If JS_STRUCTURED_CLONE_VERSION changes then we need to update our major
// schema version.
PR_STATIC_ASSERT(JS_STRUCTURED_CLONE_VERSION == 1);
// Major schema version. Bump for almost everything.
const PRUint32 kMajorSchemaVersion = 9;
// Minor schema version. Should almost always be 0 (maybe bump on release
// branches if we have to).
const PRUint32 kMinorSchemaVersion = 0;
// The schema version we store in the SQLite database is a (signed) 32-bit
// integer. The major version is left-shifted 4 bits so the max value is
// 0xFFFFFFF. The minor version occupies the lower 4 bits and its max is 0xF.
PR_STATIC_ASSERT(kMajorSchemaVersion <= 0xFFFFFFF);
PR_STATIC_ASSERT(kMinorSchemaVersion <= 0xF);
inline
PRInt32
MakeSchemaVersion(PRUint32 aMajorSchemaVersion,
PRUint32 aMinorSchemaVersion)
{
return PRInt32((aMajorSchemaVersion << 4) + aMinorSchemaVersion);
}
const PRInt32 kSQLiteSchemaVersion = PRInt32((kMajorSchemaVersion << 4) +
kMinorSchemaVersion);
inline
PRUint32 GetMajorSchemaVersion(PRInt32 aSchemaVersion)
{
return PRUint32(aSchemaVersion) >> 4;
}
inline
PRUint32 GetMinorSchemaVersion(PRInt32 aSchemaVersion)
{
return PRUint32(aSchemaVersion) & 0xF;
}
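// Worked example of the packing above (illustrative, not part of the patch):
//   MakeSchemaVersion(9, 0)    == (9 << 4) + 0 == 0x90 == 144
//   GetMajorSchemaVersion(144) == 144 >> 4     == 9
//   GetMinorSchemaVersion(144) == 144 & 0xF    == 0
// A minor bump would store 145 (0x91); only the major version touches the
// upper 28 bits, which is why its limit is 0xFFFFFFF.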
nsresult
GetDatabaseFile(const nsACString& aASCIIOrigin,
const nsAString& aName,
@@ -154,7 +112,8 @@ CreateTables(mozIStorageConnection* aDBConn)
nsresult rv = aDBConn->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
"CREATE TABLE database ("
"name TEXT NOT NULL, "
"version INTEGER NOT NULL DEFAULT 0"
"version INTEGER NOT NULL DEFAULT 0, "
"dataVersion INTEGER NOT NULL"
");"
));
NS_ENSURE_SUCCESS(rv, rv);
@@ -310,17 +269,44 @@ CreateTables(mozIStorageConnection* aDBConn)
));
NS_ENSURE_SUCCESS(rv, rv);
rv = aDBConn->SetSchemaVersion(kSQLiteSchemaVersion);
rv = aDBConn->SetSchemaVersion(DB_SCHEMA_VERSION);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
CreateMetaData(mozIStorageConnection* aConnection,
const nsAString& aName)
{
NS_PRECONDITION(!NS_IsMainThread(), "Wrong thread!");
NS_PRECONDITION(aConnection, "Null database!");
nsCOMPtr<mozIStorageStatement> stmt;
nsresult rv = aConnection->CreateStatement(NS_LITERAL_CSTRING(
"INSERT OR REPLACE INTO database (name, dataVersion) "
"VALUES (:name, :dataVersion)"
), getter_AddRefs(stmt));
NS_ENSURE_SUCCESS(rv, rv);
rv = stmt->BindStringByName(NS_LITERAL_CSTRING("name"), aName);
NS_ENSURE_SUCCESS(rv, rv);
rv = stmt->BindInt64ByName(NS_LITERAL_CSTRING("dataVersion"),
JS_STRUCTURED_CLONE_VERSION);
NS_ENSURE_SUCCESS(rv, rv);
return stmt->Execute();
}
nsresult
UpgradeSchemaFrom4To5(mozIStorageConnection* aConnection)
{
nsresult rv;
mozStorageTransaction transaction(aConnection, false,
mozIStorageConnection::TRANSACTION_IMMEDIATE);
// All we changed is the type of the version column, so lets try to
// convert that to an integer, and if we fail, set it to 0.
nsCOMPtr<mozIStorageStatement> stmt;
@@ -397,14 +383,26 @@ UpgradeSchemaFrom4To5(mozIStorageConnection* aConnection)
rv = aConnection->SetSchemaVersion(5);
NS_ENSURE_SUCCESS(rv, rv);
rv = transaction.Commit();
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
UpgradeSchemaFrom5To6(mozIStorageConnection* aConnection)
{
// First, drop all the indexes we're no longer going to use.
mozStorageTransaction transaction(aConnection, false,
mozIStorageConnection::TRANSACTION_IMMEDIATE);
// Turn off foreign key constraints before we do anything here.
nsresult rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
"PRAGMA foreign_keys = OFF;"
));
NS_ENSURE_SUCCESS(rv, rv);
// First, drop all the indexes we're no longer going to use.
rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
"DROP INDEX key_index;"
));
NS_ENSURE_SUCCESS(rv, rv);
@@ -750,18 +748,30 @@ UpgradeSchemaFrom5To6(mozIStorageConnection* aConnection)
rv = aConnection->SetSchemaVersion(6);
NS_ENSURE_SUCCESS(rv, rv);
rv = transaction.Commit();
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
UpgradeSchemaFrom6To7(mozIStorageConnection* aConnection)
{
mozStorageTransaction transaction(aConnection, false,
mozIStorageConnection::TRANSACTION_IMMEDIATE);
// Turn off foreign key constraints before we do anything here.
nsresult rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
"PRAGMA foreign_keys = OFF;"
));
NS_ENSURE_SUCCESS(rv, rv);
rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
"CREATE TEMPORARY TABLE temp_upgrade ("
"id, "
"name, "
"key_path, "
"auto_increment"
"auto_increment, "
");"
));
NS_ENSURE_SUCCESS(rv, rv);
@@ -804,20 +814,33 @@ UpgradeSchemaFrom6To7(mozIStorageConnection* aConnection)
rv = aConnection->SetSchemaVersion(7);
NS_ENSURE_SUCCESS(rv, rv);
rv = transaction.Commit();
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
UpgradeSchemaFrom7To8(mozIStorageConnection* aConnection)
{
mozStorageTransaction transaction(aConnection, false,
mozIStorageConnection::TRANSACTION_IMMEDIATE);
// Turn off foreign key constraints before we do anything here.
nsresult rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
"PRAGMA foreign_keys = OFF;"
));
NS_ENSURE_SUCCESS(rv, rv);
rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
"CREATE TEMPORARY TABLE temp_upgrade ("
"id, "
"object_store_id, "
"name, "
"key_path, "
"unique_index, "
"object_store_autoincrement"
"object_store_autoincrement, "
");"
));
NS_ENSURE_SUCCESS(rv, rv);
@@ -825,7 +848,7 @@ UpgradeSchemaFrom7To8(mozIStorageConnection* aConnection)
rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
"INSERT INTO temp_upgrade "
"SELECT id, object_store_id, name, key_path, "
"unique_index, object_store_autoincrement "
"unique_index, object_store_autoincrement, "
"FROM object_store_index;"
));
NS_ENSURE_SUCCESS(rv, rv);
@@ -855,7 +878,7 @@ UpgradeSchemaFrom7To8(mozIStorageConnection* aConnection)
rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
"INSERT INTO object_store_index "
"SELECT id, object_store_id, name, key_path, "
"unique_index, 0, object_store_autoincrement "
"unique_index, 0, object_store_autoincrement, "
"FROM temp_upgrade;"
));
NS_ENSURE_SUCCESS(rv, rv);
@@ -868,93 +891,7 @@ UpgradeSchemaFrom7To8(mozIStorageConnection* aConnection)
rv = aConnection->SetSchemaVersion(8);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
class CompressDataBlobsFunction : public mozIStorageFunction
{
public:
NS_DECL_ISUPPORTS
NS_IMETHOD
OnFunctionCall(mozIStorageValueArray* aArguments,
nsIVariant** aResult)
{
PRUint32 argc;
nsresult rv = aArguments->GetNumEntries(&argc);
NS_ENSURE_SUCCESS(rv, rv);
if (argc != 1) {
NS_WARNING("Don't call me with the wrong number of arguments!");
return NS_ERROR_UNEXPECTED;
}
PRInt32 type;
rv = aArguments->GetTypeOfIndex(0, &type);
NS_ENSURE_SUCCESS(rv, rv);
if (type != mozIStorageStatement::VALUE_TYPE_BLOB) {
NS_WARNING("Don't call me with the wrong type of arguments!");
return NS_ERROR_UNEXPECTED;
}
const PRUint8* uncompressed;
PRUint32 uncompressedLength;
rv = aArguments->GetSharedBlob(0, &uncompressedLength, &uncompressed);
NS_ENSURE_SUCCESS(rv, rv);
size_t compressedLength = snappy::MaxCompressedLength(uncompressedLength);
nsAutoArrayPtr<char> compressed(new char[compressedLength]);
snappy::RawCompress(reinterpret_cast<const char*>(uncompressed),
uncompressedLength, compressed.get(),
&compressedLength);
std::pair<const void *, int> data(static_cast<void*>(compressed.get()),
int(compressedLength));
// XXX This copies the buffer again... There doesn't appear to be any way to
// preallocate space and write directly to a BlobVariant at the moment.
nsCOMPtr<nsIVariant> result = new mozilla::storage::BlobVariant(data);
result.forget(aResult);
return NS_OK;
}
};
NS_IMPL_ISUPPORTS1(CompressDataBlobsFunction, mozIStorageFunction)
nsresult
UpgradeSchemaFrom8To9_0(mozIStorageConnection* aConnection)
{
// We no longer use the dataVersion column.
nsresult rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
"UPDATE database SET dataVersion = 0;"
));
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<mozIStorageFunction> compressor = new CompressDataBlobsFunction();
NS_NAMED_LITERAL_CSTRING(compressorName, "compress");
rv = aConnection->CreateFunction(compressorName, 1, compressor);
NS_ENSURE_SUCCESS(rv, rv);
// Turn off foreign key constraints before we do anything here.
rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
"UPDATE object_data SET data = compress(data);"
));
NS_ENSURE_SUCCESS(rv, rv);
rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
"UPDATE ai_object_data SET data = compress(data);"
));
NS_ENSURE_SUCCESS(rv, rv);
rv = aConnection->RemoveFunction(compressorName);
NS_ENSURE_SUCCESS(rv, rv);
rv = aConnection->SetSchemaVersion(MakeSchemaVersion(9, 0));
rv = transaction.Commit();
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
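The removed 8-to-9 upgrade above relies on a general mozStorage pattern: register a custom SQL function on the connection, rewrite existing rows with it in plain SQL, then unregister it. A minimal sketch of that pattern (the MyBlobRewriter class and the "rewrite" function name are made up for illustration; the backed-out patch used CompressDataBlobsFunction and "compress"):

// Sketch only: assumes the usual Mozilla storage headers (mozilla/storage.h)
// and an existing mozIStorageFunction implementation called MyBlobRewriter.
nsresult
RewriteDataColumn(mozIStorageConnection* aConnection)
{
  nsCOMPtr<mozIStorageFunction> fn = new MyBlobRewriter();

  NS_NAMED_LITERAL_CSTRING(fnName, "rewrite");

  // Make rewrite() callable from SQL on this connection, taking 1 argument.
  nsresult rv = aConnection->CreateFunction(fnName, 1, fn);
  NS_ENSURE_SUCCESS(rv, rv);

  // Rewrite every row in place; SQLite calls back into OnFunctionCall once
  // per row.
  rv = aConnection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
    "UPDATE object_data SET data = rewrite(data);"
  ));
  NS_ENSURE_SUCCESS(rv, rv);

  // The registration is connection-scoped; drop it once the migration ran.
  return aConnection->RemoveFunction(fnName);
}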
@@ -991,88 +928,50 @@ CreateDatabaseConnection(const nsAString& aName,
rv = connection->GetSchemaVersion(&schemaVersion);
NS_ENSURE_SUCCESS(rv, rv);
if (schemaVersion > kSQLiteSchemaVersion) {
NS_WARNING("Unable to open IndexedDB database, schema is too high!");
return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
}
bool vacuumNeeded = false;
if (schemaVersion != kSQLiteSchemaVersion) {
if (!schemaVersion) {
// Brand new file, initialize our tables.
mozStorageTransaction transaction(connection, false,
mozIStorageConnection::TRANSACTION_IMMEDIATE);
if (!schemaVersion) {
// Brand new file, initialize our tables.
rv = CreateTables(connection);
NS_ENSURE_SUCCESS(rv, rv);
rv = CreateTables(connection);
NS_ENSURE_SUCCESS(rv, rv);
NS_ASSERTION(NS_SUCCEEDED(connection->GetSchemaVersion(&schemaVersion)) &&
schemaVersion == kSQLiteSchemaVersion,
"CreateTables set a bad schema version!");
nsCOMPtr<mozIStorageStatement> stmt;
nsresult rv = connection->CreateStatement(NS_LITERAL_CSTRING(
"INSERT INTO database (name) "
"VALUES (:name)"
), getter_AddRefs(stmt));
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
rv = stmt->BindStringByName(NS_LITERAL_CSTRING("name"), aName);
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
rv = stmt->Execute();
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
}
else {
// This logic needs to change next time we change the schema!
PR_STATIC_ASSERT(kSQLiteSchemaVersion == PRInt32((9 << 4) + 0));
while (schemaVersion != kSQLiteSchemaVersion) {
if (schemaVersion == 4) {
rv = UpgradeSchemaFrom4To5(connection);
}
else if (schemaVersion == 5) {
rv = UpgradeSchemaFrom5To6(connection);
}
else if (schemaVersion == 6) {
rv = UpgradeSchemaFrom6To7(connection);
}
else if (schemaVersion == 7) {
rv = UpgradeSchemaFrom7To8(connection);
}
else if (schemaVersion == 8) {
rv = UpgradeSchemaFrom8To9_0(connection);
vacuumNeeded = true;
}
#if 0
else if (schemaVersion == MakeSchemaVersion(9, 0)) {
// Upgrade.
}
#endif
else {
NS_WARNING("Unable to open IndexedDB database, no upgrade path is "
"available!");
return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
}
NS_ENSURE_SUCCESS(rv, rv);
rv = connection->GetSchemaVersion(&schemaVersion);
NS_ENSURE_SUCCESS(rv, rv);
}
NS_ASSERTION(schemaVersion == kSQLiteSchemaVersion, "Huh?!");
}
rv = CreateMetaData(connection, aName);
NS_ENSURE_SUCCESS(rv, rv);
rv = transaction.Commit();
NS_ENSURE_SUCCESS(rv, rv);
NS_ASSERTION(NS_SUCCEEDED(connection->GetSchemaVersion(&schemaVersion)) &&
schemaVersion == DB_SCHEMA_VERSION,
"CreateTables set a bad schema version!");
}
else if (schemaVersion != DB_SCHEMA_VERSION) {
// This logic needs to change next time we change the schema!
PR_STATIC_ASSERT(DB_SCHEMA_VERSION == 8);
#define UPGRADE_SCHEMA_CASE(_from, _to) \
if (schemaVersion == _from) { \
rv = UpgradeSchemaFrom##_from##To##_to (connection); \
NS_ENSURE_SUCCESS(rv, rv); \
\
rv = connection->GetSchemaVersion(&schemaVersion); \
NS_ENSURE_SUCCESS(rv, rv); \
\
NS_ASSERTION(schemaVersion == _to, "Bad upgrade function!"); \
}
if (vacuumNeeded) {
rv = connection->ExecuteSimpleSQL(NS_LITERAL_CSTRING(
"VACUUM;"
));
NS_ENSURE_SUCCESS(rv, rv);
UPGRADE_SCHEMA_CASE(4, 5)
UPGRADE_SCHEMA_CASE(5, 6)
UPGRADE_SCHEMA_CASE(6, 7)
UPGRADE_SCHEMA_CASE(7, 8)
#undef UPGRADE_SCHEMA_CASE
if (schemaVersion != DB_SCHEMA_VERSION) {
NS_WARNING("Unable to open IndexedDB database, schema doesn't match");
return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
}
}
// Turn on foreign key constraints.
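For clarity on the restored UPGRADE_SCHEMA_CASE macro above: the ## token pasting builds the upgrade-function name from the two version numbers, so UPGRADE_SCHEMA_CASE(4, 5) expands to roughly:

if (schemaVersion == 4) {
  rv = UpgradeSchemaFrom4To5(connection);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = connection->GetSchemaVersion(&schemaVersion);
  NS_ENSURE_SUCCESS(rv, rv);

  NS_ASSERTION(schemaVersion == 5, "Bad upgrade function!");
}

Each case only runs when the on-disk version matches its starting version, so the four cases walk an old database up one schema step at a time until the final check against DB_SCHEMA_VERSION.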
@@ -1381,15 +1280,41 @@ OpenDatabaseHelper::DoDatabaseWork()
rv = CreateDatabaseConnection(mName, dbFile, getter_AddRefs(connection));
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
rv = IDBFactory::LoadDatabaseInformation(connection, mDatabaseId,
&mCurrentVersion, mObjectStores);
// Get the data version.
nsCOMPtr<mozIStorageStatement> stmt;
rv = connection->CreateStatement(NS_LITERAL_CSTRING(
"SELECT dataVersion "
"FROM database"
), getter_AddRefs(stmt));
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
if (mForDeletion) {
mState = eDeletePending;
return NS_OK;
bool hasResult;
rv = stmt->ExecuteStep(&hasResult);
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
if (!hasResult) {
NS_ERROR("Database has no dataVersion!");
return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
}
PRInt64 dataVersion;
rv = stmt->GetInt64(0, &dataVersion);
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
if (dataVersion > JS_STRUCTURED_CLONE_VERSION) {
NS_ERROR("Bad data version!");
return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
}
if (dataVersion < JS_STRUCTURED_CLONE_VERSION) {
// Need to upgrade the database, here, before returning to the main thread.
NS_NOTYETIMPLEMENTED("Implement me!");
}
rv = IDBFactory::LoadDatabaseInformation(connection, mDatabaseId, &mCurrentVersion,
mObjectStores);
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
for (PRUint32 i = 0; i < mObjectStores.Length(); i++) {
nsAutoPtr<ObjectStoreInfo>& objectStoreInfo = mObjectStores[i];
for (PRUint32 j = 0; j < objectStoreInfo->indexes.Length(); j++) {
@@ -1399,6 +1324,11 @@ OpenDatabaseHelper::DoDatabaseWork()
mLastObjectStoreId = NS_MAX(objectStoreInfo->id, mLastObjectStoreId);
}
if (mForDeletion) {
mState = eDeletePending;
return NS_OK;
}
// See if we need to do a VERSION_CHANGE transaction
// Optional version semantics.

View file

@@ -61,8 +61,8 @@ public:
: HelperBase(aRequest), mOpenDBRequest(aRequest), mName(aName),
mASCIIOrigin(aASCIIOrigin), mRequestedVersion(aRequestedVersion),
mForDeletion(aForDeletion), mDatabaseId(nsnull), mCurrentVersion(0),
mLastObjectStoreId(0), mLastIndexId(0), mState(eCreated),
mResultCode(NS_OK)
mDataVersion(DB_SCHEMA_VERSION), mLastObjectStoreId(0),
mLastIndexId(0), mState(eCreated), mResultCode(NS_OK)
{
NS_ASSERTION(!aForDeletion || !aRequestedVersion,
"Can't be for deletion and request a version!");
@@ -128,6 +128,7 @@ private:
// Out-params.
nsTArray<nsAutoPtr<ObjectStoreInfo> > mObjectStores;
PRUint64 mCurrentVersion;
PRUint32 mDataVersion;
nsString mDatabaseFilePath;
PRInt64 mLastObjectStoreId;
PRInt64 mLastIndexId;

View file

@@ -56,91 +56,91 @@ namespace std {
// doing this after careful review because we want to define our own
// exception throwing semantics. Don't try this at home!
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_bad_exception(void)
{
mozalloc_abort("fatal: STL threw bad_exception");
}
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_bad_alloc(void)
{
mozalloc_abort("fatal: STL threw bad_alloc");
}
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_bad_cast(void)
{
mozalloc_abort("fatal: STL threw bad_cast");
}
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_bad_typeid(void)
{
mozalloc_abort("fatal: STL threw bad_typeid");
}
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_logic_error(const char* msg)
{
mozalloc_abort(msg);
}
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_domain_error(const char* msg)
{
mozalloc_abort(msg);
}
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_invalid_argument(const char* msg)
{
mozalloc_abort(msg);
}
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_length_error(const char* msg)
{
mozalloc_abort(msg);
}
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_out_of_range(const char* msg)
{
mozalloc_abort(msg);
}
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_runtime_error(const char* msg)
{
mozalloc_abort(msg);
}
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_range_error(const char* msg)
{
mozalloc_abort(msg);
}
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_overflow_error(const char* msg)
{
mozalloc_abort(msg);
}
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_underflow_error(const char* msg)
{
mozalloc_abort(msg);
}
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_ios_failure(const char* msg)
{
mozalloc_abort(msg);
}
NS_ALWAYS_INLINE inline void NS_NORETURN
inline void NS_NORETURN
__throw_system_error(int err)
{
char error[128];

View file

@@ -1,73 +0,0 @@
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Mozilla Firefox
#
# The Initial Developer of the Original Code is
# The Mozilla Foundation.
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Ben Turner <bent.mozilla@gmail.com>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
DEPTH = ../..
topsrcdir = @top_srcdir@
srcdir = @srcdir@
VPATH = \
@srcdir@ \
$(topsrcdir)/other-licenses/snappy/src \
$(NULL)
include $(DEPTH)/config/autoconf.mk
LIBRARY_NAME = snappy_s
FORCE_STATIC_LIB = 1
LIBXUL_LIBRARY = 1
EXPORT_LIBRARY = 1
CPPSRCS = \
snappy.cc \
snappy-sinksource.cc \
snappy-stubs-internal.cc \
$(NULL)
CSRCS = \
snappy-c.cc \
$(NULL)
EXPORTS_NAMESPACES = snappy
EXPORTS_snappy = \
snappy.h \
snappy-c.h \
snappy-stubs-public.h \
$(NULL)
include $(topsrcdir)/config/rules.mk

View file

@@ -1,22 +0,0 @@
See src/README for the README that ships with snappy.
Mozilla does not modify the actual snappy source with the exception of the
'snappy-stubs-public.h' header. We have replaced its build system with our own.
Snappy comes from:
http://code.google.com/p/snappy/
To upgrade to a newer version:
1. Check out the new code using subversion.
2. Update 'snappy-stubs-public.h' in this directory with any changes that were
made to 'snappy-stubs-public.h.in' in the new source.
3. Copy the major/minor/patch versions from 'configure.ac' into
'snappy-stubs-public.h'.
4. Copy all source files from the new version into the src subdirectory. The
following files are not needed:
- 'autom4te.cache' subdirectory
- 'm4' subdirectory
- 'testdata' subdirectory
- 'autogen.sh'
- 'configure.ac'
- 'Makefile.am'

View file

@@ -1,68 +0,0 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: sesse@google.com (Steinar H. Gunderson)
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various type stubs for the open-source version of Snappy.
//
// This file cannot include config.h, as it is included from snappy.h,
// which is a public header. Instead, snappy-stubs-public.h is generated
// from snappy-stubs-public.h.in at configure time.
#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
#include "prtypes.h"
#define SNAPPY_MAJOR 1
#define SNAPPY_MINOR 0
#define SNAPPY_PATCHLEVEL 4
#define SNAPPY_VERSION \
((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
#include <string>
namespace snappy {
typedef PRInt8 int8;
typedef PRUint8 uint8;
typedef PRInt16 int16;
typedef PRUint16 uint16;
typedef PRInt32 int32;
typedef PRUint32 uint32;
typedef PRInt64 int64;
typedef PRUint64 uint64;
typedef std::string string;
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
} // namespace snappy
#endif // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
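A short illustration of how the DISALLOW_COPY_AND_ASSIGN macro above is meant to be used (the class name is made up). Placed in a private section, it declares the copy constructor and copy assignment operator without ever defining them, so accidental copies fail to compile outside the class and fail to link inside it; this is the pre-C++11 equivalent of '= delete':

class ScratchBuffer {
 public:
  ScratchBuffer() {}

 private:
  // Copying is declared but never defined, so it cannot be used.
  DISALLOW_COPY_AND_ASSIGN(ScratchBuffer);
};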

View file

@@ -1 +0,0 @@
opensource@google.com

View file

@@ -1,28 +0,0 @@
Copyright 2011, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@@ -1,801 +0,0 @@
------------------------------------------------------------------------
r49 | snappy.mirrorbot@gmail.com | 2011-09-15 11:50:05 +0200 (Thu, 15 Sep 2011) | 5 lines
Fix public issue #50: Include generic byteswap macros.
Also include Solaris 10 and FreeBSD versions.
R=csilvers
------------------------------------------------------------------------
r48 | snappy.mirrorbot@gmail.com | 2011-08-10 20:57:27 +0200 (Wed, 10 Aug 2011) | 5 lines
Partially fix public issue 50: Remove an extra comma from the end of some
enum declarations, as it seems the Sun compiler does not like it.
Based on patch by Travis Vitek.
------------------------------------------------------------------------
r47 | snappy.mirrorbot@gmail.com | 2011-08-10 20:44:16 +0200 (Wed, 10 Aug 2011) | 4 lines
Use the right #ifdef test for sys/mman.h.
Based on patch by Travis Vitek.
------------------------------------------------------------------------
r46 | snappy.mirrorbot@gmail.com | 2011-08-10 03:22:09 +0200 (Wed, 10 Aug 2011) | 6 lines
Fix public issue #47: Small comment cleanups in the unit test.
Originally based on a patch by Patrick Pelletier.
R=sanjay
------------------------------------------------------------------------
r45 | snappy.mirrorbot@gmail.com | 2011-08-10 03:14:43 +0200 (Wed, 10 Aug 2011) | 8 lines
Fix public issue #46: Format description said "3-byte offset"
instead of "4-byte offset" for the longest copies.
Also fix an inconsistency in the heading for section 2.2.3.
Both patches by Patrick Pelletier.
R=csilvers
------------------------------------------------------------------------
r44 | snappy.mirrorbot@gmail.com | 2011-06-28 13:40:25 +0200 (Tue, 28 Jun 2011) | 8 lines
Fix public issue #44: Make the definition and declaration of CompressFragment
identical, even regarding cv-qualifiers.
This is required to work around a bug in the Solaris Studio C++ compiler
(it does not properly disregard cv-qualifiers when doing name mangling).
R=sanjay
------------------------------------------------------------------------
r43 | snappy.mirrorbot@gmail.com | 2011-06-04 12:19:05 +0200 (Sat, 04 Jun 2011) | 7 lines
Correct an inaccuracy in the Snappy format description.
(I stumbled into this when changing the way we decompress literals.)
R=csilvers
Revision created by MOE tool push_codebase.
------------------------------------------------------------------------
r42 | snappy.mirrorbot@gmail.com | 2011-06-03 22:53:06 +0200 (Fri, 03 Jun 2011) | 50 lines
Speed up decompression by removing a fast-path attempt.
Whenever we try to enter a copy fast-path, there is a certain cost in checking
that all the preconditions are in place, but it's normally offset by the fact
that we can usually take the cheaper path. However, in a certain path we've
already established that "avail < literal_length", which usually means that
either the available space is small, or the literal is big. Both will disqualify
us from taking the fast path, and thus we take the hit from the precondition
checking without gaining much from having a fast path. Thus, simply don't try
the fast path in this situation -- we're already on a slow path anyway
(one where we need to refill more data from the reader).
I'm a bit surprised at how much this gained; it could be that this path is
more common than I thought, or that the simpler structure somehow makes the
compiler happier. I haven't looked at the assembler, but it's a win across
the board on both Core 2, Core i7 and Opteron, at least for the cases we
typically care about. The gains seem to be the largest on Core i7, though.
Results from my Core i7 workstation:
Benchmark Time(ns) CPU(ns) Iterations
---------------------------------------------------
BM_UFlat/0 73337 73091 190996 1.3GB/s html [ +1.7%]
BM_UFlat/1 696379 693501 20173 965.5MB/s urls [ +2.7%]
BM_UFlat/2 9765 9734 1472135 12.1GB/s jpg [ +0.7%]
BM_UFlat/3 29720 29621 472973 3.0GB/s pdf [ +1.8%]
BM_UFlat/4 294636 293834 47782 1.3GB/s html4 [ +2.3%]
BM_UFlat/5 28399 28320 494700 828.5MB/s cp [ +3.5%]
BM_UFlat/6 12795 12760 1000000 833.3MB/s c [ +1.2%]
BM_UFlat/7 3984 3973 3526448 893.2MB/s lsp [ +5.7%]
BM_UFlat/8 991996 989322 14141 992.6MB/s xls [ +3.3%]
BM_UFlat/9 228620 227835 61404 636.6MB/s txt1 [ +4.0%]
BM_UFlat/10 197114 196494 72165 607.5MB/s txt2 [ +3.5%]
BM_UFlat/11 605240 603437 23217 674.4MB/s txt3 [ +3.7%]
BM_UFlat/12 804157 802016 17456 573.0MB/s txt4 [ +3.9%]
BM_UFlat/13 347860 346998 40346 1.4GB/s bin [ +1.2%]
BM_UFlat/14 44684 44559 315315 818.4MB/s sum [ +2.3%]
BM_UFlat/15 5120 5106 2739726 789.4MB/s man [ +3.3%]
BM_UFlat/16 76591 76355 183486 1.4GB/s pb [ +2.8%]
BM_UFlat/17 238564 237828 58824 739.1MB/s gaviota [ +1.6%]
BM_UValidate/0 42194 42060 333333 2.3GB/s html [ -0.1%]
BM_UValidate/1 433182 432005 32407 1.5GB/s urls [ -0.1%]
BM_UValidate/2 197 196 71428571 603.3GB/s jpg [ +0.5%]
BM_UValidate/3 14494 14462 972222 6.1GB/s pdf [ +0.5%]
BM_UValidate/4 168444 167836 83832 2.3GB/s html4 [ +0.1%]
R=jeff
Revision created by MOE tool push_codebase.
------------------------------------------------------------------------
r41 | snappy.mirrorbot@gmail.com | 2011-06-03 22:47:14 +0200 (Fri, 03 Jun 2011) | 43 lines
Speed up decompression by not needing a lookup table for literal items.
Looking up into and decoding the values from char_table has long shown up as a
hotspot in the decompressor. While it turns out that it's hard to make a more
efficient decoder for the copy ops, the literals are simple enough that we can
decode them without needing a table lookup. (This means that 1/4 of the table
is now unused, although that in itself doesn't buy us anything.)
The gains are small, but definitely present; some tests win as much as 10%,
but 1-4% is more typical. These results are from Core i7, in 64-bit mode;
Core 2 and Opteron show similar results. (I've run with more iterations
than unusual to make sure the smaller gains don't drown entirely in noise.)
Benchmark Time(ns) CPU(ns) Iterations
---------------------------------------------------
BM_UFlat/0 74665 74428 182055 1.3GB/s html [ +3.1%]
BM_UFlat/1 714106 711997 19663 940.4MB/s urls [ +4.4%]
BM_UFlat/2 9820 9789 1427115 12.1GB/s jpg [ -1.2%]
BM_UFlat/3 30461 30380 465116 2.9GB/s pdf [ +0.8%]
BM_UFlat/4 301445 300568 46512 1.3GB/s html4 [ +2.2%]
BM_UFlat/5 29338 29263 479452 801.8MB/s cp [ +1.6%]
BM_UFlat/6 13004 12970 1000000 819.9MB/s c [ +2.1%]
BM_UFlat/7 4180 4168 3349282 851.4MB/s lsp [ +1.3%]
BM_UFlat/8 1026149 1024000 10000 959.0MB/s xls [+10.7%]
BM_UFlat/9 237441 236830 59072 612.4MB/s txt1 [ +0.3%]
BM_UFlat/10 203966 203298 69307 587.2MB/s txt2 [ +0.8%]
BM_UFlat/11 627230 625000 22400 651.2MB/s txt3 [ +0.7%]
BM_UFlat/12 836188 833979 16787 551.0MB/s txt4 [ +1.3%]
BM_UFlat/13 351904 350750 39886 1.4GB/s bin [ +3.8%]
BM_UFlat/14 45685 45562 308370 800.4MB/s sum [ +5.9%]
BM_UFlat/15 5286 5270 2656546 764.9MB/s man [ +1.5%]
BM_UFlat/16 78774 78544 178117 1.4GB/s pb [ +4.3%]
BM_UFlat/17 242270 241345 58091 728.3MB/s gaviota [ +1.2%]
BM_UValidate/0 42149 42000 333333 2.3GB/s html [ -3.0%]
BM_UValidate/1 432741 431303 32483 1.5GB/s urls [ +7.8%]
BM_UValidate/2 198 197 71428571 600.7GB/s jpg [+16.8%]
BM_UValidate/3 14560 14521 965517 6.1GB/s pdf [ -4.1%]
BM_UValidate/4 169065 168671 83832 2.3GB/s html4 [ -2.9%]
R=jeff
Revision created by MOE tool push_codebase.
------------------------------------------------------------------------
r40 | snappy.mirrorbot@gmail.com | 2011-06-03 00:57:41 +0200 (Fri, 03 Jun 2011) | 2 lines
Release Snappy 1.0.3.
------------------------------------------------------------------------
r39 | snappy.mirrorbot@gmail.com | 2011-06-02 20:06:54 +0200 (Thu, 02 Jun 2011) | 11 lines
Remove an unneeded goto in the decompressor; it turns out that the
state of ip_ after decompression (or attempted decompresion) is
completely irrelevant, so we don't need the trailer.
Performance is, as expected, mostly flat -- there's a curious ~3–5%
loss in the “lsp” test, but that test case is so short it is hard to say
anything definitive about why (most likely, it's some sort of
unrelated effect).
R=jeff
------------------------------------------------------------------------
r38 | snappy.mirrorbot@gmail.com | 2011-06-02 19:59:40 +0200 (Thu, 02 Jun 2011) | 52 lines
Speed up decompression by caching ip_.
It is seemingly hard for the compiler to understand that ip_, the current input
pointer into the compressed data stream, can not alias on anything else, and
thus using it directly will incur memory traffic as it cannot be kept in a
register. The code already knew about this and cached it into a local
variable, but since Step() only decoded one tag, it had to move ip_ back into
place between every tag. This seems to have cost us a significant amount of
performance, so changing Step() into a function that decodes as much as it can
before it saves ip_ back and returns. (Note that Step() was already inlined,
so it is not the manual inlining that buys the performance here.)
The wins are about 3–6% for Core 2, 6–13% on Core i7 and 5–12% on Opteron
(for plain array-to-array decompression, in 64-bit opt mode).
There is a tiny difference in the behavior here; if an invalid literal is
encountered (ie., the writer refuses the Append() operation), ip_ will now
point to the byte past the tag byte, instead of where the literal was
originally thought to end. However, we don't use ip_ for anything after
DecompressAllTags() has returned, so this should not change external behavior
in any way.
Microbenchmark results for Core i7, 64-bit (Opteron results are similar):
Benchmark Time(ns) CPU(ns) Iterations
---------------------------------------------------
BM_UFlat/0 79134 79110 8835 1.2GB/s html [ +6.2%]
BM_UFlat/1 786126 786096 891 851.8MB/s urls [+10.0%]
BM_UFlat/2 9948 9948 69125 11.9GB/s jpg [ -1.3%]
BM_UFlat/3 31999 31998 21898 2.7GB/s pdf [ +6.5%]
BM_UFlat/4 318909 318829 2204 1.2GB/s html4 [ +6.5%]
BM_UFlat/5 31384 31390 22363 747.5MB/s cp [ +9.2%]
BM_UFlat/6 14037 14034 49858 757.7MB/s c [+10.6%]
BM_UFlat/7 4612 4612 151395 769.5MB/s lsp [ +9.5%]
BM_UFlat/8 1203174 1203007 582 816.3MB/s xls [+19.3%]
BM_UFlat/9 253869 253955 2757 571.1MB/s txt1 [+11.4%]
BM_UFlat/10 219292 219290 3194 544.4MB/s txt2 [+12.1%]
BM_UFlat/11 672135 672131 1000 605.5MB/s txt3 [+11.2%]
BM_UFlat/12 902512 902492 776 509.2MB/s txt4 [+12.5%]
BM_UFlat/13 372110 371998 1881 1.3GB/s bin [ +5.8%]
BM_UFlat/14 50407 50407 10000 723.5MB/s sum [+13.5%]
BM_UFlat/15 5699 5701 100000 707.2MB/s man [+12.4%]
BM_UFlat/16 83448 83424 8383 1.3GB/s pb [ +5.7%]
BM_UFlat/17 256958 256963 2723 684.1MB/s gaviota [ +7.9%]
BM_UValidate/0 42795 42796 16351 2.2GB/s html [+25.8%]
BM_UValidate/1 490672 490622 1427 1.3GB/s urls [+22.7%]
BM_UValidate/2 237 237 2950297 499.0GB/s jpg [+24.9%]
BM_UValidate/3 14610 14611 47901 6.0GB/s pdf [+26.8%]
BM_UValidate/4 171973 171990 4071 2.2GB/s html4 [+25.7%]
------------------------------------------------------------------------
r37 | snappy.mirrorbot@gmail.com | 2011-05-17 10:48:25 +0200 (Tue, 17 May 2011) | 10 lines
Fix the numbering of the headlines in the Snappy format description.
R=csilvers
DELTA=4 (0 added, 0 deleted, 4 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1906
------------------------------------------------------------------------
r36 | snappy.mirrorbot@gmail.com | 2011-05-16 10:59:18 +0200 (Mon, 16 May 2011) | 12 lines
Fix public issue #32: Add compressed format documentation for Snappy.
This text is new, but an earlier version from Zeev Tarantov was used
as reference.
R=csilvers
DELTA=112 (111 added, 0 deleted, 1 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1867
------------------------------------------------------------------------
r35 | snappy.mirrorbot@gmail.com | 2011-05-09 23:29:02 +0200 (Mon, 09 May 2011) | 12 lines
Fix public issue #39: Pick out the median runs based on CPU time,
not real time. Also, use nth_element instead of sort, since we
only need one element.
R=csilvers
DELTA=5 (3 added, 0 deleted, 2 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1799
------------------------------------------------------------------------
r34 | snappy.mirrorbot@gmail.com | 2011-05-09 23:28:45 +0200 (Mon, 09 May 2011) | 19 lines
Fix public issue #38: Make the microbenchmark framework handle
properly cases where gettimeofday() can stand still (returning the same
result twice, as sometimes on GNU/Hurd) or go backwards
(as when the user adjusts the clock). We avoid a division-by-zero,
and put a lower bound on the number of iterations -- the same
amount as we use to calibrate.
We should probably use CLOCK_MONOTONIC for platforms that support
it, to be robust against clock adjustments; we already use Windows'
monotonic timers. However, that's for a later changelist.
R=csilvers
DELTA=7 (5 added, 0 deleted, 2 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1798
------------------------------------------------------------------------
r33 | snappy.mirrorbot@gmail.com | 2011-05-04 01:22:52 +0200 (Wed, 04 May 2011) | 11 lines
Fix public issue #37: Only link snappy_unittest against -lz and other autodetected
libraries, not libsnappy.so (which doesn't need any such dependency).
R=csilvers
DELTA=20 (14 added, 0 deleted, 6 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1710
------------------------------------------------------------------------
r32 | snappy.mirrorbot@gmail.com | 2011-05-04 01:22:33 +0200 (Wed, 04 May 2011) | 11 lines
Release Snappy 1.0.2, to get the license change and various other fixes into
a release.
R=csilvers
DELTA=239 (236 added, 0 deleted, 3 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1709
------------------------------------------------------------------------
r31 | snappy.mirrorbot@gmail.com | 2011-04-26 14:34:55 +0200 (Tue, 26 Apr 2011) | 15 lines
Fix public issue #30: Stop using gettimeofday() altogether on Win32,
as MSVC doesn't include it. Replace with QueryPerformanceCounter(),
which is monotonic and probably reasonably high-resolution.
(Some machines have traditionally had bugs in QPC, but they should
be relatively rare these days, and there isn't really a much better
alternative that I know of.)
R=csilvers
DELTA=74 (55 added, 19 deleted, 0 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1556
------------------------------------------------------------------------
r30 | snappy.mirrorbot@gmail.com | 2011-04-26 14:34:37 +0200 (Tue, 26 Apr 2011) | 11 lines
Fix public issue #31: Don't reset PATH in autogen.sh; instead, do the trickery
we need for our own build system internally.
R=csilvers
DELTA=16 (13 added, 1 deleted, 2 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1555
------------------------------------------------------------------------
r29 | snappy.mirrorbot@gmail.com | 2011-04-16 00:55:56 +0200 (Sat, 16 Apr 2011) | 12 lines
When including <windows.h>, define WIN32_LEAN_AND_MEAN first,
so we won't pull in macro definitions of things like min() and max(),
which can conflict with <algorithm>.
R=csilvers
DELTA=1 (1 added, 0 deleted, 0 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1485
------------------------------------------------------------------------
r28 | snappy.mirrorbot@gmail.com | 2011-04-11 11:07:01 +0200 (Mon, 11 Apr 2011) | 15 lines
Fix public issue #29: Write CPU timing code for Windows, based on GetProcessTimes()
instead of getrusage().
I thought I'd already committed this patch, so that the 1.0.1 release already
would have a Windows-compatible snappy_unittest, but I'd seemingly deleted it
instead, so this is a reconstruction.
R=csilvers
DELTA=43 (39 added, 3 deleted, 1 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1295
------------------------------------------------------------------------
r27 | snappy.mirrorbot@gmail.com | 2011-04-08 11:51:53 +0200 (Fri, 08 Apr 2011) | 22 lines
Include C bindings of Snappy, contributed by Martin Gieseking.
I've made a few changes since Martin's version; mostly style nits, but also
a semantic change -- most functions that return bool in the C++ version now
return an enum, to better match typical C (and zlib) semantics.
I've kept the copyright notice, since Martin is obviously the author here;
he has signed the contributor license agreement, though, so this should not
hinder Google's use in the future.
We'll need to update the libtool version number to match the added interface,
but as of http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
I'm going to wait until public release.
R=csilvers
DELTA=238 (233 added, 0 deleted, 5 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1294
------------------------------------------------------------------------
r26 | snappy.mirrorbot@gmail.com | 2011-04-07 18:36:43 +0200 (Thu, 07 Apr 2011) | 13 lines
Replace geo.protodata with a newer version.
The data compresses/decompresses slightly faster than the old data, and has
similar density.
R=lookingbill
DELTA=1 (0 added, 0 deleted, 1 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1288
------------------------------------------------------------------------
r25 | snappy.mirrorbot@gmail.com | 2011-03-30 22:27:53 +0200 (Wed, 30 Mar 2011) | 12 lines
Fix public issue #27: Add HAVE_CONFIG_H tests around the config.h
inclusion in snappy-stubs-internal.h, which eases compiling outside the
automake/autoconf framework.
R=csilvers
DELTA=5 (4 added, 1 deleted, 0 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1152
------------------------------------------------------------------------
r24 | snappy.mirrorbot@gmail.com | 2011-03-30 22:27:39 +0200 (Wed, 30 Mar 2011) | 13 lines
Fix public issue #26: Take memory allocation and reallocation entirely out of the
Measure() loop. This gives all algorithms a small speed boost, except Snappy which
already didn't do reallocation (so the measurements were slightly biased in its
favor).
R=csilvers
DELTA=92 (69 added, 9 deleted, 14 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1151
------------------------------------------------------------------------
r23 | snappy.mirrorbot@gmail.com | 2011-03-30 22:25:09 +0200 (Wed, 30 Mar 2011) | 18 lines
Renamed "namespace zippy" to "namespace snappy" to reduce
the differences from the opensource code. Will make it easier
in the future to mix-and-match third-party code that uses
snappy with google code.
Currently, csearch shows that the only external user of
"namespace zippy" is some bigtable code that accesses
a TEST variable, which is temporarily kept in the zippy
namespace.
R=sesse
DELTA=123 (18 added, 3 deleted, 102 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1150
------------------------------------------------------------------------
r22 | snappy.mirrorbot@gmail.com | 2011-03-29 00:17:04 +0200 (Tue, 29 Mar 2011) | 11 lines
Put back the final few lines of what was truncated during the
license header change.
R=csilvers
DELTA=5 (4 added, 0 deleted, 1 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1094
------------------------------------------------------------------------
r21 | snappy.mirrorbot@gmail.com | 2011-03-26 03:34:34 +0100 (Sat, 26 Mar 2011) | 20 lines
Change on 2011-03-25 19:18:00-07:00 by sesse
Replace the Apache 2.0 license header by the BSD-type license header;
somehow a lot of the files were missed in the last round.
R=dannyb,csilvers
DELTA=147 (74 added, 2 deleted, 71 changed)
Change on 2011-03-25 19:25:07-07:00 by sesse
Unbreak the build; the relicensing removed a bit too much (only comments
were intended, but I also accidentally removed some of the top lines of
the actual source).
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1072
------------------------------------------------------------------------
r20 | snappy.mirrorbot@gmail.com | 2011-03-25 17:14:41 +0100 (Fri, 25 Mar 2011) | 10 lines
Change Snappy from the Apache 2.0 to a BSD-type license.
R=dannyb
DELTA=328 (80 added, 184 deleted, 64 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1061
------------------------------------------------------------------------
r19 | snappy.mirrorbot@gmail.com | 2011-03-25 01:39:01 +0100 (Fri, 25 Mar 2011) | 11 lines
Release Snappy 1.0.1, to soup up all the various small changes
that have been made since release.
R=csilvers
DELTA=266 (260 added, 0 deleted, 6 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1057
------------------------------------------------------------------------
r18 | snappy.mirrorbot@gmail.com | 2011-03-24 20:15:54 +0100 (Thu, 24 Mar 2011) | 11 lines
Fix a microbenchmark crash on mingw32; seemingly %lld is not universally
supported on Windows, and %I64d is recommended instead.
R=csilvers
DELTA=6 (5 added, 0 deleted, 1 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1034
------------------------------------------------------------------------
r17 | snappy.mirrorbot@gmail.com | 2011-03-24 20:15:27 +0100 (Thu, 24 Mar 2011) | 13 lines
Fix public issue #19: Fix unit test when Google Test is installed but the
gflags package isn't (Google Test is not properly initialized).
Patch by Martin Gieseking.
R=csilvers
DELTA=2 (1 added, 0 deleted, 1 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1033
------------------------------------------------------------------------
r16 | snappy.mirrorbot@gmail.com | 2011-03-24 20:13:57 +0100 (Thu, 24 Mar 2011) | 15 lines
Make the unit test work on systems without mmap(). This is required for,
among others, Windows support. For Windows in specific, we could have used
CreateFileMapping/MapViewOfFile, but this should at least get us a bit closer
to compiling, and is of course also relevant for embedded systems with no MMU.
(Part 2/2)
R=csilvers
DELTA=15 (12 added, 3 deleted, 0 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1032
------------------------------------------------------------------------
r15 | snappy.mirrorbot@gmail.com | 2011-03-24 20:12:27 +0100 (Thu, 24 Mar 2011) | 15 lines
Make the unit test work on systems without mmap(). This is required for,
among others, Windows support. For Windows in specific, we could have used
CreateFileMapping/MapViewOfFile, but this should at least get us a bit closer
to compiling, and is of course also relevant for embedded systems with no MMU.
(Part 1/2)
R=csilvers
DELTA=9 (8 added, 0 deleted, 1 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1031
------------------------------------------------------------------------
r14 | snappy.mirrorbot@gmail.com | 2011-03-24 00:17:36 +0100 (Thu, 24 Mar 2011) | 14 lines
Fix public issue #12: Don't keep autogenerated auto* files in Subversion;
it causes problems with others sending patches etc..
We can't get this 100% hermetic anyhow, due to files like lt~obsolete.m4,
so we can just as well go cleanly in the other direction.
R=csilvers
DELTA=21038 (0 added, 21036 deleted, 2 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=1012
------------------------------------------------------------------------
r13 | snappy.mirrorbot@gmail.com | 2011-03-23 18:50:49 +0100 (Wed, 23 Mar 2011) | 11 lines
Fix public issue tracker bug #3: Call AC_SUBST([LIBTOOL_DEPS]), or the rule
to rebuild libtool in Makefile.am won't work.
R=csilvers
DELTA=1 (1 added, 0 deleted, 0 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=997
------------------------------------------------------------------------
r12 | snappy.mirrorbot@gmail.com | 2011-03-23 12:16:39 +0100 (Wed, 23 Mar 2011) | 11 lines
Fix public issue #10: Don't add GTEST_CPPFLAGS to snappy_unittest_CXXFLAGS;
it's not needed (CPPFLAGS are always included when compiling).
R=csilvers
DELTA=1 (0 added, 1 deleted, 0 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=994
------------------------------------------------------------------------
r11 | snappy.mirrorbot@gmail.com | 2011-03-23 12:16:18 +0100 (Wed, 23 Mar 2011) | 11 lines
Fix public issue #9: Add -Wall -Werror to automake flags.
(This concerns automake itself, not the C++ compiler.)
R=csilvers
DELTA=4 (3 added, 0 deleted, 1 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=993
------------------------------------------------------------------------
r10 | snappy.mirrorbot@gmail.com | 2011-03-23 12:13:37 +0100 (Wed, 23 Mar 2011) | 10 lines
Fix a typo in the Snappy README file.
R=csilvers
DELTA=1 (0 added, 0 deleted, 1 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=992
------------------------------------------------------------------------
r9 | snappy.mirrorbot@gmail.com | 2011-03-23 12:13:13 +0100 (Wed, 23 Mar 2011) | 11 lines
Fix public issue #6: Add a --with-gflags for disabling gflags autodetection
and using a manually given setting (use/don't use) instead.
R=csilvers
DELTA=16 (13 added, 0 deleted, 3 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=991
------------------------------------------------------------------------
r8 | snappy.mirrorbot@gmail.com | 2011-03-23 12:12:44 +0100 (Wed, 23 Mar 2011) | 12 lines
Fix public issue #5: Replace the EXTRA_LIBSNAPPY_LDFLAGS setup with something
slightly more standard, that also doesn't leak libtool command-line into
configure.ac.
R=csilvers
DELTA=7 (0 added, 4 deleted, 3 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=990
------------------------------------------------------------------------
r7 | snappy.mirrorbot@gmail.com | 2011-03-23 12:12:22 +0100 (Wed, 23 Mar 2011) | 10 lines
Fix public issue #4: Properly quote all macro arguments in configure.ac.
R=csilvers
DELTA=16 (0 added, 0 deleted, 16 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=989
------------------------------------------------------------------------
r6 | snappy.mirrorbot@gmail.com | 2011-03-23 12:11:54 +0100 (Wed, 23 Mar 2011) | 11 lines
Fix public issue #7: Don't use internal variables named ac_*, as those belong
to autoconf's namespace.
R=csilvers
DELTA=6 (0 added, 0 deleted, 6 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=988
------------------------------------------------------------------------
r5 | snappy.mirrorbot@gmail.com | 2011-03-23 12:11:09 +0100 (Wed, 23 Mar 2011) | 10 lines
Add missing licensing headers to a few files. (Part 2/2.)
R=csilvers
DELTA=12 (12 added, 0 deleted, 0 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=987
------------------------------------------------------------------------
r4 | snappy.mirrorbot@gmail.com | 2011-03-23 12:10:39 +0100 (Wed, 23 Mar 2011) | 10 lines
Add missing licensing headers to a few files. (Part 1/2.)
R=csilvers
DELTA=24 (24 added, 0 deleted, 0 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=986
------------------------------------------------------------------------
r3 | snappy.mirrorbot@gmail.com | 2011-03-23 12:10:04 +0100 (Wed, 23 Mar 2011) | 11 lines
Use the correct license file for the Apache 2.0 license;
spotted by Florian Weimer.
R=csilvers
DELTA=202 (174 added, 0 deleted, 28 changed)
Revision created by MOE tool push_codebase.
MOE_MIGRATION=985
------------------------------------------------------------------------
r2 | snappy.mirrorbot@gmail.com | 2011-03-18 18:14:15 +0100 (Fri, 18 Mar 2011) | 6 lines
Revision created by MOE tool push_codebase.
MOE_MIGRATION=
------------------------------------------------------------------------
r1 | sesse@google.com | 2011-03-18 18:13:52 +0100 (Fri, 18 Mar 2011) | 2 lines
Create trunk directory.
------------------------------------------------------------------------

Просмотреть файл

@ -1,58 +0,0 @@
Snappy v1.0.4, September 15th 2011:
* Speeded up the decompressor somewhat; typically about 2–8%
for Core i7, in 64-bit mode (comparable for Opteron).
Somewhat more for some tests, almost no gain for others.
* Make Snappy compile on certain platforms it didn't before
(Solaris with SunPro C++, HP-UX, AIX).
* Correct some minor errors in the format description.
Snappy v1.0.3, June 2nd 2011:
* Speeded up the decompressor somewhat; about 3-6% for Core 2,
6-13% for Core i7, and 5-12% for Opteron (all in 64-bit mode).
* Added compressed format documentation. This text is new,
but an earlier version from Zeev Tarantov was used as reference.
* Only link snappy_unittest against -lz and other autodetected
libraries, not libsnappy.so (which doesn't need any such dependency).
* Fixed some display issues in the microbenchmarks, one of which would
frequently make the test crash on GNU/Hurd.
Snappy v1.0.2, April 29th 2011:
* Relicense to a BSD-type license.
* Added C bindings, contributed by Martin Gieseking.
* More Win32 fixes, in particular for MSVC.
* Replace geo.protodata with a newer version.
* Fix timing inaccuracies in the unit test when comparing Snappy
to other algorithms.
Snappy v1.0.1, March 25th 2011:
This is a maintenance release, mostly containing minor fixes.
There is no new functionality. The most important fixes include:
* The COPYING file and all licensing headers now correctly state that
Snappy is licensed under the Apache 2.0 license.
* snappy_unittest should now compile natively under Windows,
as well as on embedded systems with no mmap().
* Various autotools nits have been fixed.
Snappy v1.0, March 17th 2011:
* Initial version.

Просмотреть файл

@ -1,135 +0,0 @@
Snappy, a fast compressor/decompressor.
Introduction
============
Snappy is a compression/decompression library. It does not aim for maximum
compression, or compatibility with any other compression library; instead,
it aims for very high speeds and reasonable compression. For instance,
compared to the fastest mode of zlib, Snappy is an order of magnitude faster
for most inputs, but the resulting compressed files are anywhere from 20% to
100% bigger. (For more information, see "Performance", below.)
Snappy has the following properties:
* Fast: Compression speeds at 250 MB/sec and beyond, with no assembler code.
See "Performance" below.
* Stable: Over the last few years, Snappy has compressed and decompressed
petabytes of data in Google's production environment. The Snappy bitstream
format is stable and will not change between versions.
* Robust: The Snappy decompressor is designed not to crash in the face of
corrupted or malicious input.
* Free and open source software: Snappy is licensed under a BSD-type license.
For more information, see the included COPYING file.
Snappy has previously been called "Zippy" in some Google presentations
and the like.
Performance
===========
Snappy is intended to be fast. On a single core of a Core i7 processor
in 64-bit mode, it compresses at about 250 MB/sec or more and decompresses at
about 500 MB/sec or more. (These numbers are for the slowest inputs in our
benchmark suite; others are much faster.) In our tests, Snappy usually
is faster than algorithms in the same class (e.g. LZO, LZF, FastLZ, QuickLZ,
etc.) while achieving comparable compression ratios.
Typical compression ratios (based on the benchmark suite) are about 1.5-1.7x
for plain text, about 2-4x for HTML, and of course 1.0x for JPEGs, PNGs and
other already-compressed data. Similar numbers for zlib in its fastest mode
are 2.6-2.8x, 3-7x and 1.0x, respectively. More sophisticated algorithms are
capable of achieving yet higher compression rates, although usually at the
expense of speed. Of course, compression ratio will vary significantly with
the input.
Although Snappy should be fairly portable, it is primarily optimized
for 64-bit x86-compatible processors, and may run slower in other environments.
In particular:
- Snappy uses 64-bit operations in several places to process more data at
once than would otherwise be possible.
- Snappy assumes unaligned 32- and 64-bit loads and stores are cheap.
On some platforms, these must be emulated with single-byte loads
and stores, which is much slower.
- Snappy assumes little-endian throughout, and needs to byte-swap data in
several places if running on a big-endian platform.
Experience has shown that even heavily tuned code can be improved.
Performance optimizations, whether for 64-bit x86 or other platforms,
are of course most welcome; see "Contact", below.
Usage
=====
Note that Snappy, both the implementation and the main interface,
is written in C++. However, several third-party bindings to other languages
are available; see the Google Code page at http://code.google.com/p/snappy/
for more information. Also, if you want to use Snappy from C code, you can
use the included C bindings in snappy-c.h.
To use Snappy from your own C++ program, include the file "snappy.h" from
your calling file, and link against the compiled library.
There are many ways to call Snappy, but the simplest possible is
snappy::Compress(input.data(), input.size(), &output);
and similarly
snappy::Uncompress(input.data(), input.size(), &output);
where "input" and "output" are both instances of std::string.
There are other interfaces that are more flexible in various ways, including
support for custom (non-array) input sources. See the header file for more
information.
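For illustration only, a complete (hypothetical) program built around the two calls above might look as follows; it assumes the library has been built and installed, and that you link against it (e.g. with -lsnappy).

#include <cassert>
#include <iostream>
#include <string>

#include "snappy.h"

int main() {
  const std::string input = "Hello, Hello, Hello, Snappy!";

  // Compress into a std::string; the output buffer is sized automatically.
  std::string compressed;
  snappy::Compress(input.data(), input.size(), &compressed);

  // Uncompress back; returns false if the data is not valid Snappy output.
  std::string restored;
  const bool ok = snappy::Uncompress(compressed.data(), compressed.size(),
                                     &restored);
  assert(ok && restored == input);

  std::cout << input.size() << " -> " << compressed.size() << " bytes\n";
  return 0;
}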
Tests and benchmarks
====================
When you compile Snappy, snappy_unittest is compiled in addition to the
library itself. You do not need it to use the compressor from your own library,
but it contains several useful components for Snappy development.
First of all, it contains unit tests, verifying correctness on your machine in
various scenarios. If you want to change or optimize Snappy, please run the
tests to verify you have not broken anything. Note that if you have the
Google Test library installed, unit test behavior (especially failures) will be
significantly more user-friendly. You can find Google Test at
http://code.google.com/p/googletest/
You probably also want the gflags library for handling of command-line flags;
you can find it at
http://code.google.com/p/google-gflags/
In addition to the unit tests, snappy contains microbenchmarks used to
tune compression and decompression performance. These are automatically run
before the unit tests, but you can disable them using the flag
--run_microbenchmarks=false if you have gflags installed (otherwise you will
need to edit the source).
Finally, snappy_unittest can benchmark Snappy against a few other compression libraries
(zlib, LZO, LZF, FastLZ and QuickLZ), if they were detected at configure time.
To benchmark using a given file, give the compression algorithm you want to test
Snappy against (e.g. --zlib) and then a list of one or more file names on the
command line. The testdata/ directory contains the files used by the
microbenchmark, which should provide a reasonably balanced starting point for
benchmarking. (Note that baddata[1-3].snappy are not intended as benchmarks; they
are used to verify correctness in the presence of corrupted data in the unit
test.)
Contact
=======
Snappy is distributed through Google Code. For the latest version, a bug tracker,
and other information, see
http://code.google.com/p/snappy/

Просмотреть файл

@ -1,110 +0,0 @@
Snappy compressed format description
Last revised: 2011-10-05
This is not a formal specification, but should suffice to explain most
relevant parts of how the Snappy format works. It is originally based on
text by Zeev Tarantov.
Snappy is a LZ77-type compressor with a fixed, byte-oriented encoding.
There is no entropy encoder backend nor framing layer -- the latter is
assumed to be handled by other parts of the system.
This document only describes the format, not how the Snappy compressor nor
decompressor actually works. The correctness of the decompressor should not
depend on implementation details of the compressor, and vice versa.
1. Preamble
The stream starts with the uncompressed length (up to a maximum of 2^32 - 1),
stored as a little-endian varint. Varints consist of a series of bytes,
where the lower 7 bits are data and the upper bit is set iff there are
more bytes to be read. In other words, an uncompressed length of 64 would
be stored as 0x40, and an uncompressed length of 2097150 (0x1FFFFE)
would be stored as 0xFE 0xFF 0x7F.
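As a sketch of this rule (not the library's own code), the preamble could be decoded like this; ReadPreamble is a hypothetical helper name.

#include <cstddef>
#include <cstdint>

// Reads the little-endian varint preamble from data[0..len-1]. Stores the
// uncompressed length in *result and returns the number of preamble bytes
// consumed, or 0 if the varint is truncated or too long.
static size_t ReadPreamble(const uint8_t* data, size_t len, uint32_t* result) {
  uint32_t value = 0;
  for (size_t i = 0; i < len && i < 5; ++i) {
    value |= static_cast<uint32_t>(data[i] & 0x7f) << (7 * i);
    if ((data[i] & 0x80) == 0) {  // upper bit clear: last byte of the varint
      *result = value;
      return i + 1;
    }
  }
  return 0;
}

Feeding it the bytes 0xFE 0xFF 0x7F from the example above yields 2097150 and consumes three bytes.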
2. The compressed stream itself
There are two types of elements in a Snappy stream: Literals and
copies (backreferences). There is no restriction on the order of elements,
except that the stream naturally cannot start with a copy. (Having
two literals in a row is never optimal from a compression point of
view, but nevertheless fully permitted.) Each element starts with a tag byte,
and the lower two bits of this tag byte signal what type of element will
follow:
00: Literal
01: Copy with 1-byte offset
10: Copy with 2-byte offset
11: Copy with 4-byte offset
The interpretation of the upper six bits is element-dependent.
2.1. Literals (00)
Literals are uncompressed data stored directly in the byte stream.
The literal length is stored differently depending on the length
of the literal:
- For literals up to and including 60 bytes in length, the upper
six bits of the tag byte contain (len-1). The literal follows
immediately thereafter in the bytestream.
- For longer literals, the (len-1) value is stored after the tag byte,
little-endian. The upper six bits of the tag byte describe how
many bytes are used for the length; 60, 61, 62 or 63 for
1-4 bytes, respectively. The literal itself follows after the
length.
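A hypothetical encoder for a literal element, following these two rules (and assuming len >= 1), might look like this sketch:

#include <cstdint>
#include <vector>

// Appends one literal element (tag byte, optional extra length bytes, then
// the literal data itself) to *out, following the rules described above.
static void EmitLiteral(const uint8_t* data, uint32_t len,
                        std::vector<uint8_t>* out) {
  const uint32_t n = len - 1;  // the format always stores (len - 1)
  if (n < 60) {
    // Short literal: (len-1) fits in the upper six bits of the tag byte;
    // the lower two bits are 00 (literal).
    out->push_back(static_cast<uint8_t>(n << 2));
  } else {
    // Long literal: the tag byte only records how many length bytes follow
    // (60..63 for 1..4 bytes); (len-1) is stored little-endian after it.
    uint8_t count = 1;
    while (count < 4 && n >= (1u << (8 * count))) ++count;
    out->push_back(static_cast<uint8_t>((59 + count) << 2));
    for (uint8_t i = 0; i < count; ++i) {
      out->push_back(static_cast<uint8_t>(n >> (8 * i)));
    }
  }
  out->insert(out->end(), data, data + len);
}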
2.2. Copies
Copies are references back into previous decompressed data, telling
the decompressor to reuse data it has previously decoded.
They encode two values: The _offset_, saying how many bytes back
from the current position to read, and the _length_, how many bytes
to copy. Offsets of zero can be encoded, but are not legal;
similarly, it is possible to encode backreferences that would
go past the end of the block (offset > current decompressed position),
which is also nonsensical and thus not allowed.
As in most LZ77-based compressors, the length can be larger than the offset,
yielding a form of run-length encoding (RLE). For instance,
"xababab" could be encoded as
<literal: "xab"> <copy: offset=2 length=4>
Note that since the current Snappy compressor works in 32 kB
blocks and does not do matching across blocks, it will never produce
a bitstream with offsets larger than about 32768. However, the
decompressor should not rely on this, as it may change in the future.
There are several different kinds of copy elements, depending on
the amount of bytes to be copied (length), and how far back the
data to be copied is (offset).
2.2.1. Copy with 1-byte offset (01)
These elements can encode lengths between [4..11] bytes and offsets
between [0..2047] bytes. (len-4) occupies three bits and is stored
in bits [2..4] of the tag byte. The offset occupies 11 bits, of which the
upper three are stored in the upper three bits ([5..7]) of the tag byte,
and the lower eight are stored in a byte following the tag byte.
2.2.2. Copy with 2-byte offset (10)
These elements can encode lengths between [1..64] and offsets from
[0..65535]. (len-1) occupies six bits and is stored in the upper
six bits ([2..7]) of the tag byte. The offset is stored as a
little-endian 16-bit integer in the two bytes following the tag byte.
2.2.3. Copy with 4-byte offset (11)
These are like the copies with 2-byte offsets (see previous subsection),
except that the offset is stored as a 32-bit integer instead of a
16-bit integer (and thus will occupy four bytes).
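Putting 2.2.1-2.2.3 together, the tag decoding could be sketched as follows (DecodeCopy is a hypothetical name, and error handling such as bounds checks on "p" is omitted):

#include <cstdint>

// Decodes the (length, offset) pair of a copy element whose tag byte is
// "tag" and whose extra bytes start at "p". Returns how many extra bytes
// were consumed. Tag type 00 (literal) is handled elsewhere.
static int DecodeCopy(uint8_t tag, const uint8_t* p,
                      uint32_t* length, uint32_t* offset) {
  switch (tag & 0x03) {
    case 1:  // 1-byte offset: lengths [4..11], offsets [0..2047].
      *length = 4 + ((tag >> 2) & 0x07);            // (len-4) in bits [2..4]
      *offset = (static_cast<uint32_t>(tag >> 5) << 8) | p[0];
      return 1;
    case 2:  // 2-byte offset: lengths [1..64], offsets [0..65535].
      *length = 1 + (tag >> 2);                     // (len-1) in bits [2..7]
      *offset = p[0] | (static_cast<uint32_t>(p[1]) << 8);
      return 2;
    case 3:  // 4-byte offset: as above, but with a 32-bit offset.
      *length = 1 + (tag >> 2);
      *offset = p[0] | (static_cast<uint32_t>(p[1]) << 8) |
                (static_cast<uint32_t>(p[2]) << 16) |
                (static_cast<uint32_t>(p[3]) << 24);
      return 4;
    default:
      return 0;
  }
}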

Просмотреть файл

@ -1,90 +0,0 @@
// Copyright 2011 Martin Gieseking <martin.gieseking@uos.de>.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "snappy.h"
#include "snappy-c.h"
extern "C" {
snappy_status snappy_compress(const char* input,
size_t input_length,
char* compressed,
size_t *compressed_length) {
if (*compressed_length < snappy_max_compressed_length(input_length)) {
return SNAPPY_BUFFER_TOO_SMALL;
}
snappy::RawCompress(input, input_length, compressed, compressed_length);
return SNAPPY_OK;
}
snappy_status snappy_uncompress(const char* compressed,
size_t compressed_length,
char* uncompressed,
size_t* uncompressed_length) {
size_t real_uncompressed_length;
if (!snappy::GetUncompressedLength(compressed,
compressed_length,
&real_uncompressed_length)) {
return SNAPPY_INVALID_INPUT;
}
if (*uncompressed_length < real_uncompressed_length) {
return SNAPPY_BUFFER_TOO_SMALL;
}
if (!snappy::RawUncompress(compressed, compressed_length, uncompressed)) {
return SNAPPY_INVALID_INPUT;
}
*uncompressed_length = real_uncompressed_length;
return SNAPPY_OK;
}
size_t snappy_max_compressed_length(size_t source_length) {
return snappy::MaxCompressedLength(source_length);
}
snappy_status snappy_uncompressed_length(const char *compressed,
size_t compressed_length,
size_t *result) {
if (snappy::GetUncompressedLength(compressed,
compressed_length,
result)) {
return SNAPPY_OK;
} else {
return SNAPPY_INVALID_INPUT;
}
}
snappy_status snappy_validate_compressed_buffer(const char *compressed,
size_t compressed_length) {
if (snappy::IsValidCompressedBuffer(compressed, compressed_length)) {
return SNAPPY_OK;
} else {
return SNAPPY_INVALID_INPUT;
}
}
} // extern "C"

Просмотреть файл

@ -1,138 +0,0 @@
/*
* Copyright 2011 Martin Gieseking <martin.gieseking@uos.de>.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Plain C interface (a wrapper around the C++ implementation).
*/
#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_C_H_
#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_C_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <stddef.h>
/*
* Return values; see the documentation for each function to know
* what each can return.
*/
typedef enum {
SNAPPY_OK = 0,
SNAPPY_INVALID_INPUT = 1,
SNAPPY_BUFFER_TOO_SMALL = 2,
} snappy_status;
/*
* Takes the data stored in "input[0..input_length-1]" and stores
* it in the array pointed to by "compressed".
*
* <compressed_length> signals the space available in "compressed".
* If it is not at least equal to "snappy_max_compressed_length(input_length)",
* SNAPPY_BUFFER_TOO_SMALL is returned. After successful compression,
* <compressed_length> contains the true length of the compressed output,
* and SNAPPY_OK is returned.
*
* Example:
* size_t output_length = snappy_max_compressed_length(input_length);
* char* output = (char*)malloc(output_length);
* if (snappy_compress(input, input_length, output, &output_length)
* == SNAPPY_OK) {
* ... Process(output, output_length) ...
* }
* free(output);
*/
snappy_status snappy_compress(const char* input,
size_t input_length,
char* compressed,
size_t* compressed_length);
/*
* Given data in "compressed[0..compressed_length-1]" generated by
* calling the snappy_compress routine, this routine stores
* the uncompressed data to
* uncompressed[0..uncompressed_length-1].
* Returns failure (a value not equal to SNAPPY_OK) if the message
* is corrupted and could not be decompressed.
*
* <uncompressed_length> signals the space available in "uncompressed".
* If it is not at least equal to the value returned by
* snappy_uncompressed_length for this stream, SNAPPY_BUFFER_TOO_SMALL
* is returned. After successful decompression, <uncompressed_length>
* contains the true length of the decompressed output.
*
* Example:
* size_t output_length;
* if (snappy_uncompressed_length(input, input_length, &output_length)
* != SNAPPY_OK) {
* ... fail ...
* }
* char* output = (char*)malloc(output_length);
* if (snappy_uncompress(input, input_length, output, &output_length)
* == SNAPPY_OK) {
* ... Process(output, output_length) ...
* }
* free(output);
*/
snappy_status snappy_uncompress(const char* compressed,
size_t compressed_length,
char* uncompressed,
size_t* uncompressed_length);
/*
* Returns the maximal size of the compressed representation of
* input data that is "source_length" bytes in length.
*/
size_t snappy_max_compressed_length(size_t source_length);
/*
* REQUIRES: "compressed[]" was produced by snappy_compress()
* Returns SNAPPY_OK and stores the length of the uncompressed data in
* *result normally. Returns SNAPPY_INVALID_INPUT on parsing error.
* This operation takes O(1) time.
*/
snappy_status snappy_uncompressed_length(const char* compressed,
size_t compressed_length,
size_t* result);
/*
* Check if the contents of "compressed[]" can be uncompressed successfully.
* Does not return the uncompressed data; returns SNAPPY_OK if the check
* succeeds, and SNAPPY_INVALID_INPUT otherwise.
* Takes time proportional to compressed_length, but is usually at least a
* factor of four faster than actual decompression.
*/
snappy_status snappy_validate_compressed_buffer(const char* compressed,
size_t compressed_length);
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* UTIL_SNAPPY_OPENSOURCE_SNAPPY_C_H_ */

Просмотреть файл

@ -1,150 +0,0 @@
// Copyright 2008 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Internals shared between the Snappy implementation and its unittest.
#ifndef UTIL_SNAPPY_SNAPPY_INTERNAL_H_
#define UTIL_SNAPPY_SNAPPY_INTERNAL_H_
#include "snappy-stubs-internal.h"
namespace snappy {
namespace internal {
class WorkingMemory {
public:
WorkingMemory() : large_table_(NULL) { }
~WorkingMemory() { delete[] large_table_; }
// Allocates and clears a hash table using memory in "*this",
// stores the number of buckets in "*table_size" and returns a pointer to
// the base of the hash table.
uint16* GetHashTable(size_t input_size, int* table_size);
private:
uint16 small_table_[1<<10]; // 2KB
uint16* large_table_; // Allocated only when needed
DISALLOW_COPY_AND_ASSIGN(WorkingMemory);
};
// Flat array compression that does not emit the "uncompressed length"
// prefix. Compresses "input" string to the "*op" buffer.
//
// REQUIRES: "input_length <= kBlockSize"
// REQUIRES: "op" points to an array of memory that is at least
// "MaxCompressedLength(input_length)" in size.
// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
// REQUIRES: "table_size" is a power of two
//
// Returns an "end" pointer into "op" buffer.
// "end - op" is the compressed size of "input".
char* CompressFragment(const char* input,
size_t input_length,
char* op,
uint16* table,
const int table_size);
// Return the largest n such that
//
// s1[0,n-1] == s2[0,n-1]
// and n <= (s2_limit - s2).
//
// Does not read *s2_limit or beyond.
// Does not read *(s1 + (s2_limit - s2)) or beyond.
// Requires that s2_limit >= s2.
//
// Separate implementation for x86_64, for speed. Uses the fact that
// x86_64 is little endian.
#if defined(ARCH_K8)
static inline int FindMatchLength(const char* s1,
const char* s2,
const char* s2_limit) {
DCHECK_GE(s2_limit, s2);
int matched = 0;
// Find out how long the match is. We loop over the data 64 bits at a
// time until we find a 64-bit block that doesn't match; then we find
// the first non-matching bit and use that to calculate the total
// length of the match.
while (PREDICT_TRUE(s2 <= s2_limit - 8)) {
if (PREDICT_FALSE(UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched))) {
s2 += 8;
matched += 8;
} else {
// On current (mid-2008) Opteron models there is a 3% more
// efficient code sequence to find the first non-matching byte.
// However, what follows is ~10% better on Intel Core 2 and newer,
// and we expect AMD's bsf instruction to improve.
uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
int matching_bits = Bits::FindLSBSetNonZero64(x);
matched += matching_bits >> 3;
return matched;
}
}
while (PREDICT_TRUE(s2 < s2_limit)) {
if (PREDICT_TRUE(s1[matched] == *s2)) {
++s2;
++matched;
} else {
return matched;
}
}
return matched;
}
#else
static inline int FindMatchLength(const char* s1,
const char* s2,
const char* s2_limit) {
// Implementation based on the x86-64 version, above.
DCHECK_GE(s2_limit, s2);
int matched = 0;
while (s2 <= s2_limit - 4 &&
UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
s2 += 4;
matched += 4;
}
if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
int matching_bits = Bits::FindLSBSetNonZero(x);
matched += matching_bits >> 3;
} else {
while ((s2 < s2_limit) && (s1[matched] == *s2)) {
++s2;
++matched;
}
}
return matched;
}
#endif
} // end namespace internal
} // end namespace snappy
#endif // UTIL_SNAPPY_SNAPPY_INTERNAL_H_

Просмотреть файл

@ -1,72 +0,0 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <string.h>
#include "snappy-sinksource.h"
namespace snappy {
Source::~Source() { }
Sink::~Sink() { }
char* Sink::GetAppendBuffer(size_t length, char* scratch) {
return scratch;
}
ByteArraySource::~ByteArraySource() { }
size_t ByteArraySource::Available() const { return left_; }
const char* ByteArraySource::Peek(size_t* len) {
*len = left_;
return ptr_;
}
void ByteArraySource::Skip(size_t n) {
left_ -= n;
ptr_ += n;
}
UncheckedByteArraySink::~UncheckedByteArraySink() { }
void UncheckedByteArraySink::Append(const char* data, size_t n) {
// Do no copying if the caller filled in the result of GetAppendBuffer()
if (data != dest_) {
memcpy(dest_, data, n);
}
dest_ += n;
}
char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
return dest_;
}
}

Просмотреть файл

@ -1,136 +0,0 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
#define UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
#include <stddef.h>
namespace snappy {
// A Sink is an interface that consumes a sequence of bytes.
class Sink {
public:
Sink() { }
virtual ~Sink();
// Append "bytes[0,n-1]" to this.
virtual void Append(const char* bytes, size_t n) = 0;
// Returns a writable buffer of the specified length for appending.
// May return a pointer to the caller-owned scratch buffer which
// must have at least the indicated length. The returned buffer is
// only valid until the next operation on this Sink.
//
// After writing at most "length" bytes, call Append() with the
// pointer returned from this function and the number of bytes
// written. Many Append() implementations will avoid copying
// bytes if this function returned an internal buffer.
//
// If a non-scratch buffer is returned, the caller may only pass a
// prefix of it to Append(). That is, it is not correct to pass an
// interior pointer of the returned array to Append().
//
// The default implementation always returns the scratch buffer.
virtual char* GetAppendBuffer(size_t length, char* scratch);
private:
// No copying
Sink(const Sink&);
void operator=(const Sink&);
};
// A Source is an interface that yields a sequence of bytes
class Source {
public:
Source() { }
virtual ~Source();
// Return the number of bytes left to read from the source
virtual size_t Available() const = 0;
// Peek at the next flat region of the source. Does not reposition
// the source. The returned region is empty iff Available()==0.
//
// Returns a pointer to the beginning of the region and stores its
// length in *len.
//
// The returned region is valid until the next call to Skip() or
// until this object is destroyed, whichever occurs first.
//
// The returned region may be larger than Available() (for example
// if this ByteSource is a view on a substring of a larger source).
// The caller is responsible for ensuring that it only reads the
// Available() bytes.
virtual const char* Peek(size_t* len) = 0;
// Skip the next n bytes. Invalidates any buffer returned by
// a previous call to Peek().
// REQUIRES: Available() >= n
virtual void Skip(size_t n) = 0;
private:
// No copying
Source(const Source&);
void operator=(const Source&);
};
// A Source implementation that yields the contents of a flat array
class ByteArraySource : public Source {
public:
ByteArraySource(const char* p, size_t n) : ptr_(p), left_(n) { }
virtual ~ByteArraySource();
virtual size_t Available() const;
virtual const char* Peek(size_t* len);
virtual void Skip(size_t n);
private:
const char* ptr_;
size_t left_;
};
// A Sink implementation that writes to a flat array without any bound checks.
class UncheckedByteArraySink : public Sink {
public:
explicit UncheckedByteArraySink(char* dest) : dest_(dest) { }
virtual ~UncheckedByteArraySink();
virtual void Append(const char* data, size_t n);
virtual char* GetAppendBuffer(size_t len, char* scratch);
// Return the current output pointer so that a caller can see how
// many bytes were produced.
// Note: this is not a Sink method.
char* CurrentDestination() const { return dest_; }
private:
char* dest_;
};
}
#endif // UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
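As an illustration of how these interfaces fit together (a sketch only, not part of the library), the following helper drains any Source into any Sink using just the methods declared above; with a ByteArraySource and an UncheckedByteArraySink it degenerates into a plain copy.

#include <cstring>

#include "snappy-sinksource.h"

// Copies every byte a Source can produce into a Sink, using Peek()/Skip()
// on the source and GetAppendBuffer()/Append() on the sink.
static void Drain(snappy::Source* source, snappy::Sink* sink) {
  char scratch[4096];
  while (source->Available() > 0) {
    size_t len;
    const char* in = source->Peek(&len);
    if (len > sizeof(scratch)) len = sizeof(scratch);
    // The sink may hand back "scratch" or one of its own internal buffers.
    char* out = sink->GetAppendBuffer(len, scratch);
    memcpy(out, in, len);
    sink->Append(out, len);
    source->Skip(len);
  }
}

For example, with ByteArraySource src(input, n) and UncheckedByteArraySink sink(buffer), Drain(&src, &sink) copies n bytes into buffer, and sink.CurrentDestination() then points n bytes past buffer.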

Просмотреть файл

@ -1,42 +0,0 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <algorithm>
#include <string>
#include "snappy-stubs-internal.h"
namespace snappy {
void Varint::Append32(string* s, uint32 value) {
char buf[Varint::kMax32];
const char* p = Varint::Encode32(buf, value);
s->append(buf, p - buf);
}
} // namespace snappy

Просмотреть файл

@ -1,515 +0,0 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various stubs for the open-source version of Snappy.
#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <iostream>
#include <string>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include "snappy-stubs-public.h"
#if defined(__x86_64__)
// Enable 64-bit optimized versions of some routines.
#define ARCH_K8 1
#endif
// Needed by OS X, among others.
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
// Pull in std::min, std::ostream, and the likes. This is safe because this
// header file is never used from any public header files.
using namespace std;
// The size of an array, if known at compile-time.
// Will give unexpected results if used on a pointer.
// We undefine it first, since some compilers already have a definition.
#ifdef ARRAYSIZE
#undef ARRAYSIZE
#endif
#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
// Static prediction hints.
#ifdef HAVE_BUILTIN_EXPECT
#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
#else
#define PREDICT_FALSE(x) x
#define PREDICT_TRUE(x) x
#endif
// This is only used for recomputing the tag byte table used during
// decompression; for simplicity we just remove it from the open-source
// version (anyone who wants to regenerate it can just do the call
// themselves within main()).
#define DEFINE_bool(flag_name, default_value, description) \
bool FLAGS_ ## flag_name = default_value;
#define DECLARE_bool(flag_name) \
extern bool FLAGS_ ## flag_name;
#define REGISTER_MODULE_INITIALIZER(name, code)
namespace snappy {
static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
// Logging.
#define LOG(level) LogMessage()
#define VLOG(level) true ? (void)0 : \
snappy::LogMessageVoidify() & snappy::LogMessage()
class LogMessage {
public:
LogMessage() { }
~LogMessage() {
cerr << endl;
}
LogMessage& operator<<(const std::string& msg) {
cerr << msg;
return *this;
}
LogMessage& operator<<(int x) {
cerr << x;
return *this;
}
};
// Asserts, both versions activated in debug mode only,
// and ones that are always active.
#define CRASH_UNLESS(condition) \
PREDICT_TRUE(condition) ? (void)0 : \
snappy::LogMessageVoidify() & snappy::LogMessageCrash()
class LogMessageCrash : public LogMessage {
public:
LogMessageCrash() { }
~LogMessageCrash() {
cerr << endl;
abort();
}
};
// This class is used to explicitly ignore values in the conditional
// logging macros. This avoids compiler warnings like "value computed
// is not used" and "statement has no effect".
class LogMessageVoidify {
public:
LogMessageVoidify() { }
// This has to be an operator with a precedence lower than << but
// higher than ?:
void operator&(const LogMessage&) { }
};
#define CHECK(cond) CRASH_UNLESS(cond)
#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
#ifdef NDEBUG
#define DCHECK(cond) CRASH_UNLESS(true)
#define DCHECK_LE(a, b) CRASH_UNLESS(true)
#define DCHECK_GE(a, b) CRASH_UNLESS(true)
#define DCHECK_EQ(a, b) CRASH_UNLESS(true)
#define DCHECK_NE(a, b) CRASH_UNLESS(true)
#define DCHECK_LT(a, b) CRASH_UNLESS(true)
#define DCHECK_GT(a, b) CRASH_UNLESS(true)
#else
#define DCHECK(cond) CHECK(cond)
#define DCHECK_LE(a, b) CHECK_LE(a, b)
#define DCHECK_GE(a, b) CHECK_GE(a, b)
#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
#define DCHECK_NE(a, b) CHECK_NE(a, b)
#define DCHECK_LT(a, b) CHECK_LT(a, b)
#define DCHECK_GT(a, b) CHECK_GT(a, b)
#endif
// Potentially unaligned loads and stores.
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
#else
// These functions are provided for architectures that don't support
// unaligned loads and stores.
inline uint16 UNALIGNED_LOAD16(const void *p) {
uint16 t;
memcpy(&t, p, sizeof t);
return t;
}
inline uint32 UNALIGNED_LOAD32(const void *p) {
uint32 t;
memcpy(&t, p, sizeof t);
return t;
}
inline uint64 UNALIGNED_LOAD64(const void *p) {
uint64 t;
memcpy(&t, p, sizeof t);
return t;
}
inline void UNALIGNED_STORE16(void *p, uint16 v) {
memcpy(p, &v, sizeof v);
}
inline void UNALIGNED_STORE32(void *p, uint32 v) {
memcpy(p, &v, sizeof v);
}
inline void UNALIGNED_STORE64(void *p, uint64 v) {
memcpy(p, &v, sizeof v);
}
#endif
// The following guarantees declaration of the byte swap functions.
#ifdef WORDS_BIGENDIAN
#ifdef HAVE_SYS_BYTEORDER_H
#include <sys/byteorder.h>
#endif
#ifdef HAVE_SYS_ENDIAN_H
#include <sys/endian.h>
#endif
#ifdef _MSC_VER
#include <stdlib.h>
#define bswap_16(x) _byteswap_ushort(x)
#define bswap_32(x) _byteswap_ulong(x)
#define bswap_64(x) _byteswap_uint64(x)
#elif defined(__APPLE__)
// Mac OS X / Darwin features
#include <libkern/OSByteOrder.h>
#define bswap_16(x) OSSwapInt16(x)
#define bswap_32(x) OSSwapInt32(x)
#define bswap_64(x) OSSwapInt64(x)
#elif defined(HAVE_BYTESWAP_H)
#include <byteswap.h>
#elif defined(bswap32)
// FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
#define bswap_16(x) bswap16(x)
#define bswap_32(x) bswap32(x)
#define bswap_64(x) bswap64(x)
#elif defined(BSWAP_64)
// Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
#define bswap_16(x) BSWAP_16(x)
#define bswap_32(x) BSWAP_32(x)
#define bswap_64(x) BSWAP_64(x)
#else
inline uint16 bswap_16(uint16 x) {
return (x << 8) | (x >> 8);
}
inline uint32 bswap_32(uint32 x) {
x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
return (x >> 16) | (x << 16);
}
inline uint64 bswap_64(uint64 x) {
x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
return (x >> 32) | (x << 32);
}
#endif
#endif // WORDS_BIGENDIAN
// Convert to little-endian storage, opposite of network format.
// Convert x from host to little endian: x = LittleEndian.FromHost(x);
// convert x from little endian to host: x = LittleEndian.ToHost(x);
//
// Store values into unaligned memory converting to little endian order:
// LittleEndian.Store16(p, x);
//
// Load unaligned values stored in little endian converting to host order:
// x = LittleEndian.Load16(p);
class LittleEndian {
public:
// Conversion functions.
#ifdef WORDS_BIGENDIAN
static uint16 FromHost16(uint16 x) { return bswap_16(x); }
static uint16 ToHost16(uint16 x) { return bswap_16(x); }
static uint32 FromHost32(uint32 x) { return bswap_32(x); }
static uint32 ToHost32(uint32 x) { return bswap_32(x); }
static bool IsLittleEndian() { return false; }
#else // !defined(WORDS_BIGENDIAN)
static uint16 FromHost16(uint16 x) { return x; }
static uint16 ToHost16(uint16 x) { return x; }
static uint32 FromHost32(uint32 x) { return x; }
static uint32 ToHost32(uint32 x) { return x; }
static bool IsLittleEndian() { return true; }
#endif // !defined(WORDS_BIGENDIAN)
// Functions to do unaligned loads and stores in little-endian order.
static uint16 Load16(const void *p) {
return ToHost16(UNALIGNED_LOAD16(p));
}
static void Store16(void *p, uint16 v) {
UNALIGNED_STORE16(p, FromHost16(v));
}
static uint32 Load32(const void *p) {
return ToHost32(UNALIGNED_LOAD32(p));
}
static void Store32(void *p, uint32 v) {
UNALIGNED_STORE32(p, FromHost32(v));
}
};
// Some bit-manipulation functions.
class Bits {
public:
// Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0.
static int Log2Floor(uint32 n);
// Return the first set least / most significant bit, 0-indexed. Returns an
// undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
// that it's 0-indexed.
static int FindLSBSetNonZero(uint32 n);
static int FindLSBSetNonZero64(uint64 n);
private:
DISALLOW_COPY_AND_ASSIGN(Bits);
};
#ifdef HAVE_BUILTIN_CTZ
inline int Bits::Log2Floor(uint32 n) {
return n == 0 ? -1 : 31 ^ __builtin_clz(n);
}
inline int Bits::FindLSBSetNonZero(uint32 n) {
return __builtin_ctz(n);
}
inline int Bits::FindLSBSetNonZero64(uint64 n) {
return __builtin_ctzll(n);
}
#else // Portable versions.
inline int Bits::Log2Floor(uint32 n) {
if (n == 0)
return -1;
int log = 0;
uint32 value = n;
for (int i = 4; i >= 0; --i) {
int shift = (1 << i);
uint32 x = value >> shift;
if (x != 0) {
value = x;
log += shift;
}
}
assert(value == 1);
return log;
}
inline int Bits::FindLSBSetNonZero(uint32 n) {
int rc = 31;
for (int i = 4, shift = 1 << 4; i >= 0; --i) {
const uint32 x = n << shift;
if (x != 0) {
n = x;
rc -= shift;
}
shift >>= 1;
}
return rc;
}
// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
inline int Bits::FindLSBSetNonZero64(uint64 n) {
const uint32 bottombits = static_cast<uint32>(n);
if (bottombits == 0) {
// Bottom bits are zero, so scan in top bits
return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
} else {
return FindLSBSetNonZero(bottombits);
}
}
#endif // End portable versions.
// Variable-length integer encoding.
class Varint {
public:
// Maximum lengths of varint encoding of uint32.
static const int kMax32 = 5;
// Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
// Never reads a character at or beyond limit. If a valid/terminated varint32
// was found in the range, stores it in *OUTPUT and returns a pointer just
// past the last byte of the varint32. Else returns NULL. On success,
// "result <= limit".
static const char* Parse32WithLimit(const char* ptr, const char* limit,
uint32* OUTPUT);
// REQUIRES "ptr" points to a buffer of length sufficient to hold "v".
// EFFECTS Encodes "v" into "ptr" and returns a pointer to the
// byte just past the last encoded byte.
static char* Encode32(char* ptr, uint32 v);
// EFFECTS Appends the varint representation of "value" to "*s".
static void Append32(string* s, uint32 value);
};
inline const char* Varint::Parse32WithLimit(const char* p,
const char* l,
uint32* OUTPUT) {
const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
uint32 b, result;
if (ptr >= limit) return NULL;
b = *(ptr++); result = b & 127; if (b < 128) goto done;
if (ptr >= limit) return NULL;
b = *(ptr++); result |= (b & 127) << 7; if (b < 128) goto done;
if (ptr >= limit) return NULL;
b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
if (ptr >= limit) return NULL;
b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
if (ptr >= limit) return NULL;
b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
return NULL; // Value is too long to be a varint32
done:
*OUTPUT = result;
return reinterpret_cast<const char*>(ptr);
}
inline char* Varint::Encode32(char* sptr, uint32 v) {
// Operate on characters as unsigneds
unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
static const int B = 128;
if (v < (1<<7)) {
*(ptr++) = v;
} else if (v < (1<<14)) {
*(ptr++) = v | B;
*(ptr++) = v>>7;
} else if (v < (1<<21)) {
*(ptr++) = v | B;
*(ptr++) = (v>>7) | B;
*(ptr++) = v>>14;
} else if (v < (1<<28)) {
*(ptr++) = v | B;
*(ptr++) = (v>>7) | B;
*(ptr++) = (v>>14) | B;
*(ptr++) = v>>21;
} else {
*(ptr++) = v | B;
*(ptr++) = (v>>7) | B;
*(ptr++) = (v>>14) | B;
*(ptr++) = (v>>21) | B;
*(ptr++) = v>>28;
}
return reinterpret_cast<char*>(ptr);
}
// If you know the internal layout of the std::string in use, you can
// replace this function with one that resizes the string without
// filling the new space with zeros (if applicable) --
// it will be non-portable but faster.
inline void STLStringResizeUninitialized(string* s, size_t new_size) {
s->resize(new_size);
}
// Return a mutable char* pointing to a string's internal buffer,
// which may not be null-terminated. Writing through this pointer will
// modify the string.
//
// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
// next call to a string method that invalidates iterators.
//
// As of 2006-04, there is no standard-blessed way of getting a
// mutable reference to a string's internal buffer. However, issue 530
// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
// proposes this as the method. It will officially be part of the standard
// for C++0x. This should already work on all current implementations.
inline char* string_as_array(string* str) {
return str->empty() ? NULL : &*str->begin();
}
} // namespace snappy
#endif // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
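For what it is worth, a small sketch of the Varint helpers declared above (this pulls in an internal header, so it is for illustration only, not a supported API):

#include <cassert>
#include <string>

#include "snappy-stubs-internal.h"

int main() {
  // Round-trip a value through Append32 and Parse32WithLimit.
  std::string s;
  snappy::Varint::Append32(&s, 2097150);  // appends 0xFE 0xFF 0x7F
  snappy::uint32 decoded = 0;
  const char* end = snappy::Varint::Parse32WithLimit(
      s.data(), s.data() + s.size(), &decoded);
  assert(end == s.data() + s.size() && decoded == 2097150);
  return 0;
}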

Просмотреть файл

@ -1,85 +0,0 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: sesse@google.com (Steinar H. Gunderson)
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various type stubs for the open-source version of Snappy.
//
// This file cannot include config.h, as it is included from snappy.h,
// which is a public header. Instead, snappy-stubs-public.h is generated by
// from snappy-stubs-public.h.in at configure time.
#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
#if @ac_cv_have_stdint_h@
#include <stdint.h>
#endif
#if @ac_cv_have_stddef_h@
#include <stddef.h>
#endif
#define SNAPPY_MAJOR @SNAPPY_MAJOR@
#define SNAPPY_MINOR @SNAPPY_MINOR@
#define SNAPPY_PATCHLEVEL @SNAPPY_PATCHLEVEL@
#define SNAPPY_VERSION \
((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
#include <string>
namespace snappy {
#if @ac_cv_have_stdint_h@
typedef int8_t int8;
typedef uint8_t uint8;
typedef int16_t int16;
typedef uint16_t uint16;
typedef int32_t int32;
typedef uint32_t uint32;
typedef int64_t int64;
typedef uint64_t uint64;
#else
typedef signed char int8;
typedef unsigned char uint8;
typedef short int16;
typedef unsigned short uint16;
typedef int int32;
typedef unsigned int uint32;
typedef long long int64;
typedef unsigned long long uint64;
#endif
typedef std::string string;
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
} // namespace snappy
#endif // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_

Просмотреть файл

@ -1,596 +0,0 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various stubs for the unit tests for the open-source version of Snappy.
#include "snappy-test.h"
#ifdef HAVE_WINDOWS_H
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif
#include <algorithm>
DEFINE_bool(run_microbenchmarks, true,
"Run microbenchmarks before doing anything else.");
namespace snappy {
string ReadTestDataFile(const string& base) {
string contents;
const char* srcdir = getenv("srcdir"); // This is set by Automake.
if (srcdir) {
File::ReadFileToStringOrDie(
string(srcdir) + "/testdata/" + base, &contents);
} else {
File::ReadFileToStringOrDie("testdata/" + base, &contents);
}
return contents;
}
string StringPrintf(const char* format, ...) {
char buf[4096];
va_list ap;
va_start(ap, format);
vsnprintf(buf, sizeof(buf), format, ap);
va_end(ap);
return buf;
}
bool benchmark_running = false;
int64 benchmark_real_time_us = 0;
int64 benchmark_cpu_time_us = 0;
string *benchmark_label = NULL;
int64 benchmark_bytes_processed = 0;
void ResetBenchmarkTiming() {
benchmark_real_time_us = 0;
benchmark_cpu_time_us = 0;
}
#ifdef WIN32
LARGE_INTEGER benchmark_start_real;
FILETIME benchmark_start_cpu;
#else // WIN32
struct timeval benchmark_start_real;
struct rusage benchmark_start_cpu;
#endif // WIN32
void StartBenchmarkTiming() {
#ifdef WIN32
QueryPerformanceCounter(&benchmark_start_real);
FILETIME dummy;
CHECK(GetProcessTimes(
GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_start_cpu));
#else
gettimeofday(&benchmark_start_real, NULL);
if (getrusage(RUSAGE_SELF, &benchmark_start_cpu) == -1) {
perror("getrusage(RUSAGE_SELF)");
exit(1);
}
#endif
benchmark_running = true;
}
void StopBenchmarkTiming() {
if (!benchmark_running) {
return;
}
#ifdef WIN32
LARGE_INTEGER benchmark_stop_real;
LARGE_INTEGER benchmark_frequency;
QueryPerformanceCounter(&benchmark_stop_real);
QueryPerformanceFrequency(&benchmark_frequency);
double elapsed_real = static_cast<double>(
benchmark_stop_real.QuadPart - benchmark_start_real.QuadPart) /
benchmark_frequency.QuadPart;
benchmark_real_time_us += elapsed_real * 1e6 + 0.5;
FILETIME benchmark_stop_cpu, dummy;
CHECK(GetProcessTimes(
GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_stop_cpu));
ULARGE_INTEGER start_ulargeint;
start_ulargeint.LowPart = benchmark_start_cpu.dwLowDateTime;
start_ulargeint.HighPart = benchmark_start_cpu.dwHighDateTime;
ULARGE_INTEGER stop_ulargeint;
stop_ulargeint.LowPart = benchmark_stop_cpu.dwLowDateTime;
stop_ulargeint.HighPart = benchmark_stop_cpu.dwHighDateTime;
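// GetProcessTimes reports CPU time in 100-nanosecond FILETIME units; adding 5
// before dividing by 10 converts the delta to microseconds, rounded to nearest.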
benchmark_cpu_time_us +=
(stop_ulargeint.QuadPart - start_ulargeint.QuadPart + 5) / 10;
#else // WIN32
struct timeval benchmark_stop_real;
gettimeofday(&benchmark_stop_real, NULL);
benchmark_real_time_us +=
1000000 * (benchmark_stop_real.tv_sec - benchmark_start_real.tv_sec);
benchmark_real_time_us +=
(benchmark_stop_real.tv_usec - benchmark_start_real.tv_usec);
struct rusage benchmark_stop_cpu;
if (getrusage(RUSAGE_SELF, &benchmark_stop_cpu) == -1) {
perror("getrusage(RUSAGE_SELF)");
exit(1);
}
benchmark_cpu_time_us += 1000000 * (benchmark_stop_cpu.ru_utime.tv_sec -
benchmark_start_cpu.ru_utime.tv_sec);
benchmark_cpu_time_us += (benchmark_stop_cpu.ru_utime.tv_usec -
benchmark_start_cpu.ru_utime.tv_usec);
#endif // WIN32
benchmark_running = false;
}
void SetBenchmarkLabel(const string& str) {
if (benchmark_label) {
delete benchmark_label;
}
benchmark_label = new string(str);
}
void SetBenchmarkBytesProcessed(int64 bytes) {
benchmark_bytes_processed = bytes;
}
struct BenchmarkRun {
int64 real_time_us;
int64 cpu_time_us;
};
struct BenchmarkCompareCPUTime {
bool operator() (const BenchmarkRun& a, const BenchmarkRun& b) const {
return a.cpu_time_us < b.cpu_time_us;
}
};
void Benchmark::Run() {
for (int test_case_num = start_; test_case_num <= stop_; ++test_case_num) {
// Run a few iterations first to find out approximately how fast
// the benchmark is.
const int kCalibrateIterations = 100;
ResetBenchmarkTiming();
StartBenchmarkTiming();
(*function_)(kCalibrateIterations, test_case_num);
StopBenchmarkTiming();
// Let each test case run for about 200ms, but at least as many
// as we used to calibrate.
// Run five times and pick the median.
const int kNumRuns = 5;
const int kMedianPos = kNumRuns / 2;
int num_iterations = 0;
if (benchmark_real_time_us > 0) {
num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
}
num_iterations = max(num_iterations, kCalibrateIterations);
BenchmarkRun benchmark_runs[kNumRuns];
for (int run = 0; run < kNumRuns; ++run) {
ResetBenchmarkTiming();
StartBenchmarkTiming();
(*function_)(num_iterations, test_case_num);
StopBenchmarkTiming();
benchmark_runs[run].real_time_us = benchmark_real_time_us;
benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
}
nth_element(benchmark_runs,
benchmark_runs + kMedianPos,
benchmark_runs + kNumRuns,
BenchmarkCompareCPUTime());
int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
int64 bytes_per_second = benchmark_bytes_processed * 1000000 / cpu_time_us;
string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num);
string human_readable_speed;
if (bytes_per_second < 1024) {
human_readable_speed = StringPrintf("%dB/s", bytes_per_second);
} else if (bytes_per_second < 1024 * 1024) {
human_readable_speed = StringPrintf(
"%.1fkB/s", bytes_per_second / 1024.0f);
} else if (bytes_per_second < 1024 * 1024 * 1024) {
human_readable_speed = StringPrintf(
"%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
} else {
human_readable_speed = StringPrintf(
"%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
}
fprintf(stderr,
#ifdef WIN32
"%-18s %10I64d %10I64d %10d %s %s\n",
#else
"%-18s %10lld %10lld %10d %s %s\n",
#endif
heading.c_str(),
static_cast<long long>(real_time_us * 1000 / num_iterations),
static_cast<long long>(cpu_time_us * 1000 / num_iterations),
num_iterations,
human_readable_speed.c_str(),
benchmark_label->c_str());
}
}
#ifdef HAVE_LIBZ
ZLib::ZLib()
: comp_init_(false),
uncomp_init_(false) {
Reinit();
}
ZLib::~ZLib() {
if (comp_init_) { deflateEnd(&comp_stream_); }
if (uncomp_init_) { inflateEnd(&uncomp_stream_); }
}
void ZLib::Reinit() {
compression_level_ = Z_DEFAULT_COMPRESSION;
window_bits_ = MAX_WBITS;
mem_level_ = 8; // DEF_MEM_LEVEL
if (comp_init_) {
deflateEnd(&comp_stream_);
comp_init_ = false;
}
if (uncomp_init_) {
inflateEnd(&uncomp_stream_);
uncomp_init_ = false;
}
first_chunk_ = true;
}
void ZLib::Reset() {
first_chunk_ = true;
}
// --------- COMPRESS MODE
// Initialization method to be called if we hit an error while
// compressing. On hitting an error, call this method before returning
// the error.
void ZLib::CompressErrorInit() {
deflateEnd(&comp_stream_);
comp_init_ = false;
Reset();
}
int ZLib::DeflateInit() {
return deflateInit2(&comp_stream_,
compression_level_,
Z_DEFLATED,
window_bits_,
mem_level_,
Z_DEFAULT_STRATEGY);
}
int ZLib::CompressInit(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen) {
int err;
comp_stream_.next_in = (Bytef*)source;
comp_stream_.avail_in = (uInt)*sourceLen;
if ((uLong)comp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
comp_stream_.next_out = dest;
comp_stream_.avail_out = (uInt)*destLen;
if ((uLong)comp_stream_.avail_out != *destLen) return Z_BUF_ERROR;
if ( !first_chunk_ ) // only need to set up stream the first time through
return Z_OK;
if (comp_init_) { // we've already initted it
err = deflateReset(&comp_stream_);
if (err != Z_OK) {
LOG(WARNING) << "ERROR: Can't reset compress object; creating a new one";
deflateEnd(&comp_stream_);
comp_init_ = false;
}
}
if (!comp_init_) { // first use
comp_stream_.zalloc = (alloc_func)0;
comp_stream_.zfree = (free_func)0;
comp_stream_.opaque = (voidpf)0;
err = DeflateInit();
if (err != Z_OK) return err;
comp_init_ = true;
}
return Z_OK;
}
// In a perfect world we'd always have the full buffer to compress
// when the time came, and we could just call Compress(). Alas, we
// want to do chunked compression on our webserver. In this
// application, we compress the header, send it off, then compress the
// results, send them off, then compress the footer. Thus we need to
// use the chunked compression features of zlib.
int ZLib::CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen,
int flush_mode) { // Z_FULL_FLUSH or Z_FINISH
int err;
if ( (err=CompressInit(dest, destLen, source, sourceLen)) != Z_OK )
return err;
// This is used to figure out how many bytes we wrote *this chunk*
int compressed_size = comp_stream_.total_out;
// Some setup happens only for the first chunk we compress in a run
if ( first_chunk_ ) {
first_chunk_ = false;
}
// flush_mode is Z_FINISH for all mode, Z_SYNC_FLUSH for incremental
// compression.
err = deflate(&comp_stream_, flush_mode);
const uLong source_bytes_consumed = *sourceLen - comp_stream_.avail_in;
*sourceLen = comp_stream_.avail_in;
if ((err == Z_STREAM_END || err == Z_OK)
&& comp_stream_.avail_in == 0
&& comp_stream_.avail_out != 0 ) {
// we processed everything ok and the output buffer was large enough.
;
} else if (err == Z_STREAM_END && comp_stream_.avail_in > 0) {
return Z_BUF_ERROR; // should never happen
} else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
// an error happened
CompressErrorInit();
return err;
} else if (comp_stream_.avail_out == 0) { // not enough space
err = Z_BUF_ERROR;
}
assert(err == Z_OK || err == Z_STREAM_END || err == Z_BUF_ERROR);
if (err == Z_STREAM_END)
err = Z_OK;
// update the crc and other metadata
compressed_size = comp_stream_.total_out - compressed_size; // delta
*destLen = compressed_size;
return err;
}
int ZLib::CompressChunkOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen,
int flush_mode) { // Z_FULL_FLUSH or Z_FINISH
const int ret =
CompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
if (ret == Z_BUF_ERROR)
CompressErrorInit();
return ret;
}
// This routine only initializes the compression stream once. Thereafter, it
// just does a deflateReset on the stream, which should be faster.
int ZLib::Compress(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen) {
int err;
const uLongf orig_destLen = *destLen;
if ( (err=CompressChunkOrAll(dest, destLen, source, sourceLen,
Z_FINISH)) != Z_OK )
return err;
Reset(); // reset for next call to Compress
return Z_OK;
}
// --------- UNCOMPRESS MODE
int ZLib::InflateInit() {
return inflateInit2(&uncomp_stream_, MAX_WBITS);
}
// Initialization method to be called if we hit an error while
// uncompressing. On hitting an error, call this method before
// returning the error.
void ZLib::UncompressErrorInit() {
inflateEnd(&uncomp_stream_);
uncomp_init_ = false;
Reset();
}
int ZLib::UncompressInit(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen) {
int err;
uncomp_stream_.next_in = (Bytef*)source;
uncomp_stream_.avail_in = (uInt)*sourceLen;
// Check for source > 64K on 16-bit machine:
if ((uLong)uncomp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
uncomp_stream_.next_out = dest;
uncomp_stream_.avail_out = (uInt)*destLen;
if ((uLong)uncomp_stream_.avail_out != *destLen) return Z_BUF_ERROR;
if ( !first_chunk_ ) // only need to set up stream the first time through
return Z_OK;
if (uncomp_init_) { // we've already initted it
err = inflateReset(&uncomp_stream_);
if (err != Z_OK) {
LOG(WARNING)
<< "ERROR: Can't reset uncompress object; creating a new one";
UncompressErrorInit();
}
}
if (!uncomp_init_) {
uncomp_stream_.zalloc = (alloc_func)0;
uncomp_stream_.zfree = (free_func)0;
uncomp_stream_.opaque = (voidpf)0;
err = InflateInit();
if (err != Z_OK) return err;
uncomp_init_ = true;
}
return Z_OK;
}
// If you compressed your data a chunk at a time, with CompressChunk,
// you can uncompress it a chunk at a time with UncompressChunk.
// The only difference between chunked and unchunked uncompression
// is the flush mode we use: Z_SYNC_FLUSH (chunked) or Z_FINISH (unchunked).
int ZLib::UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen,
int flush_mode) { // Z_SYNC_FLUSH or Z_FINISH
int err = Z_OK;
if ( (err=UncompressInit(dest, destLen, source, sourceLen)) != Z_OK ) {
LOG(WARNING) << "UncompressInit: Error: " << err << " SourceLen: "
<< *sourceLen;
return err;
}
// This is used to figure out how many output bytes we wrote *this chunk*:
const uLong old_total_out = uncomp_stream_.total_out;
// This is used to figure out how many input bytes we read *this chunk*:
const uLong old_total_in = uncomp_stream_.total_in;
// Some setup happens only for the first chunk we compress in a run
if ( first_chunk_ ) {
first_chunk_ = false; // so we don't do this again
// For the first chunk *only* (to avoid infinite troubles), we let
// there be no actual data to uncompress. This sometimes triggers
// when the input is only the gzip header, say.
if ( *sourceLen == 0 ) {
*destLen = 0;
return Z_OK;
}
}
// We'll uncompress as much as we can. If we end OK great, otherwise
// if we get an error that seems to be the gzip footer, we store the
// gzip footer and return OK, otherwise we return the error.
// flush_mode is Z_SYNC_FLUSH for chunked mode, Z_FINISH for all mode.
err = inflate(&uncomp_stream_, flush_mode);
// Figure out how many bytes of the input zlib slurped up:
const uLong bytes_read = uncomp_stream_.total_in - old_total_in;
CHECK_LE(source + bytes_read, source + *sourceLen);
*sourceLen = uncomp_stream_.avail_in;
if ((err == Z_STREAM_END || err == Z_OK) // everything went ok
&& uncomp_stream_.avail_in == 0) { // and we read it all
;
} else if (err == Z_STREAM_END && uncomp_stream_.avail_in > 0) {
LOG(WARNING)
<< "UncompressChunkOrAll: Received some extra data, bytes total: "
<< uncomp_stream_.avail_in << " bytes: "
<< string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
min(int(uncomp_stream_.avail_in), 20));
UncompressErrorInit();
return Z_DATA_ERROR; // what's the extra data for?
} else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
// an error happened
LOG(WARNING) << "UncompressChunkOrAll: Error: " << err
<< " avail_out: " << uncomp_stream_.avail_out;
UncompressErrorInit();
return err;
} else if (uncomp_stream_.avail_out == 0) {
err = Z_BUF_ERROR;
}
assert(err == Z_OK || err == Z_BUF_ERROR || err == Z_STREAM_END);
if (err == Z_STREAM_END)
err = Z_OK;
*destLen = uncomp_stream_.total_out - old_total_out; // size for this call
return err;
}
int ZLib::UncompressChunkOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen,
int flush_mode) { // Z_SYNC_FLUSH or Z_FINISH
const int ret =
UncompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
if (ret == Z_BUF_ERROR)
UncompressErrorInit();
return ret;
}
int ZLib::UncompressAtMost(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen) {
return UncompressAtMostOrAll(dest, destLen, source, sourceLen, Z_SYNC_FLUSH);
}
// We make sure we've uncompressed everything, that is, the current
// uncompress stream is at a compressed-buffer-EOF boundary. In gzip
// mode, we also check the gzip footer to make sure we pass the gzip
// consistency checks. We RETURN true iff both types of checks pass.
bool ZLib::UncompressChunkDone() {
assert(!first_chunk_ && uncomp_init_);
// Make sure we're at the end-of-compressed-data point. This means
// if we call inflate with Z_FINISH we won't consume any input or
// write any output
Bytef dummyin, dummyout;
uLongf dummylen = 0;
if ( UncompressChunkOrAll(&dummyout, &dummylen, &dummyin, 0, Z_FINISH)
!= Z_OK ) {
return false;
}
// Make sure that when we exit, we can start a new round of chunks later
Reset();
return true;
}
// Uncompresses the source buffer into the destination buffer.
// The destination buffer must be long enough to hold the entire
// decompressed contents.
//
// We only initialize the uncomp_stream once. Thereafter, we use
// inflateReset, which should be faster.
//
// Returns Z_OK on success, otherwise, it returns a zlib error code.
int ZLib::Uncompress(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen) {
int err;
if ( (err=UncompressChunkOrAll(dest, destLen, source, sourceLen,
Z_FINISH)) != Z_OK ) {
Reset(); // let us try to uncompress again
return err;
}
if ( !UncompressChunkDone() ) // calls Reset()
return Z_DATA_ERROR;
return Z_OK; // stream_end is ok
}
#endif // HAVE_LIBZ
} // namespace snappy

View file

@ -1,505 +0,0 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various stubs for the unit tests for the open-source version of Snappy.
#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
#include "snappy-stubs-internal.h"
#include <stdio.h>
#include <stdarg.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#include <sys/time.h>
#ifdef HAVE_WINDOWS_H
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif
#include <string>
#ifdef HAVE_GTEST
#include <gtest/gtest.h>
#undef TYPED_TEST
#define TYPED_TEST TEST
#define INIT_GTEST(argc, argv) ::testing::InitGoogleTest(argc, *argv)
#else
// Stubs for if the user doesn't have Google Test installed.
#define TEST(test_case, test_subcase) \
void Test_ ## test_case ## _ ## test_subcase()
#define INIT_GTEST(argc, argv)
#define TYPED_TEST TEST
#define EXPECT_EQ CHECK_EQ
#define EXPECT_NE CHECK_NE
#define EXPECT_FALSE(cond) CHECK(!(cond))
#endif
#ifdef HAVE_GFLAGS
#include <gflags/gflags.h>
// This is tricky; both gflags and Google Test want to look at the command line
// arguments. Google Test seems to be the most happy with unknown arguments,
// though, so we call it first and hope for the best.
#define InitGoogle(argv0, argc, argv, remove_flags) \
INIT_GTEST(argc, argv); \
google::ParseCommandLineFlags(argc, argv, remove_flags);
#else
// If we don't have the gflags package installed, these can only be
// changed at compile time.
#define DEFINE_int32(flag_name, default_value, description) \
static int FLAGS_ ## flag_name = default_value;
#define InitGoogle(argv0, argc, argv, remove_flags) \
INIT_GTEST(argc, argv)
#endif
#ifdef HAVE_LIBZ
#include "zlib.h"
#endif
#ifdef HAVE_LIBLZO2
#include "lzo/lzo1x.h"
#endif
#ifdef HAVE_LIBLZF
extern "C" {
#include "lzf.h"
}
#endif
#ifdef HAVE_LIBFASTLZ
#include "fastlz.h"
#endif
#ifdef HAVE_LIBQUICKLZ
#include "quicklz.h"
#endif
namespace {
namespace File {
void Init() { }
void ReadFileToStringOrDie(const char* filename, string* data) {
FILE* fp = fopen(filename, "rb");
if (fp == NULL) {
perror(filename);
exit(1);
}
data->clear();
while (!feof(fp)) {
char buf[4096];
size_t ret = fread(buf, 1, 4096, fp);
if (ferror(fp)) {  // fread() returns a size_t count, never -1; check the error flag
perror("fread");
exit(1);
}
data->append(string(buf, ret));
}
fclose(fp);
}
void ReadFileToStringOrDie(const string& filename, string* data) {
ReadFileToStringOrDie(filename.c_str(), data);
}
void WriteStringToFileOrDie(const string& str, const char* filename) {
FILE* fp = fopen(filename, "wb");
if (fp == NULL) {
perror(filename);
exit(1);
}
int ret = fwrite(str.data(), str.size(), 1, fp);
if (ret != 1) {
perror("fwrite");
exit(1);
}
fclose(fp);
}
} // namespace File
} // namespace
namespace snappy {
#define FLAGS_test_random_seed 301
typedef string TypeParam;
void Test_CorruptedTest_VerifyCorrupted();
void Test_Snappy_SimpleTests();
void Test_Snappy_MaxBlowup();
void Test_Snappy_RandomData();
void Test_Snappy_FourByteOffset();
void Test_SnappyCorruption_TruncatedVarint();
void Test_SnappyCorruption_UnterminatedVarint();
void Test_Snappy_ReadPastEndOfBuffer();
void Test_Snappy_FindMatchLength();
void Test_Snappy_FindMatchLengthRandom();
string ReadTestDataFile(const string& base);
// A sprintf() variant that returns a std::string.
// Not safe for general use due to truncation issues.
string StringPrintf(const char* format, ...);
// A simple, non-cryptographically-secure random generator.
class ACMRandom {
public:
explicit ACMRandom(uint32 seed) : seed_(seed) {}
int32 Next();
int32 Uniform(int32 n) {
return Next() % n;
}
uint8 Rand8() {
return static_cast<uint8>((Next() >> 1) & 0x000000ff);
}
bool OneIn(int X) { return Uniform(X) == 0; }
// Skewed: pick "base" uniformly from range [0,max_log] and then
// return "base" random bits. The effect is to pick a number in the
// range [0,2^max_log-1] with bias towards smaller numbers.
int32 Skewed(int max_log);
private:
static const uint32 M = 2147483647L; // 2^31-1
uint32 seed_;
};
inline int32 ACMRandom::Next() {
static const uint64 A = 16807; // bits 14, 8, 7, 5, 2, 1, 0
// We are computing
// seed_ = (seed_ * A) % M, where M = 2^31-1
//
// seed_ must not be zero or M, or else all subsequent computed values
// will be zero or M respectively. For all other values, seed_ will end
// up cycling through every number in [1,M-1]
uint64 product = seed_ * A;
// Compute (product % M) using the fact that ((x << 31) % M) == x.
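// (Equivalently: write product = hi * 2^31 + lo; since 2^31 == 1 (mod M),
// product % M == (hi + lo) % M, and the shift and mask below extract hi and lo.)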
seed_ = (product >> 31) + (product & M);
// The first reduction may overflow by 1 bit, so we may need to repeat.
// mod == M is not possible; using > allows the faster sign-bit-based test.
if (seed_ > M) {
seed_ -= M;
}
return seed_;
}
inline int32 ACMRandom::Skewed(int max_log) {
const int32 base = (Next() - 1) % (max_log+1);
return (Next() - 1) & ((1u << base)-1);
}
// A wall-time clock. This stub is not super-accurate, nor resistant to the
// system time changing.
class CycleTimer {
public:
CycleTimer() : real_time_us_(0) {}
void Start() {
#ifdef WIN32
QueryPerformanceCounter(&start_);
#else
gettimeofday(&start_, NULL);
#endif
}
void Stop() {
#ifdef WIN32
LARGE_INTEGER stop;
LARGE_INTEGER frequency;
QueryPerformanceCounter(&stop);
QueryPerformanceFrequency(&frequency);
double elapsed = static_cast<double>(stop.QuadPart - start_.QuadPart) /
frequency.QuadPart;
real_time_us_ += elapsed * 1e6 + 0.5;
#else
struct timeval stop;
gettimeofday(&stop, NULL);
real_time_us_ += 1000000 * (stop.tv_sec - start_.tv_sec);
real_time_us_ += (stop.tv_usec - start_.tv_usec);
#endif
}
double Get() {
return real_time_us_ * 1e-6;
}
private:
int64 real_time_us_;
#ifdef WIN32
LARGE_INTEGER start_;
#else
struct timeval start_;
#endif
};
// Minimalistic microbenchmark framework.
typedef void (*BenchmarkFunction)(int, int);
class Benchmark {
public:
Benchmark(const string& name, BenchmarkFunction function) :
name_(name), function_(function) {}
Benchmark* DenseRange(int start, int stop) {
start_ = start;
stop_ = stop;
return this;
}
void Run();
private:
const string name_;
const BenchmarkFunction function_;
int start_, stop_;
};
#define BENCHMARK(benchmark_name) \
Benchmark* Benchmark_ ## benchmark_name = \
(new Benchmark(#benchmark_name, benchmark_name))
extern Benchmark* Benchmark_BM_UFlat;
extern Benchmark* Benchmark_BM_UValidate;
extern Benchmark* Benchmark_BM_ZFlat;
void ResetBenchmarkTiming();
void StartBenchmarkTiming();
void StopBenchmarkTiming();
void SetBenchmarkLabel(const string& str);
void SetBenchmarkBytesProcessed(int64 bytes);
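// Illustrative usage sketch only (not part of the original file): a
// hypothetical benchmark wired into this framework, in the same style as the
// real BM_UFlat/BM_ZFlat benchmarks in snappy_unittest.cc. The buffer sizes,
// the label, and the string-copy body are assumptions of the sketch.
//
//   static void BM_ExampleCopy(int iters, int arg) {
//     StopBenchmarkTiming();                      // exclude setup from timing
//     string src(1 << (10 + arg), 'x');           // test case "arg" picks a size
//     string dst;
//     SetBenchmarkBytesProcessed(static_cast<int64>(iters) * src.size());
//     SetBenchmarkLabel("copy");                  // printed by Benchmark::Run()
//     StartBenchmarkTiming();
//     while (iters-- > 0) {
//       dst.assign(src.data(), src.size());
//     }
//     StopBenchmarkTiming();
//   }
//   BENCHMARK(BM_ExampleCopy)->DenseRange(0, 2);  // runs 1 KB, 2 KB and 4 KB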
#ifdef HAVE_LIBZ
// Object-oriented wrapper around zlib.
class ZLib {
public:
ZLib();
~ZLib();
// Wipe a ZLib object to a virgin state. This differs from Reset()
// in that it also breaks any state.
void Reinit();
// Call this to make a zlib buffer as good as new. Here's the only
// case where they differ:
// CompressChunk(a); CompressChunk(b); CompressChunkDone(); vs
// CompressChunk(a); Reset(); CompressChunk(b); CompressChunkDone();
// You'll want to use Reset(), then, when you interrupt a compress
// (or uncompress) in the middle of a chunk and want to start over.
void Reset();
// According to the zlib manual, when you Compress, the destination
// buffer must have size at least src + .1%*src + 12. This function
// helps you calculate that. Augment this to account for a potential
// gzip header and footer, plus a few bytes of slack.
static int MinCompressbufSize(int uncompress_size) {
return uncompress_size + uncompress_size/1000 + 40;
}
// Compresses the source buffer into the destination buffer.
// sourceLen is the byte length of the source buffer.
// Upon entry, destLen is the total size of the destination buffer,
// which must be of size at least MinCompressbufSize(sourceLen).
// Upon exit, destLen is the actual size of the compressed buffer.
//
// This function can be used to compress a whole file at once if the
// input file is mmap'ed.
//
// Returns Z_OK if success, Z_MEM_ERROR if there was not
// enough memory, Z_BUF_ERROR if there was not enough room in the
// output buffer. Note that if the output buffer is exactly the same
// size as the compressed result, we still return Z_BUF_ERROR.
// (check CL#1936076)
int Compress(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen);
// Uncompresses the source buffer into the destination buffer.
// The destination buffer must be long enough to hold the entire
// decompressed contents.
//
// Returns Z_OK on success, otherwise, it returns a zlib error code.
int Uncompress(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen);
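// Illustrative whole-buffer round trip (a sketch only, not part of the
// original file; "original" stands in for caller-provided data):
//   ZLib zlib;
//   string original = ...;
//   uLongf comp_len = ZLib::MinCompressbufSize(original.size());
//   string compressed(comp_len, '\0');
//   CHECK_EQ(Z_OK, zlib.Compress(
//       reinterpret_cast<Bytef*>(&compressed[0]), &comp_len,
//       reinterpret_cast<const Bytef*>(original.data()), original.size()));
//   uLongf uncomp_len = original.size();
//   string uncompressed(uncomp_len, '\0');
//   CHECK_EQ(Z_OK, zlib.Uncompress(
//       reinterpret_cast<Bytef*>(&uncompressed[0]), &uncomp_len,
//       reinterpret_cast<const Bytef*>(compressed.data()), comp_len));
//   // uncompressed now holds uncomp_len bytes equal to the original input.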
// Uncompress data one chunk at a time -- ie you can call this
// more than once. To get this to work you need to call per-chunk
// and "done" routines.
//
// Returns Z_OK if success, Z_MEM_ERROR if there was not
// enough memory, Z_BUF_ERROR if there was not enough room in the
// output buffer.
int UncompressAtMost(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen);
// Checks gzip footer information, as needed. Mostly this just
// makes sure the checksums match. Whenever you call this, it
// will assume the last 8 bytes from the previous UncompressChunk
// call are the footer. Returns true iff everything looks ok.
bool UncompressChunkDone();
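// Illustrative chunked-uncompression sketch (not part of the original file;
// how the compressed stream is split into (src, src_len) pieces is assumed):
//   ZLib zlib;
//   for each arriving piece (src, src_len):
//     uLong in_len = src_len;
//     uLongf out_len = sizeof(outbuf);
//     CHECK_EQ(Z_OK, zlib.UncompressAtMost(outbuf, &out_len, src, &in_len));
//     ... consume outbuf[0..out_len-1] ...
//   CHECK(zlib.UncompressChunkDone());  // verify we hit end-of-stream cleanly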
private:
int InflateInit(); // sets up the zlib inflate structure
int DeflateInit(); // sets up the zlib deflate structure
// These init the zlib data structures for compressing/uncompressing
int CompressInit(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen);
int UncompressInit(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen);
// Initialization method to be called if we hit an error while
// uncompressing. On hitting an error, call this method before
// returning the error.
void UncompressErrorInit();
// Helper function for Compress
int CompressChunkOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen,
int flush_mode);
int CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen,
int flush_mode);
// Likewise for UncompressAndUncompressChunk
int UncompressChunkOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen,
int flush_mode);
int UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen,
int flush_mode);
// Initialization method to be called if we hit an error while
// compressing. On hitting an error, call this method before
// returning the error.
void CompressErrorInit();
int compression_level_; // compression level
int window_bits_; // log base 2 of the window size used in compression
int mem_level_; // specifies the amount of memory to be used by
// compressor (1-9)
z_stream comp_stream_; // Zlib stream data structure
bool comp_init_; // True if we have initialized comp_stream_
z_stream uncomp_stream_; // Zlib stream data structure
bool uncomp_init_; // True if we have initialized uncomp_stream_
// These are used only with chunked compression.
bool first_chunk_; // true if we need to emit headers with this chunk
};
#endif // HAVE_LIBZ
} // namespace snappy
DECLARE_bool(run_microbenchmarks);
static void RunSpecifiedBenchmarks() {
if (!FLAGS_run_microbenchmarks) {
return;
}
fprintf(stderr, "Running microbenchmarks.\n");
#ifndef NDEBUG
fprintf(stderr, "WARNING: Compiled with assertions enabled, will be slow.\n");
#endif
#ifndef __OPTIMIZE__
fprintf(stderr, "WARNING: Compiled without optimization, will be slow.\n");
#endif
fprintf(stderr, "Benchmark Time(ns) CPU(ns) Iterations\n");
fprintf(stderr, "---------------------------------------------------\n");
snappy::Benchmark_BM_UFlat->Run();
snappy::Benchmark_BM_UValidate->Run();
snappy::Benchmark_BM_ZFlat->Run();
fprintf(stderr, "\n");
}
#ifndef HAVE_GTEST
static inline int RUN_ALL_TESTS() {
fprintf(stderr, "Running correctness tests.\n");
snappy::Test_CorruptedTest_VerifyCorrupted();
snappy::Test_Snappy_SimpleTests();
snappy::Test_Snappy_MaxBlowup();
snappy::Test_Snappy_RandomData();
snappy::Test_Snappy_FourByteOffset();
snappy::Test_SnappyCorruption_TruncatedVarint();
snappy::Test_SnappyCorruption_UnterminatedVarint();
snappy::Test_Snappy_ReadPastEndOfBuffer();
snappy::Test_Snappy_FindMatchLength();
snappy::Test_Snappy_FindMatchLengthRandom();
fprintf(stderr, "All tests passed.\n");
return 0;
}
#endif // HAVE_GTEST
// For main().
namespace snappy {
static void CompressFile(const char* fname);
static void UncompressFile(const char* fname);
static void MeasureFile(const char* fname);
} // namespace snappy
using snappy::CompressFile;
using snappy::UncompressFile;
using snappy::MeasureFile;
#endif // UTIL_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_

The diff for this file is not shown because of its large size.

View file

@ -1,155 +0,0 @@
// Copyright 2005 and onwards Google Inc.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// A light-weight compression algorithm. It is designed for speed of
// compression and decompression, rather than for the utmost in space
// savings.
//
// For getting better compression ratios when you are compressing data
// with long repeated sequences or compressing data that is similar to
// other data, while still compressing fast, you might look at first
// using BMDiff and then compressing the output of BMDiff with
// Snappy.
#ifndef UTIL_SNAPPY_SNAPPY_H__
#define UTIL_SNAPPY_SNAPPY_H__
#include <stddef.h>
#include <string>
#include "snappy-stubs-public.h"
namespace snappy {
class Source;
class Sink;
// ------------------------------------------------------------------------
// Generic compression/decompression routines.
// ------------------------------------------------------------------------
// Compress the bytes read from "*source" and append to "*sink". Return the
// number of bytes written.
size_t Compress(Source* source, Sink* sink);
bool GetUncompressedLength(Source* source, uint32* result);
// ------------------------------------------------------------------------
// Higher-level string based routines (should be sufficient for most users)
// ------------------------------------------------------------------------
// Sets "*output" to the compressed version of "input[0,input_length-1]".
// Original contents of *output are lost.
//
// REQUIRES: "input[]" is not an alias of "*output".
size_t Compress(const char* input, size_t input_length, string* output);
// Decompresses "compressed[0,compressed_length-1]" to "*uncompressed".
// Original contents of "*uncompressed" are lost.
//
// REQUIRES: "compressed[]" is not an alias of "*uncompressed".
//
// returns false if the message is corrupted and could not be decompressed
bool Uncompress(const char* compressed, size_t compressed_length,
string* uncompressed);
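// Example (illustrative only; "input" stands in for caller-provided data):
//    string input = ...;
//    string compressed, restored;
//    snappy::Compress(input.data(), input.size(), &compressed);
//    if (!snappy::Uncompress(compressed.data(), compressed.size(), &restored)) {
//      ... handle corrupted input ...
//    }
//    // On success, restored == input.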
// ------------------------------------------------------------------------
// Lower-level character array based routines. May be useful for
// efficiency reasons in certain circumstances.
// ------------------------------------------------------------------------
// REQUIRES: "compressed" must point to an area of memory that is at
// least "MaxCompressedLength(input_length)" bytes in length.
//
// Takes the data stored in "input[0..input_length]" and stores
// it in the array pointed to by "compressed".
//
// "*compressed_length" is set to the length of the compressed output.
//
// Example:
// char* output = new char[snappy::MaxCompressedLength(input_length)];
// size_t output_length;
// RawCompress(input, input_length, output, &output_length);
// ... Process(output, output_length) ...
// delete [] output;
void RawCompress(const char* input,
size_t input_length,
char* compressed,
size_t* compressed_length);
// Given data in "compressed[0..compressed_length-1]" generated by
// calling the Snappy::Compress routine, this routine
// stores the uncompressed data to
// uncompressed[0..GetUncompressedLength(compressed)-1]
// returns false if the message is corrupted and could not be decompressed
bool RawUncompress(const char* compressed, size_t compressed_length,
char* uncompressed);
// Given data from the byte source 'compressed' generated by calling
// the Snappy::Compress routine, this routine stores the uncompressed
// data to
// uncompressed[0..GetUncompressedLength(compressed,compressed_length)-1]
// returns false if the message is corrupted and could not be decompressed
bool RawUncompress(Source* compressed, char* uncompressed);
// Returns the maximal size of the compressed representation of
// input data that is "source_bytes" bytes in length;
size_t MaxCompressedLength(size_t source_bytes);
// REQUIRES: "compressed[]" was produced by RawCompress() or Compress()
// Returns true and stores the length of the uncompressed data in
// *result normally. Returns false on parsing error.
// This operation takes O(1) time.
bool GetUncompressedLength(const char* compressed, size_t compressed_length,
size_t* result);
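// Example (illustrative only), pairing this with RawUncompress() above:
//    size_t uncompressed_length;
//    if (snappy::GetUncompressedLength(compressed, compressed_length,
//                                      &uncompressed_length)) {
//      char* output = new char[uncompressed_length];
//      if (snappy::RawUncompress(compressed, compressed_length, output)) {
//        ... Process(output, uncompressed_length) ...
//      }
//      delete [] output;
//    }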
// Returns true iff the contents of "compressed[]" can be uncompressed
// successfully. Does not return the uncompressed data. Takes
// time proportional to compressed_length, but is usually at least
// a factor of four faster than actual decompression.
bool IsValidCompressedBuffer(const char* compressed,
size_t compressed_length);
// *** DO NOT CHANGE THE VALUE OF kBlockSize ***
//
// New Compression code chops up the input into blocks of at most
// the following size. This ensures that back-references in the
// output never cross kBlockSize block boundaries. This can be
// helpful in implementing blocked decompression. However the
// decompression code should not rely on this guarantee since older
// compression code may not obey it.
static const int kBlockLog = 15;
static const int kBlockSize = 1 << kBlockLog;
static const int kMaxHashTableBits = 14;
static const int kMaxHashTableSize = 1 << kMaxHashTableBits;
} // end namespace snappy
#endif // UTIL_SNAPPY_SNAPPY_H__

The diff for this file is not shown because of its large size.

View file

@ -143,7 +143,6 @@
<li><a href="about:license#xdg">Red Hat xdg_user_dir_lookup License</a></li>
<li><a href="about:license#hunspell-ru">Russian Spellchecking Dictionary License</a></li>
<li><a href="about:license#skia">Skia License</a></li>
<li><a href="about:license#snappy">Snappy License</a></li>
<li><a href="about:license#sparkle">Sparkle License</a></li>
<li><a href="about:license#sunsoft">SunSoft License</a></li>
<li><a href="about:license#ucal">University of California License</a></li>
@ -3311,44 +3310,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
<hr>
<h1><a name="snappy"></a>Snappy License</h1>
<p>This license applies to certain files in the directory
<span class="path">other-licenses/snappy/</span>.</p>
<pre>
Copyright 2011, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
</pre>
<hr>
<h1><a name="sparkle"></a>Sparkle License</h1>
<p>This license applies to certain files in the directory

View file

@ -166,7 +166,6 @@ STATIC_LIBS += \
ucvutil_s \
chromium_s \
mozreg_s \
snappy_s \
$(NULL)
# component libraries

View file

@ -225,8 +225,6 @@ tier_platform_dirs += startupcache
tier_platform_dirs += js/ductwork/debugger
tier_platform_dirs += other-licenses/snappy
ifdef APP_LIBXUL_STATICDIRS
# Applications can cheat and ask for code to be
# built before libxul so libxul can be linked against it.
@ -272,3 +270,4 @@ tier_platform_dirs += testing/tools/screenshot
tier_platform_dirs += testing/peptest
tier_platform_dirs += testing/mozbase
endif