Bug 1131776 - Use WITHOUT ROWID tables for IndexedDB, r=janv.

Ben Turner 2015-02-16 09:48:14 -08:00
Parent ae054a3620
Commit ad62e4a206
13 changed files with 4230 additions and 721 deletions

View File

@ -833,13 +833,15 @@ MainProcessRunnable::OpenCacheFileForWrite()
mQuotaObject = qm->GetQuotaObject(mPersistence, mGroup, mOrigin, file);
NS_ENSURE_STATE(mQuotaObject);
if (!mQuotaObject->MaybeAllocateMoreSpace(0, mWriteParams.mSize)) {
if (!mQuotaObject->MaybeUpdateSize(mWriteParams.mSize,
/* aTruncate */ false)) {
// If the request fails, it might be because mOrigin is using too much
// space (MaybeAllocateMoreSpace will not evict our own origin since it is
// space (MaybeUpdateSize will not evict our own origin since it is
// active). Try to make some space by evicting LRU entries until there is
// enough space.
EvictEntries(mDirectory, mGroup, mOrigin, mWriteParams.mSize, mMetadata);
if (!mQuotaObject->MaybeAllocateMoreSpace(0, mWriteParams.mSize)) {
if (!mQuotaObject->MaybeUpdateSize(mWriteParams.mSize,
/* aTruncate */ false)) {
mResult = JS::AsmJSCache_QuotaExceeded;
return NS_ERROR_FAILURE;
}

File diff suppressed because it is too large

View File

@ -13,6 +13,7 @@
#include "mozilla/Endian.h"
#include "mozilla/FloatingPoint.h"
#include "mozIStorageStatement.h"
#include "mozIStorageValueArray.h"
#include "nsAlgorithm.h"
#include "nsJSUtils.h"
#include "ReportInternalError.h"
@ -27,10 +28,10 @@ namespace indexedDB {
Basic strategy is the following
Numbers: 1 n n n n n n n n ("n"s are encoded 64bit float)
Dates: 2 n n n n n n n n ("n"s are encoded 64bit float)
Strings: 3 s s s ... 0 ("s"s are encoded unicode bytes)
Arrays: 4 i i i ... 0 ("i"s are encoded array items)
Numbers: 0x10 n n n n n n n n ("n"s are encoded 64bit float)
Dates: 0x20 n n n n n n n n ("n"s are encoded 64bit float)
Strings: 0x30 s s s ... 0 ("s"s are encoded unicode bytes)
Arrays: 0x50 i i i ... 0 ("i"s are encoded array items)
When encoding floats, 64bit IEEE 754 are almost sortable, except that
@ -55,66 +56,59 @@ namespace indexedDB {
When encoding Arrays, we use an additional trick. Rather than adding a byte
containing the value '4' to indicate type, we instead add 4 to the next byte.
containing the value 0x50 to indicate type, we instead add 0x50 to the next byte.
This is usually the byte containing the type of the first item in the array.
So simple examples are
["foo"] 7 s s s 0 0 // 7 is 3 + 4
[1, 2] 5 n n n n n n n n 1 n n n n n n n n 0 // 5 is 1 + 4
["foo"] 0x80 s s s 0 0 // 0x80 is 0x30 + 0x50
[1, 2] 0x60 n n n n n n n n 0x10 n n n n n n n n 0 // 0x60 is 0x10 + 0x50
We do this iteratively if the first item in the array is also an array
[["foo"]] 11 s s s 0 0 0
[["foo"]] 0xA0 s s s 0 0 0
However, to avoid overflow in the byte, we only do this 3 times. If the first
item in an array is an array, and that array also has an array as first item,
we simply write out the total value accumulated so far and then follow the
"normal" rules.
[[["foo"]]] 12 3 s s s 0 0 0 0
[[["foo"]]] 0xF0 0x30 s s s 0 0 0 0
There is another edge case that can happen though, which is that the array
doesn't have a first item to which we can add 4 to the type. Instead the
doesn't have a first item to which we can add 0x50 to the type. Instead the
next byte would normally be the array terminator (per basic-strategy table)
so we simply add the 4 there.
so we simply add the 0x50 there.
[[]] 8 0 // 8 is 4 + 4 + 0
[] 4 // 4 is 4 + 0
[[], "foo"] 8 3 s s s 0 0 // 8 is 4 + 4 + 0
[[]] 0xA0 0 // 0xA0 is 0x50 + 0x50 + 0
[] 0x50 // 0x50 is 0x50 + 0
[[], "foo"] 0xA0 0x30 s s s 0 0 // 0xA0 is 0x50 + 0x50 + 0
Note that the max-3-times rule kicks in before we get a chance to add to the
array terminator
[[[]]] 12 0 0 0 // 12 is 4 + 4 + 4
We could use a much higher number than 3 at no complexity or performance cost,
however it seems unlikely that it'll make a practical difference, and the low
limit makes testing easier.
[[[]]] 0xF0 0 0 0 // 0xF0 is 0x50 + 0x50 + 0x50
As a final optimization we do a post-encoding step which drops all 0s at the
end of the encoded buffer.
"foo" // 3 s s s
1 // 1 bf f0
["a", "b"] // 7 s 3 s
[1, 2] // 5 bf f0 0 0 0 0 0 0 1 c0
[[]] // 8
"foo" // 0x30 s s s
1 // 0x10 bf f0
["a", "b"] // 0x80 s 0 0x30 s
[1, 2] // 0x60 bf f0 0 0 0 0 0 0 0x10 c0
[[]] // 0xA0
*/
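To make the collapse rules above concrete, here is a small standalone sketch (not the real Key.cpp) that encodes ASCII-string and array keys only. The constant names mirror the enum added to Key.h in this patch; the string encoding is simplified to one byte per character, stored as the character value plus one so that 0 stays reserved for the terminator, whereas the real implementation uses a variable-length Unicode encoding and also handles numbers and dates. Compiling and running it reproduces the trimmed byte sequences listed above.

// Minimal standalone sketch of the array-collapse key encoding described
// above.  Only ASCII strings and arrays are handled.
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct KeyValue {
  bool isString;
  std::string str;                  // valid when isString is true
  std::vector<KeyValue> array;      // valid when isString is false
};

static KeyValue Str(const char* aStr) { return {true, aStr, {}}; }
static KeyValue Arr(std::vector<KeyValue> aItems) { return {false, "", std::move(aItems)}; }

const uint8_t eTerminator = 0;
const uint8_t eString = 0x30;
const uint8_t eArray = 0x50;        // == eMaxType
const int kMaxArrayCollapse = 3;

static void Encode(const KeyValue& aValue, uint8_t aTypeOffset,
                   std::vector<uint8_t>& aBuffer)
{
  if (aValue.isString) {
    aBuffer.push_back(eString + aTypeOffset);
    for (char c : aValue.str) {
      aBuffer.push_back(uint8_t(c) + 1);          // keep 0 free for the terminator
    }
    aBuffer.push_back(eTerminator);
    return;
  }

  aTypeOffset += eArray;
  if (aTypeOffset == eArray * kMaxArrayCollapse) {
    // Already collapsed three levels: write the accumulated offset and reset.
    aBuffer.push_back(aTypeOffset);
    aTypeOffset = 0;
  }
  for (const KeyValue& item : aValue.array) {
    Encode(item, aTypeOffset, aBuffer);
    aTypeOffset = 0;                // only the first item absorbs the offset
  }
  aBuffer.push_back(eTerminator + aTypeOffset);
}

static std::vector<uint8_t> EncodeKey(const KeyValue& aValue)
{
  std::vector<uint8_t> buffer;
  Encode(aValue, 0, buffer);
  while (!buffer.empty() && buffer.back() == 0) {
    buffer.pop_back();              // post-encoding step: drop trailing zeros
  }
  return buffer;
}

int main()
{
  auto dump = [](const char* aLabel, const KeyValue& aValue) {
    printf("%-14s", aLabel);
    for (uint8_t byte : EncodeKey(aValue)) {
      printf(" %02x", byte);
    }
    printf("\n");
  };

  dump("[\"foo\"]", Arr({Str("foo")}));                    // 80 67 70 70
  dump("[]", Arr({}));                                     // 50
  dump("[[]]", Arr({Arr({})}));                            // a0
  dump("[[], \"foo\"]", Arr({Arr({}), Str("foo")}));       // a0 30 67 70 70
  dump("[[[\"foo\"]]]", Arr({Arr({Arr({Str("foo")})})}));  // f0 30 67 70 70
  return 0;
}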
const int MaxArrayCollapse = 3;
const int MaxRecursionDepth = 256;
nsresult
Key::EncodeJSValInternal(JSContext* aCx, JS::Handle<JS::Value> aVal,
uint8_t aTypeOffset, uint16_t aRecursionDepth)
{
NS_ENSURE_TRUE(aRecursionDepth < MaxRecursionDepth, NS_ERROR_DOM_INDEXEDDB_DATA_ERR);
static_assert(eMaxType * MaxArrayCollapse < 256,
static_assert(eMaxType * kMaxArrayCollapse < 256,
"Unable to encode jsvals.");
if (NS_WARN_IF(aRecursionDepth == kMaxRecursionDepth)) {
return NS_ERROR_DOM_INDEXEDDB_DATA_ERR;
}
if (aVal.isString()) {
nsAutoJSString str;
if (!str.init(aCx, aVal)) {
@ -139,12 +133,12 @@ Key::EncodeJSValInternal(JSContext* aCx, JS::Handle<JS::Value> aVal,
if (JS_IsArrayObject(aCx, obj)) {
aTypeOffset += eMaxType;
if (aTypeOffset == eMaxType * MaxArrayCollapse) {
if (aTypeOffset == eMaxType * kMaxArrayCollapse) {
mBuffer.Append(aTypeOffset);
aTypeOffset = 0;
}
NS_ASSERTION((aTypeOffset % eMaxType) == 0 &&
aTypeOffset < (eMaxType * MaxArrayCollapse),
aTypeOffset < (eMaxType * kMaxArrayCollapse),
"Wrong typeoffset");
uint32_t length;
@ -192,7 +186,9 @@ Key::DecodeJSValInternal(const unsigned char*& aPos, const unsigned char* aEnd,
JSContext* aCx, uint8_t aTypeOffset, JS::MutableHandle<JS::Value> aVal,
uint16_t aRecursionDepth)
{
NS_ENSURE_TRUE(aRecursionDepth < MaxRecursionDepth, NS_ERROR_DOM_INDEXEDDB_DATA_ERR);
if (NS_WARN_IF(aRecursionDepth == kMaxRecursionDepth)) {
return NS_ERROR_DOM_INDEXEDDB_DATA_ERR;
}
if (*aPos - aTypeOffset >= eArray) {
JS::Rooted<JSObject*> array(aCx, JS_NewArrayObject(aCx, 0));
@ -204,7 +200,7 @@ Key::DecodeJSValInternal(const unsigned char*& aPos, const unsigned char* aEnd,
aTypeOffset += eMaxType;
if (aTypeOffset == eMaxType * MaxArrayCollapse) {
if (aTypeOffset == eMaxType * kMaxArrayCollapse) {
++aPos;
aTypeOffset = 0;
}
@ -331,10 +327,9 @@ nsresult
Key::DecodeJSVal(const unsigned char*& aPos,
const unsigned char* aEnd,
JSContext* aCx,
uint8_t aTypeOffset,
JS::MutableHandle<JS::Value> aVal)
{
return DecodeJSValInternal(aPos, aEnd, aCx, aTypeOffset, aVal, 0);
return DecodeJSValInternal(aPos, aEnd, aCx, 0, aVal, 0);
}
// static
@ -466,16 +461,14 @@ nsresult
Key::SetFromStatement(mozIStorageStatement* aStatement,
uint32_t aIndex)
{
uint8_t* data;
uint32_t dataLength = 0;
return SetFromSource(aStatement, aIndex);
}
nsresult rv = aStatement->GetBlob(aIndex, &dataLength, &data);
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
mBuffer.Adopt(
reinterpret_cast<char*>(const_cast<uint8_t*>(data)), dataLength);
return NS_OK;
nsresult
Key::SetFromValueArray(mozIStorageValueArray* aValues,
uint32_t aIndex)
{
return SetFromSource(aValues, aIndex);
}
nsresult
@ -509,7 +502,7 @@ Key::ToJSVal(JSContext* aCx,
}
const unsigned char* pos = BufferStart();
nsresult rv = DecodeJSVal(pos, BufferEnd(), aCx, 0, aVal);
nsresult rv = DecodeJSVal(pos, BufferEnd(), aCx, aVal);
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
@ -543,6 +536,23 @@ Key::AppendItem(JSContext* aCx, bool aFirstOfArray, JS::Handle<JS::Value> aVal)
return NS_OK;
}
template <typename T>
nsresult
Key::SetFromSource(T* aSource, uint32_t aIndex)
{
const uint8_t* data;
uint32_t dataLength = 0;
nsresult rv = aSource->GetSharedBlob(aIndex, &dataLength, &data);
if (NS_WARN_IF(NS_FAILED(rv))) {
return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
}
mBuffer.Assign(reinterpret_cast<const char*>(data), dataLength);
return NS_OK;
}
#ifdef DEBUG
void

View File

@ -11,6 +11,7 @@
#include "nsString.h"
class mozIStorageStatement;
class mozIStorageValueArray;
namespace IPC {
@ -29,11 +30,28 @@ class Key
nsCString mBuffer;
public:
enum {
eTerminator = 0,
eFloat = 0x10,
eDate = 0x20,
eString = 0x30,
eArray = 0x50,
eMaxType = eArray
};
static const uint8_t kMaxArrayCollapse = uint8_t(3);
static const uint8_t kMaxRecursionDepth = uint8_t(64);
Key()
{
Unset();
}
explicit
Key(const nsACString& aBuffer)
: mBuffer(aBuffer)
{ }
Key&
operator=(const nsAString& aString)
{
@ -111,25 +129,25 @@ public:
bool
IsFloat() const
{
return !IsUnset() && mBuffer.First() == eFloat;
return !IsUnset() && *BufferStart() == eFloat;
}
bool
IsDate() const
{
return !IsUnset() && mBuffer.First() == eDate;
return !IsUnset() && *BufferStart() == eDate;
}
bool
IsString() const
{
return !IsUnset() && mBuffer.First() == eString;
return !IsUnset() && *BufferStart() == eString;
}
bool
IsArray() const
{
return !IsUnset() && mBuffer.First() >= eArray;
return !IsUnset() && *BufferStart() >= eArray;
}
double
@ -208,6 +226,9 @@ public:
nsresult
SetFromStatement(mozIStorageStatement* aStatement, uint32_t aIndex);
nsresult
SetFromValueArray(mozIStorageValueArray* aValues, uint32_t aIndex);
static int16_t
CompareKeys(Key& aFirst, Key& aSecond)
{
@ -237,15 +258,6 @@ private:
return reinterpret_cast<const unsigned char*>(mBuffer.EndReading());
}
enum {
eTerminator = 0,
eFloat = 1,
eDate = 2,
eString = 3,
eArray = 4,
eMaxType = eArray
};
// Encoding helper. Trims trailing zeros off of mBuffer as a post-processing
// step.
void
@ -275,7 +287,6 @@ private:
DecodeJSVal(const unsigned char*& aPos,
const unsigned char* aEnd,
JSContext* aCx,
uint8_t aTypeOffset,
JS::MutableHandle<JS::Value> aVal);
static void
@ -300,6 +311,10 @@ private:
JS::MutableHandle<JS::Value> aVal,
uint16_t aRecursionDepth);
template <typename T>
nsresult
SetFromSource(T* aSource, uint32_t aIndex);
void
Assert(bool aCondition) const
#ifdef DEBUG

Binary data
dom/indexedDB/test/unit/schema18upgrade_profile.zip Normal file

Binary file not shown.

View File

@ -0,0 +1,336 @@
/**
* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/
*/
var testGenerator = testSteps();
function testSteps()
{
const testName = "schema18upgrade";
const testKeys = [
-1/0,
-1.7e308,
-10000,
-2,
-1.5,
-1,
-1.00001e-200,
-1e-200,
0,
1e-200,
1.00001e-200,
1,
2,
10000,
1.7e308,
1/0,
new Date("1750-01-02"),
new Date("1800-12-31T12:34:56.001Z"),
new Date(-1000),
new Date(-10),
new Date(-1),
new Date(0),
new Date(1),
new Date(2),
new Date(1000),
new Date("1971-01-01"),
new Date("1971-01-01T01:01:01Z"),
new Date("1971-01-01T01:01:01.001Z"),
new Date("1971-01-01T01:01:01.01Z"),
new Date("1971-01-01T01:01:01.1Z"),
new Date("1980-02-02"),
new Date("3333-03-19T03:33:33.333Z"),
"",
"\x00",
"\x00\x00",
"\x00\x01",
"\x01",
"\x02",
"\x03",
"\x04",
"\x07",
"\x08",
"\x0F",
"\x10",
"\x1F",
"\x20",
"01234",
"\x3F",
"\x40",
"A",
"A\x00",
"A1",
"ZZZZ",
"a",
"a\x00",
"aa",
"azz",
"}",
"\x7E",
"\x7F",
"\x80",
"\xFF",
"\u0100",
"\u01FF",
"\u0200",
"\u03FF",
"\u0400",
"\u07FF",
"\u0800",
"\u0FFF",
"\u1000",
"\u1FFF",
"\u2000",
"\u3FFF",
"\u4000",
"\u7FFF",
"\u8000",
"\uD800",
"\uD800a",
"\uD800\uDC01",
"\uDBFF",
"\uDC00",
"\uDFFF\uD800",
"\uFFFE",
"\uFFFF",
"\uFFFF\x00",
"\uFFFFZZZ",
[],
[-1/0],
[-1],
[0],
[1],
[1, "a"],
[1, []],
[1, [""]],
[2, 3],
[2, 3.0000000000001],
[12, [[]]],
[12, [[[]]]],
[12, [[[""]]]],
[12, [[["foo"]]]],
[12, [[[[[3]]]]]],
[12, [[[[[[3]]]]]]],
[12, [[[[[[3],[[[[[4.2]]]]]]]]]]],
[new Date(-1)],
[new Date(1)],
[""],
["", [[]]],
["", [[[]]]],
["abc"],
["abc", "def"],
["abc\x00"],
["abc\x00", "\x00\x01"],
["abc\x00", "\x00def"],
["abc\x00\x00def"],
["x", [[]]],
["x", [[[]]]],
[[]],
[[],"foo"],
[[],[]],
[[[]]],
[[[]], []],
[[[]], [[]]],
[[[]], [[1]]],
[[[]], [[[]]]],
[[[1]]],
[[[[]], []]],
];
const testString =
"abcdefghijklmnopqrstuvwxyz0123456789`~!@#$%^&*()-_+=,<.>/?\\|";
clearAllDatabases(continueToNextStepSync);
yield undefined;
info("Installing profile");
installPackagedProfile(testName + "_profile");
info("Opening database with no version");
let request = indexedDB.open(testName);
request.onerror = errorHandler;
request.onupgradeneeded = unexpectedSuccessHandler;
request.onsuccess = grabEventAndContinueHandler;
let event = yield undefined;
let db = event.target.result;
is(db.version, 1, "Correct db version");
let transaction = db.transaction(testName);
transaction.oncomplete = grabEventAndContinueHandler;
let objectStore = transaction.objectStore(testName);
let index = objectStore.index("uniqueIndex");
info("Starting 'uniqueIndex' cursor");
let keyIndex = 0;
index.openCursor().onsuccess = event => {
let cursor = event.target.result;
if (cursor) {
info("Comparing " + JSON.stringify(cursor.primaryKey) + " to " +
JSON.stringify(testKeys[cursor.key]) +
" [" + cursor.key + "]");
is(indexedDB.cmp(cursor.primaryKey, testKeys[cursor.key]), 0,
"Keys compare equally via 'indexedDB.cmp'");
is(compareKeys(cursor.primaryKey, testKeys[cursor.key]), true,
"Keys compare equally via 'compareKeys'");
let indexProperty = cursor.value.index;
is(Array.isArray(indexProperty), true, "index property is Array");
is(indexProperty[0], cursor.key, "index property first item correct");
is(indexProperty[1], cursor.key + 1, "index property second item correct");
is(cursor.key, keyIndex, "Cursor key property is correct");
is(cursor.value.testString, testString, "Test string compared equally");
keyIndex++;
cursor.continue();
}
};
yield undefined;
is(keyIndex, testKeys.length, "Saw all keys");
transaction = db.transaction(testName, "readwrite");
transaction.oncomplete = grabEventAndContinueHandler;
objectStore = transaction.objectStore(testName);
index = objectStore.index("index");
info("Getting all 'index' keys");
index.getAllKeys().onsuccess = grabEventAndContinueHandler;
event = yield undefined;
is(event.target.result.length, testKeys.length * 2, "Got all keys");
info("Starting objectStore cursor");
objectStore.openCursor().onsuccess = event => {
let cursor = event.target.result;
if (cursor) {
let value = cursor.value;
is(value.testString, testString, "Test string compared equally");
delete value.index;
cursor.update(value);
cursor.continue();
} else {
continueToNextStepSync();
}
};
yield undefined;
info("Getting all 'index' keys");
index.getAllKeys().onsuccess = grabEventAndContinueHandler;
event = yield undefined;
is(event.target.result.length, 0, "Removed all keys");
yield undefined;
db.close();
info("Opening database with new version");
request = indexedDB.open(testName, 2);
request.onerror = errorHandler;
request.onupgradeneeded = grabEventAndContinueHandler;
request.onsuccess = grabEventAndContinueHandler;
event = yield undefined;
info("Deleting indexes");
objectStore = event.target.transaction.objectStore(testName);
objectStore.deleteIndex("index");
objectStore.deleteIndex("uniqueIndex");
event = yield undefined;
db = event.target.result;
transaction = db.transaction(testName, "readwrite");
transaction.oncomplete = grabEventAndContinueHandler;
info("Starting objectStore cursor");
objectStore = transaction.objectStore(testName);
objectStore.openCursor().onsuccess = event => {
let cursor = event.target.result;
if (cursor) {
let value = cursor.value;
is(value.testString, testString, "Test string compared equally");
value.index = value.keyPath;
cursor.update(value);
cursor.continue();
}
};
event = yield undefined;
db.close();
info("Opening database with new version");
request = indexedDB.open(testName, 3);
request.onerror = errorHandler;
request.onupgradeneeded = grabEventAndContinueHandler;
request.onsuccess = grabEventAndContinueHandler;
event = yield undefined;
info("Creating indexes");
objectStore = event.target.transaction.objectStore(testName);
objectStore.createIndex("index", "index");
event = yield undefined;
db = event.target.result;
transaction = db.transaction(testName);
transaction.oncomplete = grabEventAndContinueHandler;
objectStore = transaction.objectStore(testName);
index = objectStore.index("index");
info("Starting 'index' cursor");
keyIndex = 0;
index.openCursor().onsuccess = event => {
let cursor = event.target.result;
if (cursor) {
is(indexedDB.cmp(cursor.primaryKey, testKeys[keyIndex]), 0,
"Keys compare equally via 'indexedDB.cmp'");
is(compareKeys(cursor.primaryKey, testKeys[keyIndex]), true,
"Keys compare equally via 'compareKeys'");
is(indexedDB.cmp(cursor.key, testKeys[keyIndex]), 0,
"Keys compare equally via 'indexedDB.cmp'");
is(compareKeys(cursor.key, testKeys[keyIndex]), true,
"Keys compare equally via 'compareKeys'");
let indexProperty = cursor.value.index;
is(indexedDB.cmp(indexProperty, testKeys[keyIndex]), 0,
"Keys compare equally via 'indexedDB.cmp'");
is(compareKeys(indexProperty, testKeys[keyIndex]), true,
"Keys compare equally via 'compareKeys'");
is(cursor.value.testString, testString, "Test string compared equally");
keyIndex++;
cursor.continue();
}
};
yield undefined;
is(keyIndex, testKeys.length, "Added all keys again");
finishTest();
yield undefined;
}

View File

@ -15,6 +15,7 @@ support-files =
GlobalObjectsComponent.manifest
GlobalObjectsModule.jsm
GlobalObjectsSandbox.js
schema18upgrade_profile.zip
xpcshell-shared.ini
[include:xpcshell-shared.ini]
@ -29,6 +30,7 @@ skip-if = toolkit == 'android'
skip-if = true
[test_lowDiskSpace.js]
[test_readwriteflush_disabled.js]
[test_schema18upgrade.js]
[test_temporary_storage.js]
# bug 951017: intermittent failure on Android x86 emulator
skip-if = os == "android" && processor == "x86"

View File

@ -23,7 +23,7 @@ FileQuotaStream<FileStreamBase>::SetEOF()
nsresult rv = FileStreamBase::Tell(&offset);
NS_ENSURE_SUCCESS(rv, rv);
mQuotaObject->UpdateSize(offset);
mQuotaObject->MaybeUpdateSize(offset, /* aTruncate */ true);
}
return NS_OK;
@ -56,7 +56,7 @@ FileQuotaStream<FileStreamBase>::DoOpen()
NS_ENSURE_SUCCESS(rv, rv);
if (mQuotaObject && (FileStreamBase::mOpenParams.ioFlags & PR_TRUNCATE)) {
mQuotaObject->UpdateSize(0);
mQuotaObject->MaybeUpdateSize(0, /* aTruncate */ true);
}
return NS_OK;
@ -75,8 +75,11 @@ FileQuotaStreamWithWrite<FileStreamBase>::Write(const char* aBuf,
rv = FileStreamBase::Tell(&offset);
NS_ENSURE_SUCCESS(rv, rv);
MOZ_ASSERT(INT64_MAX - offset >= int64_t(aCount));
if (!FileQuotaStreamWithWrite::
mQuotaObject->MaybeAllocateMoreSpace(offset, aCount)) {
mQuotaObject->MaybeUpdateSize(offset + int64_t(aCount),
/* aTruncate */ false)) {
return NS_ERROR_FILE_NO_DEVICE_SPACE;
}
}

View File

@ -10,54 +10,47 @@
#include "QuotaManager.h"
#include "Utilities.h"
#ifdef DEBUG
#include "nsComponentManagerUtils.h"
#include "nsIFile.h"
#include "nsXPCOMCID.h"
#endif
USING_QUOTA_NAMESPACE
namespace {
template <typename T, typename U>
void
AssertPositiveIntegers(T aOne, U aTwo)
template <typename T, bool = mozilla::IsUnsigned<T>::value>
struct IntChecker
{
static_assert(mozilla::IsIntegral<T>::value, "Not an integer!");
static_assert(mozilla::IsIntegral<U>::value, "Not an integer!");
MOZ_ASSERT(aOne >= 0);
MOZ_ASSERT(aTwo >= 0);
static void
Assert(T aInt)
{
static_assert(mozilla::IsIntegral<T>::value, "Not an integer!");
MOZ_ASSERT(aInt >= 0);
}
};
template <typename T>
struct IntChecker<T, true>
{
static void
Assert(T aInt)
{
static_assert(mozilla::IsIntegral<T>::value, "Not an integer!");
}
};
template <typename T>
void
AssertNoOverflow(uint64_t aDest, T aArg)
{
IntChecker<T>::Assert(aDest);
IntChecker<T>::Assert(aArg);
MOZ_ASSERT(UINT64_MAX - aDest >= uint64_t(aArg));
}
template <typename T, typename U>
void
AssertNoOverflow(T aOne, U aTwo)
AssertNoUnderflow(T aDest, U aArg)
{
AssertPositiveIntegers(aOne, aTwo);
AssertNoOverflow(uint64_t(aOne), uint64_t(aTwo));
}
template <>
void
AssertNoOverflow<uint64_t, uint64_t>(uint64_t aOne, uint64_t aTwo)
{
MOZ_ASSERT(UINT64_MAX - aOne >= aTwo);
}
template <typename T, typename U>
void
AssertNoUnderflow(T aOne, U aTwo)
{
AssertPositiveIntegers(aOne, aTwo);
AssertNoUnderflow(uint64_t(aOne), uint64_t(aTwo));
}
template <>
void
AssertNoUnderflow<uint64_t, uint64_t>(uint64_t aOne, uint64_t aTwo)
{
MOZ_ASSERT(aOne >= aTwo);
IntChecker<T>::Assert(aDest);
IntChecker<T>::Assert(aArg);
MOZ_ASSERT(uint64_t(aDest) >= uint64_t(aArg));
}
} // anonymous namespace
@ -112,87 +105,51 @@ QuotaObject::Release()
delete this;
}
void
QuotaObject::UpdateSize(int64_t aSize)
{
MOZ_ASSERT(aSize >= 0);
#ifdef DEBUG
{
nsCOMPtr<nsIFile> file = do_CreateInstance(NS_LOCAL_FILE_CONTRACTID);
MOZ_ASSERT(file);
MOZ_ASSERT(NS_SUCCEEDED(file->InitWithPath(mPath)));
bool exists;
MOZ_ASSERT(NS_SUCCEEDED(file->Exists(&exists)));
if (exists) {
int64_t fileSize;
MOZ_ASSERT(NS_SUCCEEDED(file->GetFileSize(&fileSize)));
MOZ_ASSERT(aSize == fileSize);
} else {
MOZ_ASSERT(!aSize);
}
}
#endif
QuotaManager* quotaManager = QuotaManager::Get();
NS_ASSERTION(quotaManager, "Shouldn't be null!");
MutexAutoLock lock(quotaManager->mQuotaMutex);
if (!mOriginInfo || mSize == aSize) {
return;
}
AssertNoUnderflow(quotaManager->mTemporaryStorageUsage, mSize);
quotaManager->mTemporaryStorageUsage -= mSize;
GroupInfo* groupInfo = mOriginInfo->mGroupInfo;
AssertNoUnderflow(groupInfo->mUsage, mSize);
groupInfo->mUsage -= mSize;
AssertNoUnderflow(mOriginInfo->mUsage, mSize);
mOriginInfo->mUsage -= mSize;
mSize = aSize;
AssertNoOverflow(mOriginInfo->mUsage, mSize);
mOriginInfo->mUsage += mSize;
AssertNoOverflow(groupInfo->mUsage, mSize);
groupInfo->mUsage += mSize;
AssertNoOverflow(quotaManager->mTemporaryStorageUsage, mSize);
quotaManager->mTemporaryStorageUsage += mSize;
}
bool
QuotaObject::MaybeAllocateMoreSpace(int64_t aOffset, int32_t aCount)
QuotaObject::MaybeUpdateSize(int64_t aSize, bool aTruncate)
{
AssertNoOverflow(aOffset, aCount);
int64_t end = aOffset + aCount;
QuotaManager* quotaManager = QuotaManager::Get();
NS_ASSERTION(quotaManager, "Shouldn't be null!");
MOZ_ASSERT(quotaManager);
MutexAutoLock lock(quotaManager->mQuotaMutex);
if (mSize >= end || !mOriginInfo) {
if (mSize == aSize) {
return true;
}
if (!mOriginInfo) {
mSize = aSize;
return true;
}
GroupInfo* groupInfo = mOriginInfo->mGroupInfo;
MOZ_ASSERT(groupInfo);
if (mSize > aSize) {
if (aTruncate) {
const int64_t delta = mSize - aSize;
AssertNoUnderflow(quotaManager->mTemporaryStorageUsage, delta);
quotaManager->mTemporaryStorageUsage -= delta;
AssertNoUnderflow(groupInfo->mUsage, delta);
groupInfo->mUsage -= delta;
AssertNoUnderflow(mOriginInfo->mUsage, delta);
mOriginInfo->mUsage -= delta;
mSize = aSize;
}
return true;
}
MOZ_ASSERT(mSize < aSize);
nsRefPtr<GroupInfo> complementaryGroupInfo =
groupInfo->mGroupInfoPair->LockedGetGroupInfo(
ComplementaryPersistenceType(groupInfo->mPersistenceType));
AssertNoUnderflow(end, mSize);
uint64_t delta = end - mSize;
uint64_t delta = aSize - mSize;
AssertNoOverflow(mOriginInfo->mUsage, delta);
uint64_t newUsage = mOriginInfo->mUsage + delta;
@ -272,8 +229,8 @@ QuotaObject::MaybeAllocateMoreSpace(int64_t aOffset, int32_t aCount)
// We unlocked and relocked several times so we need to recompute all the
// essential variables and recheck the group limit.
AssertNoUnderflow(end, mSize);
delta = end - mSize;
AssertNoUnderflow(aSize, mSize);
delta = aSize - mSize;
AssertNoOverflow(mOriginInfo->mUsage, delta);
newUsage = mOriginInfo->mUsage + delta;
@ -314,8 +271,8 @@ QuotaObject::MaybeAllocateMoreSpace(int64_t aOffset, int32_t aCount)
// Some other thread could increase the size in the meantime, but no more
// than this one.
MOZ_ASSERT(mSize < end);
mSize = end;
MOZ_ASSERT(mSize < aSize);
mSize = aSize;
// Finally, release IO thread only objects and allow next synchronized
// ops for the evicted origins.
@ -330,7 +287,7 @@ QuotaObject::MaybeAllocateMoreSpace(int64_t aOffset, int32_t aCount)
groupInfo->mUsage = newGroupUsage;
quotaManager->mTemporaryStorageUsage = newTemporaryStorageUsage;
mSize = end;
mSize = aSize;
return true;
}
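As a rough illustration of the accounting MaybeUpdateSize performs, the following standalone model (hypothetical names, single-threaded, no eviction, no global temporary-storage limit, no persistence types) shows the three cases: a non-truncating shrink is ignored, a truncating shrink releases the delta from the origin/group/global usage counters, and a grow charges the delta after checking the group limit.

// Simplified model of the size-delta accounting; not the real QuotaManager.
#include <cassert>
#include <cstdint>

struct UsageModel {
  uint64_t originUsage = 0;
  uint64_t groupUsage = 0;
  uint64_t globalUsage = 0;
  uint64_t fileSize = 0;
  uint64_t groupLimit;

  explicit UsageModel(uint64_t aGroupLimit) : groupLimit(aGroupLimit) { }

  bool MaybeUpdateSize(uint64_t aSize, bool aTruncate)
  {
    if (aSize == fileSize) {
      return true;
    }
    if (aSize < fileSize) {
      if (aTruncate) {                    // non-truncating shrinks are ignored
        uint64_t delta = fileSize - aSize;
        originUsage -= delta;
        groupUsage -= delta;
        globalUsage -= delta;
        fileSize = aSize;
      }
      return true;
    }
    uint64_t delta = aSize - fileSize;    // growing: check the limit first
    if (groupUsage + delta > groupLimit) {
      return false;                       // the real code would try eviction here
    }
    originUsage += delta;
    groupUsage += delta;
    globalUsage += delta;
    fileSize = aSize;
    return true;
  }
};

int main()
{
  UsageModel quota(/* aGroupLimit */ 100);
  assert(quota.MaybeUpdateSize(60, /* aTruncate */ false));   // grow to 60
  assert(quota.MaybeUpdateSize(40, /* aTruncate */ false));   // shrink ignored
  assert(quota.fileSize == 60);
  assert(quota.MaybeUpdateSize(40, /* aTruncate */ true));    // truncate to 40
  assert(quota.groupUsage == 40);
  assert(!quota.MaybeUpdateSize(150, /* aTruncate */ false)); // over the limit
  return 0;
}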

View File

@ -32,11 +32,8 @@ public:
void
Release();
void
UpdateSize(int64_t aSize);
bool
MaybeAllocateMoreSpace(int64_t aOffset, int32_t aCount);
MaybeUpdateSize(int64_t aSize, bool aTruncate);
private:
QuotaObject(OriginInfo* aOriginInfo, const nsAString& aPath, int64_t aSize)

View File

@ -140,6 +140,10 @@ struct telemetry_file {
// quota object for this file
nsRefPtr<QuotaObject> quotaObject;
// The chunk size for this file. See the documentation for
// sqlite3_file_control() and FCNTL_CHUNK_SIZE.
int fileChunkSize;
// This contains the vfs that actually does work
sqlite3_file pReal[1];
};
@ -283,8 +287,7 @@ GetQuotaObjectFromNameAndParameters(const char *zName,
MOZ_ASSERT(zURIParameterKey);
const char *persistenceType =
persistenceType = sqlite3_uri_parameter(zURIParameterKey,
"persistenceType");
sqlite3_uri_parameter(zURIParameterKey, "persistenceType");
if (!persistenceType) {
return nullptr;
}
@ -352,6 +355,9 @@ xClose(sqlite3_file *pFile)
delete p->base.pMethods;
p->base.pMethods = nullptr;
p->quotaObject = nullptr;
#ifdef DEBUG
p->fileChunkSize = 0;
#endif
}
return rc;
}
@ -372,23 +378,6 @@ xRead(sqlite3_file *pFile, void *zBuf, int iAmt, sqlite_int64 iOfst)
return rc;
}
/*
** Write data to a telemetry_file.
*/
int
xWrite(sqlite3_file *pFile, const void *zBuf, int iAmt, sqlite_int64 iOfst)
{
telemetry_file *p = (telemetry_file *)pFile;
if (p->quotaObject && !p->quotaObject->MaybeAllocateMoreSpace(iOfst, iAmt)) {
return SQLITE_FULL;
}
IOThreadAutoTimer ioTimer(p->histograms->writeMS, IOInterposeObserver::OpWrite);
int rc;
rc = p->pReal->pMethods->xWrite(p->pReal, zBuf, iAmt, iOfst);
Telemetry::Accumulate(p->histograms->writeB, rc == SQLITE_OK ? iAmt : 0);
return rc;
}
/*
** Return the current file-size of a telemetry_file.
*/
@ -402,6 +391,34 @@ xFileSize(sqlite3_file *pFile, sqlite_int64 *pSize)
return rc;
}
/*
** Write data to a telemetry_file.
*/
int
xWrite(sqlite3_file *pFile, const void *zBuf, int iAmt, sqlite_int64 iOfst)
{
telemetry_file *p = (telemetry_file *)pFile;
IOThreadAutoTimer ioTimer(p->histograms->writeMS, IOInterposeObserver::OpWrite);
int rc;
if (p->quotaObject) {
MOZ_ASSERT(INT64_MAX - iOfst >= iAmt);
if (!p->quotaObject->MaybeUpdateSize(iOfst + iAmt, /* aTruncate */ false)) {
return SQLITE_FULL;
}
}
rc = p->pReal->pMethods->xWrite(p->pReal, zBuf, iAmt, iOfst);
Telemetry::Accumulate(p->histograms->writeB, rc == SQLITE_OK ? iAmt : 0);
if (p->quotaObject && rc != SQLITE_OK) {
NS_WARNING("xWrite failed on a quota-controlled file, attempting to "
"update its current size...");
sqlite_int64 currentSize;
if (xFileSize(pFile, &currentSize) == SQLITE_OK) {
p->quotaObject->MaybeUpdateSize(currentSize, /* aTruncate */ true);
}
}
return rc;
}
/*
** Truncate a telemetry_file.
*/
@ -412,16 +429,32 @@ xTruncate(sqlite3_file *pFile, sqlite_int64 size)
telemetry_file *p = (telemetry_file *)pFile;
int rc;
Telemetry::AutoTimer<Telemetry::MOZ_SQLITE_TRUNCATE_MS> timer;
if (p->quotaObject) {
if (p->fileChunkSize > 0) {
// Round up to the smallest multiple of the chunk size that will hold all
// the data.
size =
((size + p->fileChunkSize - 1) / p->fileChunkSize) * p->fileChunkSize;
}
if (!p->quotaObject->MaybeUpdateSize(size, /* aTruncate */ true)) {
return SQLITE_FULL;
}
}
rc = p->pReal->pMethods->xTruncate(p->pReal, size);
if (rc == SQLITE_OK && p->quotaObject) {
// xTruncate doesn't always set the size of the file to the exact size
// requested (e.g. if a growth increment has been specified it will round up
// to the next multiple of the chunk size). Use xFileSize to see what the
// real size is.
sqlite_int64 newSize;
rc = xFileSize(pFile, &newSize);
if (p->quotaObject) {
if (rc == SQLITE_OK) {
p->quotaObject->UpdateSize(newSize);
#ifdef DEBUG
// Make sure xTruncate set the size exactly as we calculated above.
sqlite_int64 newSize;
MOZ_ASSERT(xFileSize(pFile, &newSize) == SQLITE_OK);
MOZ_ASSERT(newSize == size);
#endif
} else {
NS_WARNING("xTruncate failed on a quota-controlled file, attempting to "
"update its current size...");
if (xFileSize(pFile, &size) == SQLITE_OK) {
p->quotaObject->MaybeUpdateSize(size, /* aTruncate */ true);
}
}
}
return rc;
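The chunk-size rounding used in xTruncate above is plain round-up-to-a-multiple arithmetic; this tiny standalone check (the 32768-byte chunk size is only an example value, not what IndexedDB actually configures) shows how a requested size maps onto the quota that gets charged.

// Round aSize up to the smallest multiple of aChunkSize that can hold it,
// matching the expression used in xTruncate for quota-controlled files.
#include <cassert>
#include <cstdint>

static int64_t RoundToChunkSize(int64_t aSize, int aChunkSize)
{
  if (aChunkSize <= 0) {
    return aSize;                                    // no chunk size configured
  }
  return ((aSize + aChunkSize - 1) / aChunkSize) * aChunkSize;
}

int main()
{
  assert(RoundToChunkSize(0, 32768) == 0);
  assert(RoundToChunkSize(1, 32768) == 32768);       // smallest nonzero file
  assert(RoundToChunkSize(32768, 32768) == 32768);   // exact multiples are kept
  assert(RoundToChunkSize(40000, 32768) == 65536);   // always rounds up
  return 0;
}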
@ -480,7 +513,41 @@ int
xFileControl(sqlite3_file *pFile, int op, void *pArg)
{
telemetry_file *p = (telemetry_file *)pFile;
int rc = p->pReal->pMethods->xFileControl(p->pReal, op, pArg);
int rc;
// Hook SQLITE_FCNTL_SIZE_HINT for quota-controlled files and do the necessary
// work before passing to the SQLite VFS.
if (op == SQLITE_FCNTL_SIZE_HINT && p->quotaObject) {
sqlite3_int64 hintSize = *static_cast<sqlite3_int64*>(pArg);
sqlite3_int64 currentSize;
rc = xFileSize(pFile, &currentSize);
if (rc != SQLITE_OK) {
return rc;
}
if (hintSize > currentSize) {
rc = xTruncate(pFile, hintSize);
if (rc != SQLITE_OK) {
return rc;
}
}
}
rc = p->pReal->pMethods->xFileControl(p->pReal, op, pArg);
// Grab the file chunk size after the SQLite VFS has approved.
if (op == SQLITE_FCNTL_CHUNK_SIZE && rc == SQLITE_OK) {
p->fileChunkSize = *static_cast<int*>(pArg);
}
#ifdef DEBUG
if (op == SQLITE_FCNTL_SIZE_HINT && p->quotaObject && rc == SQLITE_OK) {
sqlite3_int64 hintSize = *static_cast<sqlite3_int64*>(pArg);
if (p->fileChunkSize > 0) {
hintSize =
((hintSize + p->fileChunkSize - 1) / p->fileChunkSize) *
p->fileChunkSize;
}
sqlite3_int64 currentSize;
MOZ_ASSERT(xFileSize(pFile, &currentSize) == SQLITE_OK);
MOZ_ASSERT(currentSize >= hintSize);
}
#endif
return rc;
}
@ -650,7 +717,7 @@ xDelete(sqlite3_vfs* vfs, const char *zName, int syncDir)
rc = orig_vfs->xDelete(orig_vfs, zName, syncDir);
if (rc == SQLITE_OK && quotaObject) {
quotaObject->UpdateSize(0);
MOZ_ALWAYS_TRUE(quotaObject->MaybeUpdateSize(0, /* aTruncate */ true));
}
return rc;

View File

@ -8957,6 +8957,7 @@
},
{
"path": "IndexedDB/key_valid.html",
"timeout": "long",
"url": "/IndexedDB/key_valid.html"
},
{

View File

@ -1,6 +1,7 @@
<!DOCTYPE html>
<!-- Submitted from TestTWF Paris -->
<meta charset=utf-8>
<meta name="timeout" content="long">
<title>Valid key</title>
<link rel=help href="http://dvcs.w3.org/hg/IndexedDB/raw-file/tip/Overview.html#key-construct">
<link rel=assert title="A value is said to be a valid key if it is one of the following types: Array JavaScript objects [ECMA-262], DOMString [WEBIDL], Date [ECMA-262] or float [WEBIDL]. However Arrays are only valid keys if every item in the array is defined and is a valid key (i.e. sparse arrays can not be valid keys) and if the Array doesn't directly or indirectly contain itself. Any non-numeric properties are ignored, and thus does not affect whether the Array is a valid key. Additionally, if the value is of type float, it is only a valid key if it is not NaN, and if the value is of type Date it is only a valid key if its [[PrimitiveValue]] internal property, as defined by [ECMA-262], is not NaN. Conforming user agents must support all valid keys as keys.">