Backed out 5 changesets (bug 866846, bug 1131776, bug 1131766, bug 1144806, bug 1112702) on suspicion of causing Windows debug devtools-4 storage crashes

CLOSED TREE

Backed out changeset 142d9ae5826c (bug 1131776)
Backed out changeset 0b4de21b759f (bug 1112702)
Backed out changeset 1b4ead852ae0 (bug 1131766)
Backed out changeset cbd862dd036f (bug 866846)
Backed out changeset 178412a2fe8b (bug 1144806)
Phil Ringnalda 2015-03-29 12:55:11 -07:00
Parent 52be9f47b5
Commit 74fbc95715
32 changed files with 3600 additions and 9511 deletions

View File

@ -833,15 +833,13 @@ MainProcessRunnable::OpenCacheFileForWrite()
mQuotaObject = qm->GetQuotaObject(mPersistence, mGroup, mOrigin, file);
NS_ENSURE_STATE(mQuotaObject);
if (!mQuotaObject->MaybeUpdateSize(mWriteParams.mSize,
/* aTruncate */ false)) {
if (!mQuotaObject->MaybeAllocateMoreSpace(0, mWriteParams.mSize)) {
// If the request fails, it might be because mOrigin is using too much
// space (MaybeUpdateSize will not evict our own origin since it is
// space (MaybeAllocateMoreSpace will not evict our own origin since it is
// active). Try to make some space by evicting LRU entries until there is
// enough space.
EvictEntries(mDirectory, mGroup, mOrigin, mWriteParams.mSize, mMetadata);
if (!mQuotaObject->MaybeUpdateSize(mWriteParams.mSize,
/* aTruncate */ false)) {
if (!mQuotaObject->MaybeAllocateMoreSpace(0, mWriteParams.mSize)) {
mResult = JS::AsmJSCache_QuotaExceeded;
return NS_ERROR_FAILURE;
}
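
The hunk above swaps the backed-out MaybeUpdateSize(size, /* aTruncate */ false) call for the older MaybeAllocateMoreSpace(0, size); with either API the flow is the same: ask the quota object for the space the cache file needs, and if the origin is over quota, evict least-recently-used entries and ask exactly once more. A minimal standalone sketch of that pattern, using hypothetical stand-ins (CacheQuota, EvictLeastRecentlyUsed) rather than the real QuotaObject and EvictEntries interfaces:

#include <cstdint>

// Hypothetical stand-in for QuotaObject, only for illustration.
struct CacheQuota {
  int64_t remainingBytes;
  bool TryReserve(int64_t bytes) {
    if (bytes > remainingBytes) {
      return false;
    }
    remainingBytes -= bytes;
    return true;
  }
};

// Hypothetical stand-in for EvictEntries: pretend eviction frees what was asked for.
void EvictLeastRecentlyUsed(CacheQuota& quota, int64_t neededBytes) {
  quota.remainingBytes += neededBytes;
}

bool ReserveSpaceOrEvict(CacheQuota& quota, int64_t neededBytes) {
  if (quota.TryReserve(neededBytes)) {
    return true;                              // fast path: quota available
  }
  EvictLeastRecentlyUsed(quota, neededBytes); // free space held by other cache entries
  return quota.TryReserve(neededBytes);       // second and final attempt
}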

View File

@ -315,13 +315,9 @@ DataStoreDB::DatabaseOpened()
return rv;
}
StringOrStringSequence objectStores;
objectStores.RawSetAsStringSequence().AppendElements(mObjectStores);
nsRefPtr<IDBTransaction> txn;
error = mDatabase->Transaction(objectStores,
mTransactionMode,
getter_AddRefs(txn));
nsRefPtr<IDBTransaction> txn = mDatabase->Transaction(mObjectStores,
mTransactionMode,
error);
if (NS_WARN_IF(error.Failed())) {
return error.ErrorCode();
}

The file diff is too large to display.

View File

@ -25,7 +25,6 @@
#include "mozilla/dom/BindingDeclarations.h"
#include "mozilla/dom/DOMStringList.h"
#include "mozilla/dom/DOMStringListBinding.h"
#include "mozilla/dom/Exceptions.h"
#include "mozilla/dom/File.h"
#include "mozilla/dom/IDBDatabaseBinding.h"
#include "mozilla/dom/IDBObjectStoreBinding.h"
@ -650,97 +649,73 @@ IDBDatabase::DeleteObjectStore(const nsAString& aName, ErrorResult& aRv)
}
already_AddRefed<IDBTransaction>
IDBDatabase::Transaction(const StringOrStringSequence& aStoreNames,
IDBDatabase::Transaction(const nsAString& aStoreName,
IDBTransactionMode aMode,
ErrorResult& aRv)
{
AssertIsOnOwningThread();
aRv.MightThrowJSException();
if (aMode == IDBTransactionMode::Readwriteflush &&
!IndexedDatabaseManager::ExperimentalFeaturesEnabled()) {
// Pretend that this mode doesn't exist. We don't have a way to annotate
// certain enum values as depending on preferences so we just duplicate the
// normal exception generation here.
ThreadsafeAutoJSContext cx;
// Disable any automatic error reporting that might be set up so that we
// can grab the exception object.
AutoForceSetExceptionOnContext forceExn(cx);
MOZ_ALWAYS_FALSE(
ThrowErrorMessage(cx,
MSG_INVALID_ENUM_VALUE,
"Argument 2 of IDBDatabase.transaction",
"readwriteflush",
"IDBTransactionMode"));
MOZ_ASSERT(JS_IsExceptionPending(cx));
JS::Rooted<JS::Value> exception(cx);
MOZ_ALWAYS_TRUE(JS_GetPendingException(cx, &exception));
aRv.ThrowJSException(cx, exception);
Sequence<nsString> storeNames;
if (!storeNames.AppendElement(aStoreName)) {
aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
return nullptr;
}
nsRefPtr<IDBTransaction> transaction;
aRv = Transaction(aStoreNames, aMode, getter_AddRefs(transaction));
if (NS_WARN_IF(aRv.Failed())) {
return nullptr;
}
return transaction.forget();
return Transaction(storeNames, aMode, aRv);
}
nsresult
IDBDatabase::Transaction(const StringOrStringSequence& aStoreNames,
already_AddRefed<IDBTransaction>
IDBDatabase::Transaction(const Sequence<nsString>& aStoreNames,
IDBTransactionMode aMode,
IDBTransaction** aTransaction)
ErrorResult& aRv)
{
AssertIsOnOwningThread();
if (NS_WARN_IF(aMode == IDBTransactionMode::Readwriteflush &&
!IndexedDatabaseManager::ExperimentalFeaturesEnabled())) {
IDB_REPORT_INTERNAL_ERR();
return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
}
if (QuotaManager::IsShuttingDown()) {
IDB_REPORT_INTERNAL_ERR();
return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
aRv.Throw(NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
return nullptr;
}
if (mClosed || RunningVersionChangeTransaction()) {
return NS_ERROR_DOM_INDEXEDDB_NOT_ALLOWED_ERR;
if (mClosed) {
aRv.Throw(NS_ERROR_DOM_INDEXEDDB_NOT_ALLOWED_ERR);
return nullptr;
}
nsAutoTArray<nsString, 1> stackSequence;
if (aStoreNames.IsString()) {
stackSequence.AppendElement(aStoreNames.GetAsString());
} else {
MOZ_ASSERT(aStoreNames.IsStringSequence());
if (aStoreNames.GetAsStringSequence().IsEmpty()) {
return NS_ERROR_DOM_INVALID_ACCESS_ERR;
}
if (RunningVersionChangeTransaction()) {
aRv.Throw(NS_ERROR_DOM_INDEXEDDB_NOT_ALLOWED_ERR);
return nullptr;
}
const nsTArray<nsString>& storeNames =
aStoreNames.IsString() ?
stackSequence :
static_cast<const nsTArray<nsString>&>(aStoreNames.GetAsStringSequence());
MOZ_ASSERT(!storeNames.IsEmpty());
if (aStoreNames.IsEmpty()) {
aRv.Throw(NS_ERROR_DOM_INVALID_ACCESS_ERR);
return nullptr;
}
IDBTransaction::Mode mode;
switch (aMode) {
case IDBTransactionMode::Readonly:
mode = IDBTransaction::READ_ONLY;
break;
case IDBTransactionMode::Readwrite:
mode = IDBTransaction::READ_WRITE;
break;
case IDBTransactionMode::Versionchange:
aRv.Throw(NS_ERROR_DOM_INVALID_ACCESS_ERR);
return nullptr;
default:
MOZ_CRASH("Unknown mode!");
}
const nsTArray<ObjectStoreSpec>& objectStores = mSpec->objectStores();
const uint32_t nameCount = storeNames.Length();
const uint32_t nameCount = aStoreNames.Length();
nsTArray<nsString> sortedStoreNames;
sortedStoreNames.SetCapacity(nameCount);
// Check to make sure the object store names we collected actually exist.
for (uint32_t nameIndex = 0; nameIndex < nameCount; nameIndex++) {
const nsString& name = storeNames[nameIndex];
const nsString& name = aStoreNames[nameIndex];
bool found = false;
@ -754,7 +729,8 @@ IDBDatabase::Transaction(const StringOrStringSequence& aStoreNames,
}
if (!found) {
return NS_ERROR_DOM_INDEXEDDB_NOT_FOUND_ERR;
aRv.Throw(NS_ERROR_DOM_INDEXEDDB_NOT_FOUND_ERR);
return nullptr;
}
sortedStoreNames.InsertElementSorted(name);
@ -767,29 +743,12 @@ IDBDatabase::Transaction(const StringOrStringSequence& aStoreNames,
}
}
IDBTransaction::Mode mode;
switch (aMode) {
case IDBTransactionMode::Readonly:
mode = IDBTransaction::READ_ONLY;
break;
case IDBTransactionMode::Readwrite:
mode = IDBTransaction::READ_WRITE;
break;
case IDBTransactionMode::Readwriteflush:
mode = IDBTransaction::READ_WRITE_FLUSH;
break;
case IDBTransactionMode::Versionchange:
return NS_ERROR_DOM_INVALID_ACCESS_ERR;
default:
MOZ_CRASH("Unknown mode!");
}
nsRefPtr<IDBTransaction> transaction =
IDBTransaction::Create(this, sortedStoreNames, mode);
if (NS_WARN_IF(!transaction)) {
IDB_REPORT_INTERNAL_ERR();
return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
aRv.Throw(NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
return nullptr;
}
BackgroundTransactionChild* actor =
@ -810,8 +769,7 @@ IDBDatabase::Transaction(const StringOrStringSequence& aStoreNames,
transaction->SetBackgroundActor(actor);
transaction.forget(aTransaction);
return NS_OK;
return transaction.forget();
}
StorageType
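
In the backed-out version of IDBDatabase::Transaction() above, the WebIDL (DOMString or sequence<DOMString>) union is accepted directly and flattened to a plain array before the store names are validated, while the restored code keeps a single-string overload that wraps the name into a one-element Sequence. A minimal sketch of that flattening step; StoreNames is a simplified stand-in for the generated StringOrStringSequence type, and std:: containers are used only to keep the example self-contained:

#include <string>
#include <vector>

// Simplified stand-in for the generated StringOrStringSequence union.
struct StoreNames {
  bool isString = false;
  std::string single;
  std::vector<std::string> sequence;
};

std::vector<std::string> NormalizeStoreNames(const StoreNames& names) {
  if (names.isString) {
    return {names.single};   // a lone store name becomes a one-element list
  }
  return names.sequence;     // a sequence is used as-is (callers reject empty ones)
}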

View File

@ -32,8 +32,7 @@ namespace dom {
class File;
class DOMStringList;
struct IDBObjectStoreParameters;
template <class> class Optional;
class StringOrStringSequence;
template <typename> class Sequence;
namespace indexedDB {
@ -214,17 +213,15 @@ public:
void
DeleteObjectStore(const nsAString& name, ErrorResult& aRv);
// This will be called from the DOM.
already_AddRefed<IDBTransaction>
Transaction(const StringOrStringSequence& aStoreNames,
Transaction(const nsAString& aStoreName,
IDBTransactionMode aMode,
ErrorResult& aRv);
// This can be called from C++ to avoid JS exception.
nsresult
Transaction(const StringOrStringSequence& aStoreNames,
already_AddRefed<IDBTransaction>
Transaction(const Sequence<nsString>& aStoreNames,
IDBTransactionMode aMode,
IDBTransaction** aTransaction);
ErrorResult& aRv);
StorageType
Storage() const;

View File

@ -215,9 +215,7 @@ IDBTransaction::CreateVersionChange(
nsTArray<nsString> emptyObjectStoreNames;
nsRefPtr<IDBTransaction> transaction =
new IDBTransaction(aDatabase,
emptyObjectStoreNames,
VERSION_CHANGE);
new IDBTransaction(aDatabase, emptyObjectStoreNames, VERSION_CHANGE);
aOpenRequest->GetCallerLocation(transaction->mFilename,
&transaction->mLineNo);
@ -253,9 +251,7 @@ IDBTransaction::Create(IDBDatabase* aDatabase,
MOZ_ASSERT(aDatabase);
aDatabase->AssertIsOnOwningThread();
MOZ_ASSERT(!aObjectStoreNames.IsEmpty());
MOZ_ASSERT(aMode == READ_ONLY ||
aMode == READ_WRITE ||
aMode == READ_WRITE_FLUSH);
MOZ_ASSERT(aMode == READ_ONLY || aMode == READ_WRITE);
nsRefPtr<IDBTransaction> transaction =
new IDBTransaction(aDatabase, aObjectStoreNames, aMode);
@ -867,9 +863,6 @@ IDBTransaction::GetMode(ErrorResult& aRv) const
case READ_WRITE:
return IDBTransactionMode::Readwrite;
case READ_WRITE_FLUSH:
return IDBTransactionMode::Readwriteflush;
case VERSION_CHANGE:
return IDBTransactionMode::Versionchange;

View File

@ -60,7 +60,6 @@ public:
{
READ_ONLY = 0,
READ_WRITE,
READ_WRITE_FLUSH,
VERSION_CHANGE,
// Only needed for IPC serialization helper, should never be used in code.
@ -178,9 +177,7 @@ public:
IsWriteAllowed() const
{
AssertIsOnOwningThread();
return mMode == READ_WRITE ||
mMode == READ_WRITE_FLUSH ||
mMode == VERSION_CHANGE;
return mMode == READ_WRITE || mMode == VERSION_CHANGE;
}
bool

View File

@ -658,7 +658,8 @@ IndexedDatabaseManager::FullSynchronous()
// static
bool
IndexedDatabaseManager::ExperimentalFeaturesEnabled()
IndexedDatabaseManager::ExperimentalFeaturesEnabled(JSContext* aCx,
JSObject* aGlobal)
{
if (NS_IsMainThread()) {
if (NS_WARN_IF(!GetOrCreate())) {

View File

@ -109,13 +109,7 @@ public:
#endif
static bool
ExperimentalFeaturesEnabled();
static bool
ExperimentalFeaturesEnabled(JSContext* /* aCx */, JSObject* /* aGlobal */)
{
return ExperimentalFeaturesEnabled();
}
ExperimentalFeaturesEnabled(JSContext* aCx, JSObject* aGlobal);
already_AddRefed<FileManager>
GetFileManager(PersistenceType aPersistenceType,

View File

@ -13,7 +13,6 @@
#include "mozilla/Endian.h"
#include "mozilla/FloatingPoint.h"
#include "mozIStorageStatement.h"
#include "mozIStorageValueArray.h"
#include "nsAlgorithm.h"
#include "nsJSUtils.h"
#include "ReportInternalError.h"
@ -28,10 +27,10 @@ namespace indexedDB {
Basic strategy is the following
Numbers: 0x10 n n n n n n n n ("n"s are encoded 64bit float)
Dates: 0x20 n n n n n n n n ("n"s are encoded 64bit float)
Strings: 0x30 s s s ... 0 ("s"s are encoded unicode bytes)
Arrays: 0x50 i i i ... 0 ("i"s are encoded array items)
Numbers: 1 n n n n n n n n ("n"s are encoded 64bit float)
Dates: 2 n n n n n n n n ("n"s are encoded 64bit float)
Strings: 3 s s s ... 0 ("s"s are encoded unicode bytes)
Arrays: 4 i i i ... 0 ("i"s are encoded array items)
When encoding floats, 64bit IEEE 754 are almost sortable, except that
@ -56,58 +55,65 @@ namespace indexedDB {
When encoding Arrays, we use an additional trick. Rather than adding a byte
containing the value 0x50 to indicate type, we instead add 0x50 to the next byte.
containing the value '4' to indicate type, we instead add 4 to the next byte.
This is usually the byte containing the type of the first item in the array.
So simple examples are
["foo"] 0x80 s s s 0 0 // 0x80 is 0x30 + 0x50
[1, 2] 0x60 n n n n n n n n 1 n n n n n n n n 0 // 0x60 is 0x10 + 0x50
["foo"] 7 s s s 0 0 // 7 is 3 + 4
[1, 2] 5 n n n n n n n n 1 n n n n n n n n 0 // 5 is 1 + 4
We do this iteratively if the first item in the array is also an array
[["foo"]] 0xA0 s s s 0 0 0
[["foo"]] 11 s s s 0 0 0
However, to avoid overflow in the byte, we only do this 3 times. If the first
item in an array is an array, and that array also has an array as first item,
we simply write out the total value accumulated so far and then follow the
"normal" rules.
[[["foo"]]] 0xF0 0x30 s s s 0 0 0 0
[[["foo"]]] 12 3 s s s 0 0 0 0
There is another edge case that can happen though, which is that the array
doesn't have a first item to which we can add 0x50 to the type. Instead the
doesn't have a first item to which we can add 4 to the type. Instead the
next byte would normally be the array terminator (per basic-strategy table)
so we simply add the 0x50 there.
so we simply add the 4 there.
[[]] 0xA0 0 // 0xA0 is 0x50 + 0x50 + 0
[] 0x50 // 0x50 is 0x50 + 0
[[], "foo"] 0xA0 0x30 s s s 0 0 // 0xA0 is 0x50 + 0x50 + 0
[[]] 8 0 // 8 is 4 + 4 + 0
[] 4 // 4 is 4 + 0
[[], "foo"] 8 3 s s s 0 0 // 8 is 4 + 4 + 0
Note that the max-3-times rule kicks in before we get a chance to add to the
array terminator
[[[]]] 0xF0 0 0 0 // 0xF0 is 0x50 + 0x50 + 0x50
[[[]]] 12 0 0 0 // 12 is 4 + 4 + 4
We could use a much higher number than 3 at no complexity or performance cost,
however it seems unlikely that it'll make a practical difference, and the low
limit makes testing easier.
As a final optimization we do a post-encoding step which drops all 0s at the
end of the encoded buffer.
"foo" // 0x30 s s s
1 // 0x10 bf f0
["a", "b"] // 0x80 s 0 0x30 s
[1, 2] // 0x60 bf f0 0 0 0 0 0 0 0x10 c0
[[]] // 0x80
"foo" // 3 s s s
1 // 1 bf f0
["a", "b"] // 7 s 3 s
[1, 2] // 5 bf f0 0 0 0 0 0 0 1 c0
[[]] // 8
*/
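
The comment above fully determines the leading byte of an encoded key: the inner value's type tag plus one array tag (0x50, or 4 in the other scheme) per leading array level, folded in at most three times, which is also why the static_assert just below requires eMaxType * kMaxArrayCollapse < 256 (0x50 * 3 = 0xF0 = 240). A small illustrative helper that reproduces the hex-tag rows of the table above; it is not the shipped encoder:

#include <cstdint>

// Illustration only: first encoded byte for a value nested inside
// `leadingArrays` arrays, using the hex tags from this hunk (string = 0x30,
// array step = 0x50, terminator = 0). Callers pass leadingArrays <= 3; at the
// third level the encoder flushes 0xF0 and restarts, which is not modelled here.
uint8_t CollapsedLeadingByte(uint8_t innerTag, unsigned leadingArrays) {
  return static_cast<uint8_t>(innerTag + leadingArrays * 0x50);
}

// CollapsedLeadingByte(0x30, 0) == 0x30   // "foo"
// CollapsedLeadingByte(0x30, 1) == 0x80   // ["foo"], as in the table above
// CollapsedLeadingByte(0x00, 1) == 0x50   // []    (the tag lands on the terminator)
// CollapsedLeadingByte(0x00, 2) == 0xA0   // [[]]
// CollapsedLeadingByte(0x00, 3) == 0xF0   // [[[]]] (collapse limit reached)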
const int MaxArrayCollapse = 3;
const int MaxRecursionDepth = 256;
nsresult
Key::EncodeJSValInternal(JSContext* aCx, JS::Handle<JS::Value> aVal,
uint8_t aTypeOffset, uint16_t aRecursionDepth)
{
static_assert(eMaxType * kMaxArrayCollapse < 256,
"Unable to encode jsvals.");
NS_ENSURE_TRUE(aRecursionDepth < MaxRecursionDepth, NS_ERROR_DOM_INDEXEDDB_DATA_ERR);
if (NS_WARN_IF(aRecursionDepth == kMaxRecursionDepth)) {
return NS_ERROR_DOM_INDEXEDDB_DATA_ERR;
}
static_assert(eMaxType * MaxArrayCollapse < 256,
"Unable to encode jsvals.");
if (aVal.isString()) {
nsAutoJSString str;
@ -133,12 +139,12 @@ Key::EncodeJSValInternal(JSContext* aCx, JS::Handle<JS::Value> aVal,
if (JS_IsArrayObject(aCx, obj)) {
aTypeOffset += eMaxType;
if (aTypeOffset == eMaxType * kMaxArrayCollapse) {
if (aTypeOffset == eMaxType * MaxArrayCollapse) {
mBuffer.Append(aTypeOffset);
aTypeOffset = 0;
}
NS_ASSERTION((aTypeOffset % eMaxType) == 0 &&
aTypeOffset < (eMaxType * kMaxArrayCollapse),
aTypeOffset < (eMaxType * MaxArrayCollapse),
"Wrong typeoffset");
uint32_t length;
@ -186,9 +192,7 @@ Key::DecodeJSValInternal(const unsigned char*& aPos, const unsigned char* aEnd,
JSContext* aCx, uint8_t aTypeOffset, JS::MutableHandle<JS::Value> aVal,
uint16_t aRecursionDepth)
{
if (NS_WARN_IF(aRecursionDepth == kMaxRecursionDepth)) {
return NS_ERROR_DOM_INDEXEDDB_DATA_ERR;
}
NS_ENSURE_TRUE(aRecursionDepth < MaxRecursionDepth, NS_ERROR_DOM_INDEXEDDB_DATA_ERR);
if (*aPos - aTypeOffset >= eArray) {
JS::Rooted<JSObject*> array(aCx, JS_NewArrayObject(aCx, 0));
@ -200,7 +204,7 @@ Key::DecodeJSValInternal(const unsigned char*& aPos, const unsigned char* aEnd,
aTypeOffset += eMaxType;
if (aTypeOffset == eMaxType * kMaxArrayCollapse) {
if (aTypeOffset == eMaxType * MaxArrayCollapse) {
++aPos;
aTypeOffset = 0;
}
@ -327,9 +331,10 @@ nsresult
Key::DecodeJSVal(const unsigned char*& aPos,
const unsigned char* aEnd,
JSContext* aCx,
uint8_t aTypeOffset,
JS::MutableHandle<JS::Value> aVal)
{
return DecodeJSValInternal(aPos, aEnd, aCx, 0, aVal, 0);
return DecodeJSValInternal(aPos, aEnd, aCx, aTypeOffset, aVal, 0);
}
// static
@ -461,14 +466,16 @@ nsresult
Key::SetFromStatement(mozIStorageStatement* aStatement,
uint32_t aIndex)
{
return SetFromSource(aStatement, aIndex);
}
uint8_t* data;
uint32_t dataLength = 0;
nsresult
Key::SetFromValueArray(mozIStorageValueArray* aValues,
uint32_t aIndex)
{
return SetFromSource(aValues, aIndex);
nsresult rv = aStatement->GetBlob(aIndex, &dataLength, &data);
NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
mBuffer.Adopt(
reinterpret_cast<char*>(const_cast<uint8_t*>(data)), dataLength);
return NS_OK;
}
nsresult
@ -502,7 +509,7 @@ Key::ToJSVal(JSContext* aCx,
}
const unsigned char* pos = BufferStart();
nsresult rv = DecodeJSVal(pos, BufferEnd(), aCx, aVal);
nsresult rv = DecodeJSVal(pos, BufferEnd(), aCx, 0, aVal);
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
@ -536,23 +543,6 @@ Key::AppendItem(JSContext* aCx, bool aFirstOfArray, JS::Handle<JS::Value> aVal)
return NS_OK;
}
template <typename T>
nsresult
Key::SetFromSource(T* aSource, uint32_t aIndex)
{
const uint8_t* data;
uint32_t dataLength = 0;
nsresult rv = aSource->GetSharedBlob(aIndex, &dataLength, &data);
if (NS_WARN_IF(NS_FAILED(rv))) {
return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
}
mBuffer.Assign(reinterpret_cast<const char*>(data), dataLength);
return NS_OK;
}
#ifdef DEBUG
void

View File

@ -11,7 +11,6 @@
#include "nsString.h"
class mozIStorageStatement;
class mozIStorageValueArray;
namespace IPC {
@ -30,28 +29,11 @@ class Key
nsCString mBuffer;
public:
enum {
eTerminator = 0,
eFloat = 0x10,
eDate = 0x20,
eString = 0x30,
eArray = 0x50,
eMaxType = eArray
};
static const uint8_t kMaxArrayCollapse = uint8_t(3);
static const uint8_t kMaxRecursionDepth = uint8_t(64);
Key()
{
Unset();
}
explicit
Key(const nsACString& aBuffer)
: mBuffer(aBuffer)
{ }
Key&
operator=(const nsAString& aString)
{
@ -129,25 +111,25 @@ public:
bool
IsFloat() const
{
return !IsUnset() && *BufferStart() == eFloat;
return !IsUnset() && mBuffer.First() == eFloat;
}
bool
IsDate() const
{
return !IsUnset() && *BufferStart() == eDate;
return !IsUnset() && mBuffer.First() == eDate;
}
bool
IsString() const
{
return !IsUnset() && *BufferStart() == eString;
return !IsUnset() && mBuffer.First() == eString;
}
bool
IsArray() const
{
return !IsUnset() && *BufferStart() >= eArray;
return !IsUnset() && mBuffer.First() >= eArray;
}
double
@ -226,9 +208,6 @@ public:
nsresult
SetFromStatement(mozIStorageStatement* aStatement, uint32_t aIndex);
nsresult
SetFromValueArray(mozIStorageValueArray* aValues, uint32_t aIndex);
static int16_t
CompareKeys(Key& aFirst, Key& aSecond)
{
@ -258,6 +237,15 @@ private:
return reinterpret_cast<const unsigned char*>(mBuffer.EndReading());
}
enum {
eTerminator = 0,
eFloat = 1,
eDate = 2,
eString = 3,
eArray = 4,
eMaxType = eArray
};
// Encoding helper. Trims trailing zeros off of mBuffer as a post-processing
// step.
void
@ -287,6 +275,7 @@ private:
DecodeJSVal(const unsigned char*& aPos,
const unsigned char* aEnd,
JSContext* aCx,
uint8_t aTypeOffset,
JS::MutableHandle<JS::Value> aVal);
static void
@ -311,10 +300,6 @@ private:
JS::MutableHandle<JS::Value> aVal,
uint16_t aRecursionDepth);
template <typename T>
nsresult
SetFromSource(T* aSource, uint32_t aIndex);
void
Assert(bool aCondition) const
#ifdef DEBUG

View File

@ -125,9 +125,6 @@ public:
case IDBTransaction::READ_WRITE:
AppendLiteral("\"readwrite\"");
break;
case IDBTransaction::READ_WRITE_FLUSH:
AppendLiteral("\"readwriteflush\"");
break;
case IDBTransaction::VERSION_CHANGE:
AppendLiteral("\"versionchange\"");
break;
@ -281,7 +278,7 @@ LoggingHelper(bool aUseProfiler, const char* aFmt, ...)
PRLogModuleInfo* logModule = IndexedDatabaseManager::GetLoggingModule();
MOZ_ASSERT(logModule);
static const PRLogModuleLevel logLevel = PR_LOG_WARNING;
static const PRLogModuleLevel logLevel = PR_LOG_DEBUG;
if (PR_LOG_TEST(logModule, logLevel) ||
(aUseProfiler && profiler_is_active())) {

The file diff is too large to display.

View File

@ -0,0 +1,159 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_dom_indexeddb_transactionthreadpool_h__
#define mozilla_dom_indexeddb_transactionthreadpool_h__
#include "mozilla/Attributes.h"
#include "nsAutoPtr.h"
#include "nsClassHashtable.h"
#include "nsCOMPtr.h"
#include "nsHashKeys.h"
#include "nsISupportsImpl.h"
#include "nsTArray.h"
struct nsID;
class nsIEventTarget;
class nsIRunnable;
class nsIThreadPool;
namespace mozilla {
namespace dom {
namespace indexedDB {
class TransactionThreadPool final
{
class FinishTransactionRunnable;
friend class FinishTransactionRunnable;
class TransactionQueue;
friend class TransactionQueue;
struct DatabaseTransactionInfo;
struct DatabasesCompleteCallback;
struct TransactionInfo;
struct TransactionInfoPair;
nsCOMPtr<nsIThreadPool> mThreadPool;
nsCOMPtr<nsIEventTarget> mOwningThread;
nsClassHashtable<nsCStringHashKey, DatabaseTransactionInfo>
mTransactionsInProgress;
nsTArray<nsAutoPtr<DatabasesCompleteCallback>> mCompleteCallbacks;
uint64_t mNextTransactionId;
bool mShutdownRequested;
bool mShutdownComplete;
public:
class FinishCallback;
static already_AddRefed<TransactionThreadPool> Create();
uint64_t NextTransactionId();
void Start(uint64_t aTransactionId,
const nsACString& aDatabaseId,
const nsTArray<nsString>& aObjectStoreNames,
uint16_t aMode,
const nsID& aBackgroundChildLoggingId,
int64_t aLoggingSerialNumber,
nsIRunnable* aRunnable);
void Dispatch(uint64_t aTransactionId,
const nsACString& aDatabaseId,
nsIRunnable* aRunnable,
bool aFinish,
FinishCallback* aFinishCallback);
void WaitForDatabasesToComplete(nsTArray<nsCString>& aDatabaseIds,
nsIRunnable* aCallback);
NS_INLINE_DECL_REFCOUNTING(TransactionThreadPool)
void Shutdown();
void AssertIsOnOwningThread() const
#ifdef DEBUG
;
#else
{ }
#endif
private:
static PLDHashOperator
CollectTransactions(const uint64_t& aTransactionId,
TransactionInfo* aValue,
void* aUserArg);
static PLDHashOperator
FindTransaction(const uint64_t& aTransactionId,
TransactionInfo* aValue,
void* aUserArg);
static PLDHashOperator
MaybeUnblockTransaction(nsPtrHashKey<TransactionInfo>* aKey,
void* aUserArg);
TransactionThreadPool();
// Reference counted.
~TransactionThreadPool();
nsresult Init();
void Cleanup();
void FinishTransaction(uint64_t aTransactionId,
const nsACString& aDatabaseId,
const nsTArray<nsString>& aObjectStoreNames,
uint16_t aMode);
TransactionQueue* GetQueueForTransaction(uint64_t aTransactionId,
const nsACString& aDatabaseId);
TransactionQueue& CreateQueueForTransaction(
uint64_t aTransactionId,
const nsACString& aDatabaseId,
const nsTArray<nsString>& aObjectStoreNames,
uint16_t aMode,
const nsID& aBackgroundChildLoggingId,
int64_t aLoggingSerialNumber);
bool MaybeFireCallback(DatabasesCompleteCallback* aCallback);
void CleanupAsync();
};
class NS_NO_VTABLE TransactionThreadPool::FinishCallback
{
public:
NS_IMETHOD_(MozExternalRefCountType)
AddRef() = 0;
NS_IMETHOD_(MozExternalRefCountType)
Release() = 0;
// Called on the owning thread before any additional transactions are
// unblocked.
virtual void
TransactionFinishedBeforeUnblock() = 0;
// Called on the owning thread after additional transactions may have been
// unblocked.
virtual void
TransactionFinishedAfterUnblock() = 0;
protected:
FinishCallback()
{ }
};
} // namespace indexedDB
} // namespace dom
} // namespace mozilla
#endif // mozilla_dom_indexeddb_transactionthreadpool_h__
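
The header above is restored wholesale by the backout. A possible call sequence, inferred only from the declarations above rather than from an actual caller in this diff; databaseId, storeNames, loggingId, serial, firstRunnable, nextRunnable and finishCallback are placeholders:

// Hypothetical usage pieced together from the declarations above.
nsRefPtr<TransactionThreadPool> pool = TransactionThreadPool::Create();
if (pool) {
  uint64_t txnId = pool->NextTransactionId();
  // Queue the first runnable for this transaction/database pair.
  pool->Start(txnId, databaseId, storeNames, IDBTransaction::READ_WRITE,
              loggingId, serial, firstRunnable);
  // Later work for the same transaction; the pool is told this is the last piece.
  pool->Dispatch(txnId, databaseId, nextRunnable,
                 /* aFinish */ true, finishCallback);
}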

View File

@ -64,6 +64,7 @@ UNIFIED_SOURCES += [
'KeyPath.cpp',
'PermissionRequestBase.cpp',
'ReportInternalError.cpp',
'TransactionThreadPool.cpp',
]
SOURCES += [

View File

@ -70,7 +70,6 @@ support-files =
unit/test_put_get_values.js
unit/test_put_get_values_autoIncrement.js
unit/test_readonly_transactions.js
unit/test_readwriteflush_disabled.js
unit/test_remove_index.js
unit/test_remove_objectStore.js
unit/test_request_readyState.js
@ -320,8 +319,6 @@ skip-if = (buildapp == 'b2g' && toolkit != 'gonk') # Bug 931116
skip-if = (buildapp == 'b2g' && toolkit != 'gonk') # Bug 931116
[test_readonly_transactions.html]
skip-if = (buildapp == 'b2g' && toolkit != 'gonk') # Bug 931116
[test_readwriteflush_disabled.html]
skip-if = (buildapp == 'b2g' && toolkit != 'gonk') # Bug 931116
[test_remove_index.html]
skip-if = (buildapp == 'b2g' && toolkit != 'gonk') # Bug 931116
[test_remove_objectStore.html]

View File

@ -1,19 +0,0 @@
<!--
Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/
-->
<html>
<head>
<title>Indexed Database Property Test</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
<script type="text/javascript;version=1.7" src="unit/test_readwriteflush_disabled.js"></script>
<script type="text/javascript;version=1.7" src="helpers.js"></script>
</head>
<body onload="runTest();"></body>
</html>

Binary file not shown.

View File

@ -61,9 +61,7 @@ function testSteps()
ok(true, "clear should throw on READ_ONLY transactions");
}
request = db.transaction("foo", "readwriteflush")
.objectStore("foo")
.clear();
request = db.transaction("foo", "readwrite").objectStore("foo").clear();
request.onerror = errorHandler;
request.onsuccess = grabEventAndContinueHandler;
event = yield undefined;
@ -83,9 +81,7 @@ function testSteps()
}
yield undefined;
request = db.transaction("foo", "readwrite")
.objectStore("foo")
.add({});
request = db.transaction("foo", "readwrite").objectStore("foo").add({});
request.onerror = errorHandler;
request.onsuccess = grabEventAndContinueHandler;
event = yield undefined;

View File

@ -1,72 +0,0 @@
/**
* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/
*/
let disableWorkerTest = "Need a way to set temporary prefs from a worker";
let testGenerator = testSteps();
function testSteps()
{
const name =
this.window ? window.location.pathname : "test_readwriteflush_disabled.js";
info("Resetting experimental pref");
if (this.window) {
SpecialPowers.pushPrefEnv(
{
"set": [
["dom.indexedDB.experimental", false]
]
},
continueToNextStep
);
yield undefined;
} else {
resetExperimental();
}
info("Opening database");
let request = indexedDB.open(name);
request.onerror = errorHandler;
request.onupgradeneeded = continueToNextStepSync;
request.onsuccess = unexpectedSuccessHandler;
yield undefined;
// upgradeneeded
request.onupgradeneeded = unexpectedSuccessHandler;
request.onsuccess = continueToNextStepSync;
info("Creating objectStore");
request.result.createObjectStore(name);
yield undefined;
// success
let db = request.result;
info("Attempting to create a 'readwriteflush' transaction");
let exception;
try {
let transaction = db.transaction(name, "readwriteflush");
} catch (e) {
exception = e;
}
ok(exception, "'readwriteflush' transaction threw");
ok(exception instanceof Error, "exception is an Error object");
is(exception.message,
"Argument 2 of IDBDatabase.transaction 'readwriteflush' is not a valid " +
"value for enumeration IDBTransactionMode.",
"exception has the correct message");
finishTest();
yield undefined;
}

View File

@ -1,336 +0,0 @@
/**
* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/
*/
var testGenerator = testSteps();
function testSteps()
{
const testName = "schema18upgrade";
const testKeys = [
-1/0,
-1.7e308,
-10000,
-2,
-1.5,
-1,
-1.00001e-200,
-1e-200,
0,
1e-200,
1.00001e-200,
1,
2,
10000,
1.7e308,
1/0,
new Date("1750-01-02"),
new Date("1800-12-31T12:34:56.001Z"),
new Date(-1000),
new Date(-10),
new Date(-1),
new Date(0),
new Date(1),
new Date(2),
new Date(1000),
new Date("1971-01-01"),
new Date("1971-01-01T01:01:01Z"),
new Date("1971-01-01T01:01:01.001Z"),
new Date("1971-01-01T01:01:01.01Z"),
new Date("1971-01-01T01:01:01.1Z"),
new Date("1980-02-02"),
new Date("3333-03-19T03:33:33.333Z"),
"",
"\x00",
"\x00\x00",
"\x00\x01",
"\x01",
"\x02",
"\x03",
"\x04",
"\x07",
"\x08",
"\x0F",
"\x10",
"\x1F",
"\x20",
"01234",
"\x3F",
"\x40",
"A",
"A\x00",
"A1",
"ZZZZ",
"a",
"a\x00",
"aa",
"azz",
"}",
"\x7E",
"\x7F",
"\x80",
"\xFF",
"\u0100",
"\u01FF",
"\u0200",
"\u03FF",
"\u0400",
"\u07FF",
"\u0800",
"\u0FFF",
"\u1000",
"\u1FFF",
"\u2000",
"\u3FFF",
"\u4000",
"\u7FFF",
"\u8000",
"\uD800",
"\uD800a",
"\uD800\uDC01",
"\uDBFF",
"\uDC00",
"\uDFFF\uD800",
"\uFFFE",
"\uFFFF",
"\uFFFF\x00",
"\uFFFFZZZ",
[],
[-1/0],
[-1],
[0],
[1],
[1, "a"],
[1, []],
[1, [""]],
[2, 3],
[2, 3.0000000000001],
[12, [[]]],
[12, [[[]]]],
[12, [[[""]]]],
[12, [[["foo"]]]],
[12, [[[[[3]]]]]],
[12, [[[[[[3]]]]]]],
[12, [[[[[[3],[[[[[4.2]]]]]]]]]]],
[new Date(-1)],
[new Date(1)],
[""],
["", [[]]],
["", [[[]]]],
["abc"],
["abc", "def"],
["abc\x00"],
["abc\x00", "\x00\x01"],
["abc\x00", "\x00def"],
["abc\x00\x00def"],
["x", [[]]],
["x", [[[]]]],
[[]],
[[],"foo"],
[[],[]],
[[[]]],
[[[]], []],
[[[]], [[]]],
[[[]], [[1]]],
[[[]], [[[]]]],
[[[1]]],
[[[[]], []]],
];
const testString =
"abcdefghijklmnopqrstuvwxyz0123456789`~!@#$%^&*()-_+=,<.>/?\\|";
clearAllDatabases(continueToNextStepSync);
yield undefined;
info("Installing profile");
installPackagedProfile(testName + "_profile");
info("Opening database with no version");
let request = indexedDB.open(testName);
request.onerror = errorHandler;
request.onupgradeneeded = unexpectedSuccessHandler;
request.onsuccess = grabEventAndContinueHandler;
let event = yield undefined;
let db = event.target.result;
is(db.version, 1, "Correct db version");
let transaction = db.transaction(testName);
transaction.oncomplete = grabEventAndContinueHandler;
let objectStore = transaction.objectStore(testName);
let index = objectStore.index("uniqueIndex");
info("Starting 'uniqueIndex' cursor");
let keyIndex = 0;
index.openCursor().onsuccess = event => {
let cursor = event.target.result;
if (cursor) {
info("Comparing " + JSON.stringify(cursor.primaryKey) + " to " +
JSON.stringify(testKeys[cursor.key]) +
" [" + cursor.key + "]");
is(indexedDB.cmp(cursor.primaryKey, testKeys[cursor.key]), 0,
"Keys compare equally via 'indexedDB.cmp'");
is(compareKeys(cursor.primaryKey, testKeys[cursor.key]), true,
"Keys compare equally via 'compareKeys'");
let indexProperty = cursor.value.index;
is(Array.isArray(indexProperty), true, "index property is Array");
is(indexProperty[0], cursor.key, "index property first item correct");
is(indexProperty[1], cursor.key + 1, "index property second item correct");
is(cursor.key, keyIndex, "Cursor key property is correct");
is(cursor.value.testString, testString, "Test string compared equally");
keyIndex++;
cursor.continue();
}
};
yield undefined;
is(keyIndex, testKeys.length, "Saw all keys");
transaction = db.transaction(testName, "readwrite");
transaction.oncomplete = grabEventAndContinueHandler;
objectStore = transaction.objectStore(testName);
index = objectStore.index("index");
info("Getting all 'index' keys");
index.getAllKeys().onsuccess = grabEventAndContinueHandler;
event = yield undefined;
is(event.target.result.length, testKeys.length * 2, "Got all keys");
info("Starting objectStore cursor");
objectStore.openCursor().onsuccess = event => {
let cursor = event.target.result;
if (cursor) {
let value = cursor.value;
is(value.testString, testString, "Test string compared equally");
delete value.index;
cursor.update(value);
cursor.continue();
} else {
continueToNextStepSync();
}
};
yield undefined;
info("Getting all 'index' keys");
index.getAllKeys().onsuccess = grabEventAndContinueHandler;
event = yield undefined;
is(event.target.result.length, 0, "Removed all keys");
yield undefined;
db.close();
info("Opening database with new version");
request = indexedDB.open(testName, 2);
request.onerror = errorHandler;
request.onupgradeneeded = grabEventAndContinueHandler;
request.onsuccess = grabEventAndContinueHandler;
event = yield undefined;
info("Deleting indexes");
objectStore = event.target.transaction.objectStore(testName);
objectStore.deleteIndex("index");
objectStore.deleteIndex("uniqueIndex");
event = yield undefined;
db = event.target.result;
transaction = db.transaction(testName, "readwrite");
transaction.oncomplete = grabEventAndContinueHandler;
info("Starting objectStore cursor");
objectStore = transaction.objectStore(testName);
objectStore.openCursor().onsuccess = event => {
let cursor = event.target.result;
if (cursor) {
let value = cursor.value;
is(value.testString, testString, "Test string compared equally");
value.index = value.keyPath;
cursor.update(value);
cursor.continue();
}
};
event = yield undefined;
db.close();
info("Opening database with new version");
request = indexedDB.open(testName, 3);
request.onerror = errorHandler;
request.onupgradeneeded = grabEventAndContinueHandler;
request.onsuccess = grabEventAndContinueHandler;
event = yield undefined;
info("Creating indexes");
objectStore = event.target.transaction.objectStore(testName);
objectStore.createIndex("index", "index");
event = yield undefined;
db = event.target.result;
transaction = db.transaction(testName);
transaction.oncomplete = grabEventAndContinueHandler;
objectStore = transaction.objectStore(testName);
index = objectStore.index("index");
info("Starting 'index' cursor");
keyIndex = 0;
index.openCursor().onsuccess = event => {
let cursor = event.target.result;
if (cursor) {
is(indexedDB.cmp(cursor.primaryKey, testKeys[keyIndex]), 0,
"Keys compare equally via 'indexedDB.cmp'");
is(compareKeys(cursor.primaryKey, testKeys[keyIndex]), true,
"Keys compare equally via 'compareKeys'");
is(indexedDB.cmp(cursor.key, testKeys[keyIndex]), 0,
"Keys compare equally via 'indexedDB.cmp'");
is(compareKeys(cursor.key, testKeys[keyIndex]), true,
"Keys compare equally via 'compareKeys'");
let indexProperty = cursor.value.index;
is(indexedDB.cmp(indexProperty, testKeys[keyIndex]), 0,
"Keys compare equally via 'indexedDB.cmp'");
is(compareKeys(indexProperty, testKeys[keyIndex]), true,
"Keys compare equally via 'compareKeys'");
is(cursor.value.testString, testString, "Test string compared equally");
keyIndex++;
cursor.continue();
}
};
yield undefined;
is(keyIndex, testKeys.length, "Added all keys again");
finishTest();
yield undefined;
}

View File

@ -7,254 +7,230 @@ var testGenerator = testSteps();
function testSteps()
{
const name = this.window ?
window.location.pathname :
"test_temporary_storage.js";
const finalVersion = 2;
const name = this.window ? window.location.pathname : "Splendid Test";
const tempStorageLimitKB = 1024;
const checkpointSleepTimeSec = 5;
const urls = [
{ url: "http://www.alpha.com", flags: [true, true, true, true] },
{ url: "http://www.beta.com", flags: [true, false, false, false] },
{ url: "http://www.gamma.com", flags: [true, true, false, false] },
{ url: "http://www.delta.com", flags: [true, true, false, false] },
{ url: "http://www.epsilon.com", flags: [true, true, false, false] },
{ url: "http://www2.alpha.com", flags: [true, true, false, false] },
{ url: "http://www2.beta.com", flags: [true, true, false, false] },
{ url: "http://www2.gamma.com", flags: [true, true, false, false] },
{ url: "http://www2.delta.com", flags: [true, true, true, false] },
{ url: "http://www2.epsilon.com", flags: [true, true, true, true] },
{ url: "http://joe.blog.alpha.com", flags: [true, true, true, true] },
{ url: "http://joe.blog.beta.com", flags: [true, true, true, true] },
{ url: "http://joe.blog.gamma.com", flags: [true, true, true, true] },
{ url: "http://joe.blog.delta.com", flags: [true, true, true, true] },
{ url: "http://joe.blog.epsilon.com", flags: [true, true, true, true] },
{ url: "http://www.rudolf.org", flags: [true, true, true, true] },
{ url: "http://www.pauline.org", flags: [true, true, true, true] },
{ url: "http://www.marie.org", flags: [true, true, true, true] },
{ url: "http://www.john.org", flags: [true, true, true, true] },
{ url: "http://www.ema.org", flags: [true, true, true, true] },
{ url: "http://www.trigger.com", flags: [false, true, true, true] }
];
const lastIndex = urls.length - 1;
const lastUrl = urls[lastIndex].url;
const openDBOptions = [
{ version: 1, storage: "temporary" },
{ version: 1 }
];
let quotaManager =
Components.classes["@mozilla.org/dom/quota/manager;1"]
.getService(Components.interfaces.nsIQuotaManager);
let ioService = Components.classes["@mozilla.org/network/io-service;1"]
.getService(Components.interfaces.nsIIOService);
let dbSize = 0;
function setLimit(limit) {
const pref = "dom.quotaManager.temporaryStorage.fixedLimit";
if (limit) {
info("Setting temporary storage limit to " + limit);
SpecialPowers.setIntPref(pref, limit);
} else {
info("Removing temporary storage limit");
SpecialPowers.clearUserPref(pref);
SpecialPowers.setIntPref("dom.quotaManager.temporaryStorage.fixedLimit",
limit);
return;
}
}
function getSpec(index) {
return "http://foo" + index + ".com";
SpecialPowers.clearUserPref("dom.quotaManager.temporaryStorage.fixedLimit");
}
function getPrincipal(url) {
let uri = Cc["@mozilla.org/network/io-service;1"]
.getService(Ci.nsIIOService)
.newURI(url, null, null);
return Cc["@mozilla.org/scriptsecuritymanager;1"]
.getService(Ci.nsIScriptSecurityManager)
.getNoAppCodebasePrincipal(uri);
let uri = ioService.newURI(url, null, null);
return Components.classes["@mozilla.org/scriptsecuritymanager;1"]
.getService(Components.interfaces.nsIScriptSecurityManager)
.getNoAppCodebasePrincipal(uri);
}
for (let temporary of [true, false]) {
info("Testing '" + (temporary ? "temporary" : "default") + "' storage");
function getUsageForUrl(url, usageHandler) {
let uri = ioService.newURI(url, null, null);
function callback(uri, usage, fileUsage) {
usageHandler(usage, fileUsage);
}
quotaManager.getUsageForURI(uri, callback);
}
setLimit(tempStorageLimitKB);
function grabUsageAndContinueHandler(usage, fileUsage) {
testGenerator.send(usage);
}
clearAllDatabases(continueToNextStepSync);
yield undefined;
function checkUsage(stageIndex) {
let handledIndex = 0;
info("Stage 1 - Creating empty databases until we reach the quota limit");
function usageHandler(usage, fileUsage) {
let data = urls[handledIndex++];
if (data.flags[stageIndex - 1]) {
ok(usage > 0, "Non-zero usage for '" + data.url + "'");
}
else {
ok(usage == 0, "Zero usage for '" + data.url + "'");
}
if (handledIndex == urls.length) {
continueToNextStep();
}
}
for (let i = 0; i < urls.length; i++) {
getUsageForUrl(urls[i].url, usageHandler);
}
}
// Enable clear() and test()
let testingEnabled =
SpecialPowers.getBoolPref("dom.quotaManager.testing");
SpecialPowers.setBoolPref("dom.quotaManager.testing", true)
// Calibration
let request = indexedDB.openForPrincipal(getPrincipal(lastUrl), name,
{ storage: "temporary" });
request.onerror = errorHandler;
request.onsuccess = grabEventAndContinueHandler;
let event = yield undefined;
getUsageForUrl(lastUrl, grabUsageAndContinueHandler);
dbSize = yield undefined;
for (let options of openDBOptions) {
setLimit(lastIndex * dbSize / 1024);
quotaManager.clear();
info("Stage 1");
let databases = [];
let options = { version: finalVersion };
if (temporary) {
options.storage = "temporary";
for (let i = 0; i < lastIndex; i++) {
let data = urls[i];
info("Opening database for " + data.url);
request = indexedDB.openForPrincipal(getPrincipal(data.url), name,
options);
request.onerror = errorHandler;
request.onupgradeneeded = grabEventAndContinueHandler;
request.onsuccess = grabEventAndContinueHandler;
event = yield undefined;
is(event.type, "upgradeneeded", "Got correct event type");
let db = event.target.result;
db.createObjectStore("foo", { });
event = yield undefined;
is(event.type, "success", "Got correct event type");
databases.push(event.target.result);
}
while (true) {
let spec = getSpec(databases.length);
request = indexedDB.openForPrincipal(getPrincipal(lastUrl), name, options);
request.addEventListener("error", new ExpectError("QuotaExceededError"));
request.onsuccess = unexpectedSuccessHandler;
event = yield undefined;
info("Opening database for " + spec + " with version " + options.version);
checkUsage(1);
yield undefined;
let gotUpgradeNeeded = false;
info("Stage 2");
let request =
indexedDB.openForPrincipal(getPrincipal(spec), name, options);
request.onerror = function(event) {
is(request.error.name, "QuotaExceededError", "Reached quota limit");
event.preventDefault();
testGenerator.send(false);
}
request.onupgradeneeded = function(event) {
gotUpgradeNeeded = true;
}
request.onsuccess = function(event) {
let db = event.target.result;
is(db.version, finalVersion, "Correct version " + finalVersion);
databases.push(db);
testGenerator.send(true);
}
for (let i = 1; i < urls.length; i++) {
databases[i] = null;
let shouldContinue = yield undefined;
if (shouldContinue) {
is(gotUpgradeNeeded, true, "Got upgradeneeded event");
ok(true, "Got success event");
} else {
break;
}
}
while (true) {
info("Sleeping for " + checkpointSleepTimeSec + " seconds to let all " +
"checkpoints finish so that we know we have reached quota limit");
setTimeout(continueToNextStepSync, checkpointSleepTimeSec * 1000);
scheduleGC();
yield undefined;
let spec = getSpec(databases.length);
info("Opening database for " + spec + " with version " + options.version);
let gotUpgradeNeeded = false;
let request =
indexedDB.openForPrincipal(getPrincipal(spec), name, options);
request.onerror = function(event) {
is(request.error.name, "QuotaExceededError", "Reached quota limit");
event.preventDefault();
testGenerator.send(false);
}
request.onupgradeneeded = function(event) {
gotUpgradeNeeded = true;
}
request.onsuccess = function(event) {
let db = event.target.result;
is(db.version, finalVersion, "Correct version " + finalVersion);
databases.push(db);
testGenerator.send(true);
}
let shouldContinue = yield undefined;
if (shouldContinue) {
is(gotUpgradeNeeded, true, "Got upgradeneeded event");
ok(true, "Got success event");
} else {
break;
}
}
let databaseCount = databases.length;
info("Created " + databaseCount + " databases before quota limit reached");
info("Stage 2 - " +
"Closing all databases and then attempting to create one more, then " +
"verifying that the oldest origin was cleared");
for (let i = 0; i < databases.length; i++) {
info("Closing database for " + getSpec(i));
databases[i].close();
// Timer resolution on Windows is low so wait for 40ms just to be safe.
setTimeout(continueToNextStepSync, 40);
// The origin access time is set to the current system time when the first
// database for an origin is registered or the last one is unregistered.
// The registration happens when the database object is being created and
// the unregistration when it is unlinked/garbage collected.
// Some older windows systems have the system time limited to a maximum
// resolution of 10 or 15 milliseconds, so without a pause here we would
// end up with origins with the same access time which would cause random
// failures.
setTimeout(function() { testGenerator.next(); }, 20);
yield undefined;
}
databases = null;
let spec = getSpec(databaseCount);
info("Opening database for " + spec + " with version " + options.version);
let request = indexedDB.openForPrincipal(getPrincipal(spec), name, options);
request = indexedDB.openForPrincipal(getPrincipal(lastUrl), name, options);
request.onerror = errorHandler;
request.onupgradeneeded = grabEventAndContinueHandler;
request.onsuccess = unexpectedSuccessHandler;
let event = yield undefined;
is(event.type, "upgradeneeded", "Got upgradeneeded event");
request.onupgradeneeded = unexpectedSuccessHandler;
request.onsuccess = grabEventAndContinueHandler;
event = yield undefined;
is(event.type, "success", "Got success event");
is(event.type, "upgradeneeded", "Got correct event type");
let db = event.target.result;
is(db.version, finalVersion, "Correct version " + finalVersion);
db.close();
db = null;
db.createObjectStore("foo", { });
setLimit(tempStorageLimitKB * 2);
resetAllDatabases(continueToNextStepSync);
yield undefined;
delete options.version;
spec = getSpec(0);
info("Opening database for " + spec + " with unspecified version");
request = indexedDB.openForPrincipal(getPrincipal(spec), name, options);
request.onerror = errorHandler;
request.onupgradeneeded = grabEventAndContinueHandler;
request.onsuccess = unexpectedSuccessHandler;
event = yield undefined;
is(event.type, "upgradeneeded", "Got upgradeneeded event");
is(event.type, "success", "Got correct event type");
request.onupgradeneeded = unexpectedSuccessHandler;
request.onsuccess = grabEventAndContinueHandler;
event = yield undefined;
is(event.type, "success", "Got success event");
db = event.target.result;
is(db.version, 1, "Correct version 1 (database was recreated)");
db.close();
db = null;
info("Stage 3 - " +
"Cutting storage limit in half to force deletion of some databases");
setLimit(tempStorageLimitKB / 2);
resetAllDatabases(continueToNextStepSync);
checkUsage(2);
yield undefined;
info("Opening database for " + spec + " with unspecified version");
info("Stage 3");
// Open the same db again to force QM to delete others. The first origin (0)
// should be the most recent so it should not be deleted and we should not
// get an upgradeneeded event here.
request = indexedDB.openForPrincipal(getPrincipal(spec), name, options);
setLimit(14 * dbSize / 1024);
quotaManager.reset();
request = indexedDB.openForPrincipal(getPrincipal(lastUrl), name, options);
request.onerror = errorHandler;
request.onupgradeneeded = unexpectedSuccessHandler;
request.onsuccess = grabEventAndContinueHandler;
event = yield undefined;
is(event.type, "success", "Got correct event type");
db = event.target.result;
is(db.version, 1, "Correct version 1");
db.close();
db = null;
setLimit(tempStorageLimitKB * 2);
resetAllDatabases(continueToNextStepSync);
checkUsage(3);
yield undefined;
options.version = finalVersion;
info("Stage 4");
let newDatabaseCount = 0;
for (let i = 0; i < databaseCount; i++) {
let spec = getSpec(i);
info("Opening database for " + spec + " with version " + options.version);
let trans = db.transaction(["foo"], "readwrite");
let request =
indexedDB.openForPrincipal(getPrincipal(spec), name, options);
request.onerror = errorHandler;
request.onupgradeneeded = function(event) {
if (!event.oldVersion) {
newDatabaseCount++;
}
}
request.onsuccess = grabEventAndContinueHandler;
let event = yield undefined;
let blob = new Blob(["bar"]);
request = trans.objectStore("foo").add(blob, 42);
request.onerror = errorHandler;
request.onsuccess = grabEventAndContinueHandler;
event = yield undefined;
is(event.type, "success", "Got correct event type");
trans.oncomplete = grabEventAndContinueHandler;
event = yield undefined;
let db = request.result;
is(db.version, finalVersion, "Correct version " + finalVersion);
db.close();
}
info("Needed to recreate " + newDatabaseCount + " databases");
ok(newDatabaseCount, "Created some new databases");
ok(newDatabaseCount < databaseCount, "Didn't recreate all databases");
checkUsage(4);
yield undefined;
}
info("Cleanup");
setLimit();
quotaManager.reset();
SpecialPowers.setBoolPref("dom.quotaManager.testing", testingEnabled);
finishTest();
yield undefined;
}

View File

@ -3,14 +3,6 @@
* http://creativecommons.org/publicdomain/zero/1.0/
*/
var disableWorkerTest =
"This test requires a precise 'executeSoon()' to complete reliably. On a " +
"worker 'executeSoon()' currently uses 'setTimeout()', and that switches " +
"to the timer thread and back before completing. That gives the IndexedDB " +
"transaction thread time to fully complete transactions and to place " +
"'complete' events in the worker thread's queue before the timer event, " +
"causing ordering problems in the spot marked 'Worker Fails Here' below.";
var testGenerator = testSteps();
var abortFired = false;
@ -338,8 +330,6 @@ function testSteps()
// During COMMITTING
transaction = db.transaction("foo", "readwrite");
transaction.objectStore("foo").put({hello: "world"}, 1).onsuccess = function(event) {
// Worker Fails Here! Due to the thread switching of 'executeSoon()' the
// transaction can commit and fire a 'complete' event before we continue.
continueToNextStep();
};
yield undefined;

View File

@ -15,7 +15,6 @@ support-files =
GlobalObjectsComponent.manifest
GlobalObjectsModule.jsm
GlobalObjectsSandbox.js
schema18upgrade_profile.zip
xpcshell-shared.ini
[include:xpcshell-shared.ini]
@ -29,8 +28,6 @@ skip-if = toolkit == 'android'
# disabled for the moment.
skip-if = true
[test_lowDiskSpace.js]
[test_readwriteflush_disabled.js]
[test_schema18upgrade.js]
[test_temporary_storage.js]
# bug 951017: intermittent failure on Android x86 emulator
skip-if = os == "android" && processor == "x86"

View File

@ -23,7 +23,7 @@ FileQuotaStream<FileStreamBase>::SetEOF()
nsresult rv = FileStreamBase::Tell(&offset);
NS_ENSURE_SUCCESS(rv, rv);
mQuotaObject->MaybeUpdateSize(offset, /* aTruncate */ true);
mQuotaObject->UpdateSize(offset);
}
return NS_OK;
@ -56,7 +56,7 @@ FileQuotaStream<FileStreamBase>::DoOpen()
NS_ENSURE_SUCCESS(rv, rv);
if (mQuotaObject && (FileStreamBase::mOpenParams.ioFlags & PR_TRUNCATE)) {
mQuotaObject->MaybeUpdateSize(0, /* aTruncate */ true);
mQuotaObject->UpdateSize(0);
}
return NS_OK;
@ -75,11 +75,8 @@ FileQuotaStreamWithWrite<FileStreamBase>::Write(const char* aBuf,
rv = FileStreamBase::Tell(&offset);
NS_ENSURE_SUCCESS(rv, rv);
MOZ_ASSERT(INT64_MAX - offset >= int64_t(aCount));
if (!FileQuotaStreamWithWrite::
mQuotaObject->MaybeUpdateSize(offset + int64_t(aCount),
/* aTruncate */ false)) {
mQuotaObject->MaybeAllocateMoreSpace(offset, aCount)) {
return NS_ERROR_FILE_NO_DEVICE_SPACE;
}
}
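
Both calls in the Write() hunk above ask the same question before the write proceeds: may the stream's file grow to offset + aCount bytes? A standalone model of that check; the function name and the explicit "remaining quota" argument are illustrative, not Gecko's API. When the real check fails, Write() returns NS_ERROR_FILE_NO_DEVICE_SPACE:

#include <cstdint>

// Minimal model of the pre-write quota check in the hunk above.
bool MayWrite(int64_t remainingQuota, int64_t currentFileSize,
              int64_t offset, int32_t count) {
  int64_t requestedEnd = offset + int64_t(count);   // overflow guarded by MOZ_ASSERT above
  int64_t growth = requestedEnd > currentFileSize ? requestedEnd - currentFileSize : 0;
  return growth <= remainingQuota;                  // false maps to NS_ERROR_FILE_NO_DEVICE_SPACE
}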

View File

@ -6,55 +6,11 @@
#include "QuotaObject.h"
#include "mozilla/TypeTraits.h"
#include "QuotaManager.h"
#include "Utilities.h"
USING_QUOTA_NAMESPACE
namespace {
template <typename T, bool = mozilla::IsUnsigned<T>::value>
struct IntChecker
{
static void
Assert(T aInt)
{
static_assert(mozilla::IsIntegral<T>::value, "Not an integer!");
MOZ_ASSERT(aInt >= 0);
}
};
template <typename T>
struct IntChecker<T, true>
{
static void
Assert(T aInt)
{
static_assert(mozilla::IsIntegral<T>::value, "Not an integer!");
}
};
template <typename T>
void
AssertNoOverflow(uint64_t aDest, T aArg)
{
IntChecker<T>::Assert(aDest);
IntChecker<T>::Assert(aArg);
MOZ_ASSERT(UINT64_MAX - aDest >= uint64_t(aArg));
}
template <typename T, typename U>
void
AssertNoUnderflow(T aDest, U aArg)
{
IntChecker<T>::Assert(aDest);
IntChecker<T>::Assert(aArg);
MOZ_ASSERT(uint64_t(aDest) >= uint64_t(aArg));
}
} // anonymous namespace
void
QuotaObject::AddRef()
{
@ -105,75 +61,71 @@ QuotaObject::Release()
delete this;
}
bool
QuotaObject::MaybeUpdateSize(int64_t aSize, bool aTruncate)
void
QuotaObject::UpdateSize(int64_t aSize)
{
QuotaManager* quotaManager = QuotaManager::Get();
MOZ_ASSERT(quotaManager);
NS_ASSERTION(quotaManager, "Shouldn't be null!");
MutexAutoLock lock(quotaManager->mQuotaMutex);
if (mSize == aSize) {
return true;
if (!mOriginInfo) {
return;
}
if (!mOriginInfo) {
mSize = aSize;
GroupInfo* groupInfo = mOriginInfo->mGroupInfo;
quotaManager->mTemporaryStorageUsage -= mSize;
groupInfo->mUsage -= mSize;
mOriginInfo->mUsage -= mSize;
mSize = aSize;
mOriginInfo->mUsage += mSize;
groupInfo->mUsage += mSize;
quotaManager->mTemporaryStorageUsage += mSize;
}
bool
QuotaObject::MaybeAllocateMoreSpace(int64_t aOffset, int32_t aCount)
{
int64_t end = aOffset + aCount;
QuotaManager* quotaManager = QuotaManager::Get();
NS_ASSERTION(quotaManager, "Shouldn't be null!");
MutexAutoLock lock(quotaManager->mQuotaMutex);
if (mSize >= end || !mOriginInfo) {
return true;
}
GroupInfo* groupInfo = mOriginInfo->mGroupInfo;
MOZ_ASSERT(groupInfo);
if (mSize > aSize) {
if (aTruncate) {
const int64_t delta = mSize - aSize;
AssertNoUnderflow(quotaManager->mTemporaryStorageUsage, delta);
quotaManager->mTemporaryStorageUsage -= delta;
AssertNoUnderflow(groupInfo->mUsage, delta);
groupInfo->mUsage -= delta;
AssertNoUnderflow(mOriginInfo->mUsage, delta);
mOriginInfo->mUsage -= delta;
mSize = aSize;
}
return true;
}
MOZ_ASSERT(mSize < aSize);
nsRefPtr<GroupInfo> complementaryGroupInfo =
groupInfo->mGroupInfoPair->LockedGetGroupInfo(
ComplementaryPersistenceType(groupInfo->mPersistenceType));
uint64_t delta = aSize - mSize;
uint64_t delta = end - mSize;
AssertNoOverflow(mOriginInfo->mUsage, delta);
uint64_t newUsage = mOriginInfo->mUsage + delta;
// Temporary storage has no limit for origin usage (there's a group and the
// global limit though).
AssertNoOverflow(groupInfo->mUsage, delta);
uint64_t newGroupUsage = groupInfo->mUsage + delta;
uint64_t groupUsage = groupInfo->mUsage;
if (complementaryGroupInfo) {
AssertNoOverflow(groupUsage, complementaryGroupInfo->mUsage);
groupUsage += complementaryGroupInfo->mUsage;
}
// Temporary storage has a hard limit for group usage (20 % of the global
// limit).
AssertNoOverflow(groupUsage, delta);
if (groupUsage + delta > quotaManager->GetGroupLimit()) {
return false;
}
AssertNoOverflow(quotaManager->mTemporaryStorageUsage, delta);
uint64_t newTemporaryStorageUsage = quotaManager->mTemporaryStorageUsage +
delta;
@ -229,22 +181,17 @@ QuotaObject::MaybeUpdateSize(int64_t aSize, bool aTruncate)
// We unlocked and relocked several times so we need to recompute all the
// essential variables and recheck the group limit.
AssertNoUnderflow(aSize, mSize);
delta = aSize - mSize;
delta = end - mSize;
AssertNoOverflow(mOriginInfo->mUsage, delta);
newUsage = mOriginInfo->mUsage + delta;
AssertNoOverflow(groupInfo->mUsage, delta);
newGroupUsage = groupInfo->mUsage + delta;
groupUsage = groupInfo->mUsage;
if (complementaryGroupInfo) {
AssertNoOverflow(groupUsage, complementaryGroupInfo->mUsage);
groupUsage += complementaryGroupInfo->mUsage;
}
AssertNoOverflow(groupUsage, delta);
if (groupUsage + delta > quotaManager->GetGroupLimit()) {
// Unfortunately some other thread increased the group usage in the
// meantime and we are not below the group limit anymore.
@ -257,7 +204,6 @@ QuotaObject::MaybeUpdateSize(int64_t aSize, bool aTruncate)
return false;
}
AssertNoOverflow(quotaManager->mTemporaryStorageUsage, delta);
newTemporaryStorageUsage = quotaManager->mTemporaryStorageUsage + delta;
NS_ASSERTION(newTemporaryStorageUsage <=
@ -265,14 +211,15 @@ QuotaObject::MaybeUpdateSize(int64_t aSize, bool aTruncate)
// Ok, we successfully freed enough space and the operation can continue
// without throwing the quota error.
mOriginInfo->mUsage = newUsage;
groupInfo->mUsage = newGroupUsage;
quotaManager->mTemporaryStorageUsage = newTemporaryStorageUsage;;
// Some other thread could increase the size in the meantime, but no more
// than this one.
MOZ_ASSERT(mSize < aSize);
mSize = aSize;
NS_ASSERTION(mSize < end, "This shouldn't happen!");
mSize = end;
// Finally, release IO thread only objects and allow next synchronized
// ops for the evicted origins.
@ -287,7 +234,7 @@ QuotaObject::MaybeUpdateSize(int64_t aSize, bool aTruncate)
groupInfo->mUsage = newGroupUsage;
quotaManager->mTemporaryStorageUsage = newTemporaryStorageUsage;
mSize = aSize;
mSize = end;
return true;
}
@ -297,19 +244,30 @@ OriginInfo::LockedDecreaseUsage(int64_t aSize)
{
AssertCurrentThreadOwnsQuotaMutex();
AssertNoUnderflow(mUsage, aSize);
mUsage -= aSize;
AssertNoUnderflow(mGroupInfo->mUsage, aSize);
mGroupInfo->mUsage -= aSize;
QuotaManager* quotaManager = QuotaManager::Get();
MOZ_ASSERT(quotaManager);
AssertNoUnderflow(quotaManager->mTemporaryStorageUsage, aSize);
quotaManager->mTemporaryStorageUsage -= aSize;
}
// static
PLDHashOperator
OriginInfo::ClearOriginInfoCallback(const nsAString& aKey,
QuotaObject* aValue,
void* aUserArg)
{
NS_ASSERTION(!aKey.IsEmpty(), "Empty key!");
NS_ASSERTION(aValue, "Null pointer!");
aValue->mOriginInfo = nullptr;
return PL_DHASH_NEXT;
}
already_AddRefed<OriginInfo>
GroupInfo::LockedGetOriginInfo(const nsACString& aOrigin)
{
@ -336,13 +294,11 @@ GroupInfo::LockedAddOriginInfo(OriginInfo* aOriginInfo)
"Replacing an existing entry!");
mOriginInfos.AppendElement(aOriginInfo);
AssertNoOverflow(mUsage, aOriginInfo->mUsage);
mUsage += aOriginInfo->mUsage;
QuotaManager* quotaManager = QuotaManager::Get();
MOZ_ASSERT(quotaManager);
AssertNoOverflow(quotaManager->mTemporaryStorageUsage, aOriginInfo->mUsage);
quotaManager->mTemporaryStorageUsage += aOriginInfo->mUsage;
}
@ -353,14 +309,14 @@ GroupInfo::LockedRemoveOriginInfo(const nsACString& aOrigin)
for (uint32_t index = 0; index < mOriginInfos.Length(); index++) {
if (mOriginInfos[index]->mOrigin == aOrigin) {
AssertNoUnderflow(mUsage, mOriginInfos[index]->mUsage);
MOZ_ASSERT(mUsage >= mOriginInfos[index]->mUsage);
mUsage -= mOriginInfos[index]->mUsage;
QuotaManager* quotaManager = QuotaManager::Get();
MOZ_ASSERT(quotaManager);
AssertNoUnderflow(quotaManager->mTemporaryStorageUsage,
mOriginInfos[index]->mUsage);
MOZ_ASSERT(quotaManager->mTemporaryStorageUsage >=
mOriginInfos[index]->mUsage);
quotaManager->mTemporaryStorageUsage -= mOriginInfos[index]->mUsage;
mOriginInfos.RemoveElementAt(index);
@ -381,10 +337,10 @@ GroupInfo::LockedRemoveOriginInfos()
for (uint32_t index = mOriginInfos.Length(); index > 0; index--) {
OriginInfo* originInfo = mOriginInfos[index - 1];
AssertNoUnderflow(mUsage, originInfo->mUsage);
MOZ_ASSERT(mUsage >= originInfo->mUsage);
mUsage -= originInfo->mUsage;
AssertNoUnderflow(quotaManager->mTemporaryStorageUsage, originInfo->mUsage);
MOZ_ASSERT(quotaManager->mTemporaryStorageUsage >= originInfo->mUsage);
quotaManager->mTemporaryStorageUsage -= originInfo->mUsage;
mOriginInfos.RemoveElementAt(index - 1);
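Aside (illustrative only): LockedAddOriginInfo and LockedRemoveOriginInfo above keep two counters in lock-step, the group's usage and the global temporary-storage usage. A small sketch of that invariant with plain integers (the names here are hypothetical):

#include <cassert>
#include <cstdint>

struct UsageBook {  // hypothetical stand-in for GroupInfo + QuotaManager
  uint64_t groupUsage = 0;
  uint64_t temporaryStorageUsage = 0;

  void AddOrigin(uint64_t aOriginUsage) {
    // Both counters grow together, as in LockedAddOriginInfo.
    groupUsage += aOriginUsage;
    temporaryStorageUsage += aOriginUsage;
  }

  void RemoveOrigin(uint64_t aOriginUsage) {
    // Both shrink together, as in LockedRemoveOriginInfo(s).
    assert(groupUsage >= aOriginUsage);
    assert(temporaryStorageUsage >= aOriginUsage);
    groupUsage -= aOriginUsage;
    temporaryStorageUsage -= aOriginUsage;
  }
};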

View file

@ -32,8 +32,11 @@ public:
void
Release();
void
UpdateSize(int64_t aSize);
bool
MaybeUpdateSize(int64_t aSize, bool aTruncate);
MaybeAllocateMoreSpace(int64_t aOffset, int32_t aCount);
private:
QuotaObject(OriginInfo* aOriginInfo, const nsAString& aPath, int64_t aSize)
@ -93,8 +96,6 @@ private:
~OriginInfo()
{
MOZ_COUNT_DTOR(OriginInfo);
MOZ_ASSERT(!mQuotaObjects.Count());
}
void
@ -108,6 +109,18 @@ private:
mAccessTime = aAccessTime;
}
void
LockedClearOriginInfos()
{
AssertCurrentThreadOwnsQuotaMutex();
mQuotaObjects.EnumerateRead(ClearOriginInfoCallback, nullptr);
}
static PLDHashOperator
ClearOriginInfoCallback(const nsAString& aKey,
QuotaObject* aValue, void* aUserArg);
nsDataHashtable<nsStringHashKey, QuotaObject*> mQuotaObjects;
GroupInfo* mGroupInfo;

View file

@ -23,9 +23,15 @@ interface IDBDatabase : EventTarget {
[Throws]
void deleteObjectStore (DOMString name);
// This should be:
// IDBTransaction transaction ((DOMString or sequence<DOMString>) storeNames, optional IDBTransactionMode mode = "readonly");
// but unions are not currently supported.
[Throws]
IDBTransaction transaction ((DOMString or sequence<DOMString>) storeNames,
optional IDBTransactionMode mode = "readonly");
IDBTransaction transaction (DOMString storeName, optional IDBTransactionMode mode = "readonly");
[Throws]
IDBTransaction transaction (sequence<DOMString> storeNames, optional IDBTransactionMode mode = "readonly");
void close ();

View file

@ -11,10 +11,6 @@
enum IDBTransactionMode {
"readonly",
"readwrite",
// The "readwriteflush" mode is only available when the
// |IndexedDatabaseManager::ExperimentalFeaturesEnabled()| function returns
// true. This mode is not yet part of the standard.
"readwriteflush",
"versionchange"
};

View file

@ -140,205 +140,10 @@ struct telemetry_file {
// quota object for this file
nsRefPtr<QuotaObject> quotaObject;
// The chunk size for this file. See the documentation for
// sqlite3_file_control() and FCNTL_CHUNK_SIZE.
int fileChunkSize;
// This contains the vfs that actually does work
sqlite3_file pReal[1];
};
const char*
DatabasePathFromWALPath(const char *zWALName)
{
/**
* Do some sketchy pointer arithmetic to find the parameter key. The WAL
* filename is in the middle of a big allocated block that contains:
*
* - Random Values
* - Main Database Path
* - \0
* - Multiple URI components consisting of:
* - Key
* - \0
* - Value
* - \0
* - \0
* - Journal Path
* - \0
* - WAL Path (zWALName)
* - \0
*
* Because the main database path is preceded by a random value we have to be
* careful when trying to figure out when we should terminate this loop.
*/
MOZ_ASSERT(zWALName);
nsDependentCSubstring dbPath(zWALName, strlen(zWALName));
// Chop off the "-wal" suffix.
NS_NAMED_LITERAL_CSTRING(kWALSuffix, "-wal");
MOZ_ASSERT(StringEndsWith(dbPath, kWALSuffix));
dbPath.Rebind(zWALName, dbPath.Length() - kWALSuffix.Length());
MOZ_ASSERT(!dbPath.IsEmpty());
// We want to scan to the end of the key/value URI pairs. Skip the preceding
// null and go to the last char of the journal path.
const char* cursor = zWALName - 2;
// Make sure we just skipped a null.
MOZ_ASSERT(!*(cursor + 1));
// Walk backwards over the journal path.
while (*cursor) {
cursor--;
}
// There should be another null here.
cursor--;
MOZ_ASSERT(!*cursor);
// Back up one more char to the last char of the previous string. It may be
// the database path or it may be a key/value URI pair.
cursor--;
#ifdef DEBUG
{
// Verify that we just walked over the journal path. Account for the two
// nulls we just skipped.
const char *journalStart = cursor + 3;
nsDependentCSubstring journalPath(journalStart,
strlen(journalStart));
// Chop off the "-journal" suffix.
NS_NAMED_LITERAL_CSTRING(kJournalSuffix, "-journal");
MOZ_ASSERT(StringEndsWith(journalPath, kJournalSuffix));
journalPath.Rebind(journalStart,
journalPath.Length() - kJournalSuffix.Length());
MOZ_ASSERT(!journalPath.IsEmpty());
// Make sure that the database name is a substring of the journal name.
MOZ_ASSERT(journalPath == dbPath);
}
#endif
// Now we're either at the end of the key/value URI pairs or we're at the
// end of the database path. Carefully walk backwards one character at a
// time to do this safely without running past the beginning of the database
// path.
const char *const dbPathStart = dbPath.BeginReading();
const char *dbPathCursor = dbPath.EndReading() - 1;
bool isDBPath = true;
while (true) {
MOZ_ASSERT(*dbPathCursor, "dbPathCursor should never see a null char!");
if (isDBPath) {
isDBPath = dbPathStart <= dbPathCursor &&
*dbPathCursor == *cursor &&
*cursor;
}
if (!isDBPath) {
// This isn't the database path so it must be a value. Scan past it and
// the key also.
for (size_t stringCount = 0; stringCount < 2; stringCount++) {
// Scan past the string to the preceding null character.
while (*cursor) {
cursor--;
}
// Back up one more char to the last char of preceding string.
cursor--;
}
// Reset and start again.
dbPathCursor = dbPath.EndReading() - 1;
isDBPath = true;
continue;
}
MOZ_ASSERT(isDBPath);
MOZ_ASSERT(*cursor);
if (dbPathStart == dbPathCursor) {
// Found the full database path, we're all done.
MOZ_ASSERT(nsDependentCString(cursor) == dbPath);
return cursor;
}
// Change the cursors and go through the loop again.
cursor--;
dbPathCursor--;
}
MOZ_CRASH("Should never get here!");
}
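Aside (illustrative only, not the patch's code): the function above walks backwards over NUL-separated strings packed into a single allocation. A small standalone demonstration of that scanning technique, using a made-up buffer and hypothetical paths:

#include <cassert>
#include <cstring>

// Given the first char of one packed string, return the start of the string
// that precedes it. Assumes a non-empty preceding string exists in the block.
static const char* PrecedingString(const char* aEntry, const char* aBlockStart)
{
  assert(aEntry - aBlockStart >= 2);
  const char* cursor = aEntry - 2;  // last char of the preceding string
  while (cursor > aBlockStart && *(cursor - 1)) {
    cursor--;  // walk back to the char after the previous NUL (or block start)
  }
  return cursor;
}

int main()
{
  // Layout sketch: "<db path>\0<journal path>\0<wal path>"
  static const char block[] =
    "/t/places.sqlite\0/t/places.sqlite-journal\0/t/places.sqlite-wal";
  const char* wal = block + sizeof(block) - strlen("/t/places.sqlite-wal") - 1;
  const char* journal = PrecedingString(wal, block);
  assert(strcmp(journal, "/t/places.sqlite-journal") == 0);
  assert(strcmp(PrecedingString(journal, block), "/t/places.sqlite") == 0);
  return 0;
}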
already_AddRefed<QuotaObject>
GetQuotaObjectFromNameAndParameters(const char *zName,
const char *zURIParameterKey)
{
MOZ_ASSERT(zName);
MOZ_ASSERT(zURIParameterKey);
const char *persistenceType =
sqlite3_uri_parameter(zURIParameterKey, "persistenceType");
if (!persistenceType) {
return nullptr;
}
const char *group = sqlite3_uri_parameter(zURIParameterKey, "group");
if (!group) {
NS_WARNING("SQLite URI had 'persistenceType' but not 'group'?!");
return nullptr;
}
const char *origin = sqlite3_uri_parameter(zURIParameterKey, "origin");
if (!origin) {
NS_WARNING("SQLite URI had 'persistenceType' and 'group' but not "
"'origin'?!");
return nullptr;
}
QuotaManager *quotaManager = QuotaManager::Get();
MOZ_ASSERT(quotaManager);
return quotaManager->GetQuotaObject(
PersistenceTypeFromText(nsDependentCString(persistenceType)),
nsDependentCString(group),
nsDependentCString(origin),
NS_ConvertUTF8toUTF16(zName));
}
void
MaybeEstablishQuotaControl(const char *zName,
telemetry_file *pFile,
int flags)
{
MOZ_ASSERT(pFile);
MOZ_ASSERT(!pFile->quotaObject);
if (!(flags & (SQLITE_OPEN_URI | SQLITE_OPEN_WAL))) {
return;
}
MOZ_ASSERT(zName);
const char *zURIParameterKey = (flags & SQLITE_OPEN_WAL) ?
DatabasePathFromWALPath(zName) :
zName;
MOZ_ASSERT(zURIParameterKey);
pFile->quotaObject =
GetQuotaObjectFromNameAndParameters(zName, zURIParameterKey);
}
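Aside (illustrative only): the helpers above only see persistenceType, group and origin if the database was opened through a file: URI carrying those query parameters with SQLITE_OPEN_URI set. A hedged sketch of such an open call; the path and parameter values here are invented, and the real code supplies its own VFS name instead of nullptr:

#include <sqlite3.h>

static int OpenQuotaControlledDb(sqlite3** aDb)
{
  // Query parameters match the keys read by sqlite3_uri_parameter() above.
  const char* uri =
    "file:/profile/storage/default/http+++example.com/idb/1234example.sqlite"
    "?persistenceType=default&group=example.com&origin=http://example.com";
  return sqlite3_open_v2(uri, aDb,
                         SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE |
                         SQLITE_OPEN_URI,
                         /* zVfs */ nullptr);
}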
/*
** Close a telemetry_file.
*/
@ -355,9 +160,6 @@ xClose(sqlite3_file *pFile)
delete p->base.pMethods;
p->base.pMethods = nullptr;
p->quotaObject = nullptr;
#ifdef DEBUG
p->fileChunkSize = 0;
#endif
}
return rc;
}
@ -378,19 +180,6 @@ xRead(sqlite3_file *pFile, void *zBuf, int iAmt, sqlite_int64 iOfst)
return rc;
}
/*
** Return the current file-size of a telemetry_file.
*/
int
xFileSize(sqlite3_file *pFile, sqlite_int64 *pSize)
{
IOThreadAutoTimer ioTimer(IOInterposeObserver::OpStat);
telemetry_file *p = (telemetry_file *)pFile;
int rc;
rc = p->pReal->pMethods->xFileSize(p->pReal, pSize);
return rc;
}
/*
** Write data to a telemetry_file.
*/
@ -398,24 +187,13 @@ int
xWrite(sqlite3_file *pFile, const void *zBuf, int iAmt, sqlite_int64 iOfst)
{
telemetry_file *p = (telemetry_file *)pFile;
if (p->quotaObject && !p->quotaObject->MaybeAllocateMoreSpace(iOfst, iAmt)) {
return SQLITE_FULL;
}
IOThreadAutoTimer ioTimer(p->histograms->writeMS, IOInterposeObserver::OpWrite);
int rc;
if (p->quotaObject) {
MOZ_ASSERT(INT64_MAX - iOfst >= iAmt);
if (!p->quotaObject->MaybeUpdateSize(iOfst + iAmt, /* aTruncate */ false)) {
return SQLITE_FULL;
}
}
rc = p->pReal->pMethods->xWrite(p->pReal, zBuf, iAmt, iOfst);
Telemetry::Accumulate(p->histograms->writeB, rc == SQLITE_OK ? iAmt : 0);
if (p->quotaObject && rc != SQLITE_OK) {
NS_WARNING("xWrite failed on a quota-controlled file, attempting to "
"update its current size...");
sqlite_int64 currentSize;
if (xFileSize(pFile, &currentSize) == SQLITE_OK) {
p->quotaObject->MaybeUpdateSize(currentSize, /* aTruncate */ true);
}
}
return rc;
}
@ -429,33 +207,9 @@ xTruncate(sqlite3_file *pFile, sqlite_int64 size)
telemetry_file *p = (telemetry_file *)pFile;
int rc;
Telemetry::AutoTimer<Telemetry::MOZ_SQLITE_TRUNCATE_MS> timer;
if (p->quotaObject) {
if (p->fileChunkSize > 0) {
// Round up to the smallest multiple of the chunk size that will hold all
// the data.
size =
((size + p->fileChunkSize - 1) / p->fileChunkSize) * p->fileChunkSize;
}
if (!p->quotaObject->MaybeUpdateSize(size, /* aTruncate */ true)) {
return SQLITE_FULL;
}
}
rc = p->pReal->pMethods->xTruncate(p->pReal, size);
if (p->quotaObject) {
if (rc == SQLITE_OK) {
#ifdef DEBUG
// Make sure xTruncate set the size exactly as we calculated above.
sqlite_int64 newSize;
MOZ_ASSERT(xFileSize(pFile, &newSize) == SQLITE_OK);
MOZ_ASSERT(newSize == size);
#endif
} else {
NS_WARNING("xTruncate failed on a quota-controlled file, attempting to "
"update its current size...");
if (xFileSize(pFile, &size) == SQLITE_OK) {
p->quotaObject->MaybeUpdateSize(size, /* aTruncate */ true);
}
}
if (rc == SQLITE_OK && p->quotaObject) {
p->quotaObject->UpdateSize(size);
}
return rc;
}
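Aside (illustrative only): the removed lines above round truncation and size-hint requests up to a whole number of file chunks once SQLITE_FCNTL_CHUNK_SIZE has been applied. The same rounding, as a standalone helper:

#include <cstdint>

// Smallest multiple of aChunkSize that is >= aSize; no rounding when no chunk
// size is configured (aChunkSize <= 0).
static int64_t RoundUpToChunk(int64_t aSize, int aChunkSize)
{
  if (aChunkSize <= 0) {
    return aSize;
  }
  return ((aSize + aChunkSize - 1) / aChunkSize) * aChunkSize;
}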
@ -471,6 +225,19 @@ xSync(sqlite3_file *pFile, int flags)
return p->pReal->pMethods->xSync(p->pReal, flags);
}
/*
** Return the current file-size of a telemetry_file.
*/
int
xFileSize(sqlite3_file *pFile, sqlite_int64 *pSize)
{
IOThreadAutoTimer ioTimer(IOInterposeObserver::OpStat);
telemetry_file *p = (telemetry_file *)pFile;
int rc;
rc = p->pReal->pMethods->xFileSize(p->pReal, pSize);
return rc;
}
/*
** Lock a telemetry_file.
*/
@ -513,41 +280,7 @@ int
xFileControl(sqlite3_file *pFile, int op, void *pArg)
{
telemetry_file *p = (telemetry_file *)pFile;
int rc;
// Hook SQLITE_FCNTL_SIZE_HINT for quota-controlled files and do the necessary
// work before passing to the SQLite VFS.
if (op == SQLITE_FCNTL_SIZE_HINT && p->quotaObject) {
sqlite3_int64 hintSize = *static_cast<sqlite3_int64*>(pArg);
sqlite3_int64 currentSize;
rc = xFileSize(pFile, &currentSize);
if (rc != SQLITE_OK) {
return rc;
}
if (hintSize > currentSize) {
rc = xTruncate(pFile, hintSize);
if (rc != SQLITE_OK) {
return rc;
}
}
}
rc = p->pReal->pMethods->xFileControl(p->pReal, op, pArg);
// Grab the file chunk size after the SQLite VFS has approved.
if (op == SQLITE_FCNTL_CHUNK_SIZE && rc == SQLITE_OK) {
p->fileChunkSize = *static_cast<int*>(pArg);
}
#ifdef DEBUG
if (op == SQLITE_FCNTL_SIZE_HINT && p->quotaObject && rc == SQLITE_OK) {
sqlite3_int64 hintSize = *static_cast<sqlite3_int64*>(pArg);
if (p->fileChunkSize > 0) {
hintSize =
((hintSize + p->fileChunkSize - 1) / p->fileChunkSize) *
p->fileChunkSize;
}
sqlite3_int64 currentSize;
MOZ_ASSERT(xFileSize(pFile, &currentSize) == SQLITE_OK);
MOZ_ASSERT(currentSize >= hintSize);
}
#endif
int rc = p->pReal->pMethods->xFileControl(p->pReal, op, pArg);
return rc;
}
@ -653,7 +386,20 @@ xOpen(sqlite3_vfs* vfs, const char *zName, sqlite3_file* pFile,
}
p->histograms = h;
MaybeEstablishQuotaControl(zName, p, flags);
const char* persistenceType;
const char* group;
const char* origin;
if ((flags & SQLITE_OPEN_URI) &&
(persistenceType = sqlite3_uri_parameter(zName, "persistenceType")) &&
(group = sqlite3_uri_parameter(zName, "group")) &&
(origin = sqlite3_uri_parameter(zName, "origin"))) {
QuotaManager* quotaManager = QuotaManager::Get();
MOZ_ASSERT(quotaManager);
p->quotaObject = quotaManager->GetQuotaObject(PersistenceTypeFromText(
nsDependentCString(persistenceType)), nsDependentCString(group),
nsDependentCString(origin), NS_ConvertUTF8toUTF16(zName));
}
rc = orig_vfs->xOpen(orig_vfs, zName, p->pReal, flags, pOutFlags);
if( rc != SQLITE_OK )
@ -705,22 +451,7 @@ int
xDelete(sqlite3_vfs* vfs, const char *zName, int syncDir)
{
sqlite3_vfs *orig_vfs = static_cast<sqlite3_vfs*>(vfs->pAppData);
int rc;
nsRefPtr<QuotaObject> quotaObject;
if (StringEndsWith(nsDependentCString(zName), NS_LITERAL_CSTRING("-wal"))) {
const char *zURIParameterKey = DatabasePathFromWALPath(zName);
MOZ_ASSERT(zURIParameterKey);
quotaObject = GetQuotaObjectFromNameAndParameters(zName, zURIParameterKey);
}
rc = orig_vfs->xDelete(orig_vfs, zName, syncDir);
if (rc == SQLITE_OK && quotaObject) {
MOZ_ALWAYS_TRUE(quotaObject->MaybeUpdateSize(0, /* aTruncate */ true));
}
return rc;
return orig_vfs->xDelete(orig_vfs, zName, syncDir);
}
int

View file

@ -8957,7 +8957,6 @@
},
{
"path": "IndexedDB/key_valid.html",
"timeout": "long",
"url": "/IndexedDB/key_valid.html"
},
{

View file

@ -1,7 +1,6 @@
<!DOCTYPE html>
<!-- Submitted from TestTWF Paris -->
<meta charset=utf-8>
<meta name="timeout" content="long">
<title>Valid key</title>
<link rel=help href="http://dvcs.w3.org/hg/IndexedDB/raw-file/tip/Overview.html#key-construct">
<link rel=assert title="A value is said to be a valid key if it is one of the following types: Array JavaScript objects [ECMA-262], DOMString [WEBIDL], Date [ECMA-262] or float [WEBIDL]. However Arrays are only valid keys if every item in the array is defined and is a valid key (i.e. sparse arrays can not be valid keys) and if the Array doesn't directly or indirectly contain itself. Any non-numeric properties are ignored, and thus does not affect whether the Array is a valid key. Additionally, if the value is of type float, it is only a valid key if it is not NaN, and if the value is of type Date it is only a valid key if its [[PrimitiveValue]] internal property, as defined by [ECMA-262], is not NaN. Conforming user agents must support all valid keys as keys.">