Mirror of https://github.com/mozilla/gecko-dev.git
Bug 985145 - make node reassignment work correctly with FxA. r=rnewman
This commit is contained in:
Parent
35bd372c3f
Commit
19e4e8c0ac

@@ -539,6 +539,7 @@ this.BrowserIDManager.prototype = {
  // current user stored in this._token. When resolved, this._token is valid.
  _ensureValidToken: function() {
    if (this.hasValidToken()) {
      this._log.debug("_ensureValidToken already has one");
      return Promise.resolve();
    }
    return this._fetchTokenForUser().then(
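
For context, _ensureValidToken above is a memoized fetch: reuse the cached token while it is still valid, otherwise fetch a fresh one and cache it. A minimal standalone sketch of that pattern, assuming an illustrative token shape with an expiration timestamp (this is not the real BrowserIDManager code):

let tokenCache = {
  _token: null,

  hasValidToken() {
    // Assumed token shape: valid until its expiration timestamp passes.
    return !!(this._token && this._token.expiration > Date.now());
  },

  _fetchTokenForUser() {
    // Stand-in for the real token-server round trip.
    return Promise.resolve({endpoint: "https://node/1.1/user/",
                            expiration: Date.now() + 60000});
  },

  _ensureValidToken() {
    if (this.hasValidToken()) {
      return Promise.resolve();
    }
    return this._fetchTokenForUser().then(token => { this._token = token; });
  },
};

tokenCache._ensureValidToken().then(() => console.log(tokenCache._token.endpoint));
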
@@ -627,15 +628,25 @@ BrowserIDClusterManager.prototype = {
      if (!endpoint.endsWith("/")) {
        endpoint += "/";
      }
      log.debug("_findCluster returning " + endpoint);
      return endpoint;
    }.bind(this);

    // Spinningly ensure we are ready to authenticate and have a valid token.
    let promiseClusterURL = function() {
      return this.identity.whenReadyToAuthenticate.promise.then(
        () => this.identity._ensureValidToken()
      ).then(
        () => endPointFromIdentityToken()
        () => {
          // We need to handle node reassignment here. If we are being asked
          // for a clusterURL while the service already has a clusterURL, then
          // it's likely a 401 was received using the existing token - in which
          // case we just discard the existing token and fetch a new one.
          if (this.service.clusterURL) {
            log.debug("_findCluster found existing clusterURL, so discarding the current token");
            this.identity._token = null;
          }
          return this.identity._ensureValidToken();
        }
      ).then(endPointFromIdentityToken
      );
    }.bind(this);
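
The effect of the new chain: when a cluster URL is requested while the service already has one, the request is almost certainly recovery from a 401, so the cached token is discarded before _ensureValidToken runs, forcing a round trip to the token server and thus picking up any node reassignment. A hedged sketch of that control flow with stand-in objects (illustrative only, not the real module):

// Stand-in objects, for illustration only.
let identity = {
  whenReadyToAuthenticate: {promise: Promise.resolve()},
  _token: {endpoint: "https://old-node/1.1/user/"},
  _ensureValidToken() {
    if (this._token) {
      return Promise.resolve();
    }
    // Pretend the token server assigned us a new node.
    this._token = {endpoint: "https://new-node/1.1/user/"};
    return Promise.resolve();
  },
};
let service = {clusterURL: "https://old-node/1.1/user/"};

function findCluster(identity, service) {
  return identity.whenReadyToAuthenticate.promise.then(() => {
    if (service.clusterURL) {
      // An existing clusterURL means this lookup is probably recovering from
      // a 401 - discard the token so a fresh one (and node) is fetched.
      identity._token = null;
    }
    return identity._ensureValidToken();
  }).then(() => identity._token.endpoint);
}

findCluster(identity, service).then(url => console.log(url)); // https://new-node/1.1/user/
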
@@ -656,7 +656,7 @@ Sync11Service.prototype = {
    }
  },

  verifyLogin: function verifyLogin() {
  verifyLogin: function verifyLogin(allow40XRecovery = true) {
    // If the identity isn't ready it might not know the username...
    if (!this.identity.readyToAuthenticate) {
      this._log.info("Not ready to authenticate in verifyLogin.");

@@ -727,8 +727,8 @@ Sync11Service.prototype = {

        case 404:
          // Check that we're verifying with the correct cluster
          if (this._clusterManager.setCluster()) {
            return this.verifyLogin();
          if (allow40XRecovery && this._clusterManager.setCluster()) {
            return this.verifyLogin(false);
          }

          // We must have the right cluster, but the server doesn't expect us
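
allow40XRecovery is a retry-once guard: the outer verifyLogin() may react to a 40x by switching cluster and calling itself a single time with the flag cleared, so a second 40x cannot recurse indefinitely. A self-contained sketch of the pattern (checkServer and switchCluster are hypothetical stand-ins, not Sync APIs):

// Retry-once pattern mirroring verifyLogin(allow40XRecovery = true).
let attempts = 0;
function checkServer() { return attempts++ === 0 ? 404 : 200; } // hypothetical
function switchCluster() { return true; }                       // hypothetical

function verify(allowRecovery = true) {
  let status = checkServer();
  if ((status === 401 || status === 404) && allowRecovery && switchCluster()) {
    return verify(false); // exactly one retry against the new cluster
  }
  return status === 200;
}

console.log(verify()); // true: the first call 404s, the single retry succeeds
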
@@ -986,10 +986,9 @@ Sync11Service.prototype = {
  },

  logout: function logout() {
    // No need to do anything if we're already logged out.
    if (!this._loggedIn)
      return;

    // If we failed during login, we aren't going to have this._loggedIn set,
    // but we still want to ask the identity to logout, so it doesn't try and
    // reuse any old credentials next time we sync.
    this._log.info("Logging out");
    this.identity.logout();
    this._loggedIn = false;

@@ -1059,6 +1058,14 @@ Sync11Service.prototype = {
    // ... fetch the current record from the server, and COPY THE FLAGS.
    let newMeta = this.recordManager.get(this.metaURL);

    // If we got a 401, we do not want to create a new meta/global - we
    // should be able to get the existing meta after we get a new node.
    if (this.recordManager.response.status == 401) {
      this._log.debug("Fetching meta/global record on the server returned 401.");
      this.errorHandler.checkServerError(this.recordManager.response);
      return false;
    }

    if (!this.recordManager.response.success || !newMeta) {
      this._log.debug("No meta/global record on the server. Creating one.");
      newMeta = new WBORecord("meta", "global");

@@ -806,7 +806,12 @@ SyncServer.prototype = {
    }

    let [all, version, username, first, rest] = parts;
    if (version != SYNC_API_VERSION) {
    // Doing a float compare of the version allows for us to pretend there was
    // a node-reassignment - eg, we could re-assign from "1.1/user/" to
    // "1.10/user" - this server will then still accept requests with the new
    // URL while any code in sync itself which compares URLs will see a
    // different URL.
    if (parseFloat(version) != parseFloat(SYNC_API_VERSION)) {
      this._log.debug("SyncServer: Unknown version.");
      throw HTTP_404;
    }
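
This works because parseFloat makes "1.1" and "1.10" numerically equal while the URL strings still differ, so the fake server keeps accepting the reassigned URL while Sync's own string comparison sees a brand-new node. A quick worked example:

parseFloat("1.1") === parseFloat("1.10"); // true  - the server: same API version
"1.1/johndoe" === "1.10/johndoe";         // false - sync: looks like a new node URL
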
@@ -0,0 +1,321 @@
/* Any copyright is dedicated to the Public Domain.
   http://creativecommons.org/publicdomain/zero/1.0/ */

_("Test that node reassignment happens correctly using the FxA identity mgr.");
// The node-reassignment logic is quite different for FxA than for the legacy
// provider. In particular, there's no special request necessary for
// reassignment - it comes from the token server - so we need to ensure the
// Fxa cluster manager grabs a new token.

Cu.import("resource://gre/modules/Log.jsm");
Cu.import("resource://services-common/rest.js");
Cu.import("resource://services-sync/constants.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/status.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/rotaryengine.js");
Cu.import("resource://services-sync/browserid_identity.js");
Cu.import("resource://testing-common/services/sync/utils.js");

Service.engineManager.clear();

function run_test() {
  Log.repository.getLogger("Sync.AsyncResource").level = Log.Level.Trace;
  Log.repository.getLogger("Sync.ErrorHandler").level = Log.Level.Trace;
  Log.repository.getLogger("Sync.Resource").level = Log.Level.Trace;
  Log.repository.getLogger("Sync.RESTRequest").level = Log.Level.Trace;
  Log.repository.getLogger("Sync.Service").level = Log.Level.Trace;
  Log.repository.getLogger("Sync.SyncScheduler").level = Log.Level.Trace;
  initTestLogging();

  Service.engineManager.register(RotaryEngine);

  // Setup the FxA identity manager and cluster manager.
  Status.__authManager = Service.identity = new BrowserIDManager();
  Service._clusterManager = Service.identity.createClusterManager(Service);

  // None of the failures in this file should result in a UI error.
  function onUIError() {
    do_throw("Errors should not be presented in the UI.");
  }
  Svc.Obs.add("weave:ui:login:error", onUIError);
  Svc.Obs.add("weave:ui:sync:error", onUIError);

  run_next_test();
}


// API-compatible with SyncServer handler. Bind `handler` to something to use
// as a ServerCollection handler.
function handleReassign(handler, req, resp) {
  resp.setStatusLine(req.httpVersion, 401, "Node reassignment");
  resp.setHeader("Content-Type", "application/json");
  let reassignBody = JSON.stringify({error: "401inator in place"});
  resp.bodyOutputStream.write(reassignBody, reassignBody.length);
}

let numTokenRequests = 0;

function prepareServer(cbAfterTokenFetch) {
  let config = makeIdentityConfig({username: "johndoe"});
  let server = new SyncServer();
  server.registerUser("johndoe");
  server.start();

  // Set the token endpoint for the initial token request that's done implicitly
  // via configureIdentity.
  config.fxaccount.token.endpoint = server.baseURI + "1.1/johndoe";
  // And future token fetches will do magic around numReassigns.
  let numReassigns = 0;
  return configureIdentity(config).then(() => {
    Service.identity._tokenServerClient = {
      getTokenFromBrowserIDAssertion: function(uri, assertion, cb) {
        // Build a new URL with trailing zeros for the SYNC_VERSION part - this
        // will still be seen as equivalent by the test server, but different
        // by sync itself.
        numReassigns += 1;
        let trailingZeros = new Array(numReassigns + 1).join('0');
        let token = config.fxaccount.token;
        token.endpoint = server.baseURI + "1.1" + trailingZeros + "/johndoe";
        token.uid = config.username;
        numTokenRequests += 1;
        cb(null, token);
        if (cbAfterTokenFetch) {
          cbAfterTokenFetch();
        }
      },
    };
    Service.clusterURL = config.fxaccount.token.endpoint;
    return server;
  });
}

function getReassigned() {
  try {
    return Services.prefs.getBoolPref("services.sync.lastSyncReassigned");
  } catch (ex if (ex.result == Cr.NS_ERROR_UNEXPECTED)) {
    return false;
  } catch (ex) {
    do_throw("Got exception retrieving lastSyncReassigned: " +
             Utils.exceptionStr(ex));
  }
}

/**
 * Make a test request to `url`, then watch the result of two syncs
 * to ensure that a node request was made.
 * Runs `between` between the two. This can be used to undo deliberate failure
 * setup, detach observers, etc.
 */
function syncAndExpectNodeReassignment(server, firstNotification, between,
                                       secondNotification, url) {
  _("Starting syncAndExpectNodeReassignment\n");
  let deferred = Promise.defer();
  function onwards() {
    let numTokenRequestsBefore;
    function onFirstSync() {
      _("First sync completed.");
      Svc.Obs.remove(firstNotification, onFirstSync);
      Svc.Obs.add(secondNotification, onSecondSync);

      do_check_eq(Service.clusterURL, "");

      // Track whether we fetched a new token.
      numTokenRequestsBefore = numTokenRequests;

      // Allow for tests to clean up error conditions.
      between();
    }
    function onSecondSync() {
      _("Second sync completed.");
      Svc.Obs.remove(secondNotification, onSecondSync);
      Service.scheduler.clearSyncTriggers();

      // Make absolutely sure that any event listeners are done with their work
      // before we proceed.
      waitForZeroTimer(function () {
        _("Second sync nextTick.");
        do_check_eq(numTokenRequests, numTokenRequestsBefore + 1, "fetched a new token");
        Service.startOver();
        server.stop(deferred.resolve);
      });
    }

    Svc.Obs.add(firstNotification, onFirstSync);
    Service.sync();
  }

  // Make sure that it works!
  _("Making request to " + url + " which should 401");
  let request = new RESTRequest(url);
  request.get(function () {
    do_check_eq(request.response.status, 401);
    Utils.nextTick(onwards);
  });
  yield deferred.promise;
}

add_task(function test_momentary_401_engine() {
  _("Test a failure for engine URLs that's resolved by reassignment.");
  let server = yield prepareServer();
  let john = server.user("johndoe");

  _("Enabling the Rotary engine.");
  let engine = Service.engineManager.get("rotary");
  engine.enabled = true;

  // We need the server to be correctly set up prior to experimenting. Do this
  // through a sync.
  let global = {syncID: Service.syncID,
                storageVersion: STORAGE_VERSION,
                rotary: {version: engine.version,
                         syncID: engine.syncID}}
  john.createCollection("meta").insert("global", global);

  _("First sync to prepare server contents.");
  Service.sync();

  _("Setting up Rotary collection to 401.");
  let rotary = john.createCollection("rotary");
  let oldHandler = rotary.collectionHandler;
  rotary.collectionHandler = handleReassign.bind(this, undefined);

  // We want to verify that the clusterURL pref has been cleared after a 401
  // inside a sync. Flag the Rotary engine to need syncing.
  john.collection("rotary").timestamp += 1000;

  function between() {
    _("Undoing test changes.");
    rotary.collectionHandler = oldHandler;

    function onLoginStart() {
      // lastSyncReassigned shouldn't be cleared until a sync has succeeded.
      _("Ensuring that lastSyncReassigned is still set at next sync start.");
      Svc.Obs.remove("weave:service:login:start", onLoginStart);
      do_check_true(getReassigned());
    }

    _("Adding observer that lastSyncReassigned is still set on login.");
    Svc.Obs.add("weave:service:login:start", onLoginStart);
  }

  yield syncAndExpectNodeReassignment(server,
                                      "weave:service:sync:finish",
                                      between,
                                      "weave:service:sync:finish",
                                      Service.storageURL + "rotary");
});

// This test ends up being a failing info fetch *after we're already logged in*.
add_task(function test_momentary_401_info_collections_loggedin() {
  _("Test a failure for info/collections after login that's resolved by reassignment.");
  let server = yield prepareServer();

  _("First sync to prepare server contents.");
  Service.sync();

  _("Arrange for info/collections to return a 401.");
  let oldHandler = server.toplevelHandlers.info;
  server.toplevelHandlers.info = handleReassign;

  function undo() {
    _("Undoing test changes.");
    server.toplevelHandlers.info = oldHandler;
  }

  do_check_true(Service.isLoggedIn, "already logged in");

  yield syncAndExpectNodeReassignment(server,
                                      "weave:service:sync:error",
                                      undo,
                                      "weave:service:sync:finish",
                                      Service.infoURL);
});

// This test ends up being a failing info fetch *before we're logged in*.
// In this case we expect to recover during the login phase - so the first
// sync succeeds.
add_task(function test_momentary_401_info_collections_loggedout() {
  _("Test a failure for info/collections before login that's resolved by reassignment.");

  let oldHandler;
  let sawTokenFetch = false;

  function afterTokenFetch() {
    // After a single token fetch, we undo our evil handleReassign hack, so
    // the next /info request returns the collection instead of a 401
    server.toplevelHandlers.info = oldHandler;
    sawTokenFetch = true;
  }

  let server = yield prepareServer(afterTokenFetch);

  // Return a 401 for the next /info request - it will be reset immediately
  // after a new token is fetched.
  oldHandler = server.toplevelHandlers.info
  server.toplevelHandlers.info = handleReassign;

  do_check_false(Service.isLoggedIn, "not already logged in");

  Service.sync();
  do_check_eq(Status.sync, SYNC_SUCCEEDED, "sync succeeded");
  // sync was successful - check we grabbed a new token.
  do_check_true(sawTokenFetch, "a new token was fetched by this test.")
  // and we are done.
  Service.startOver();
  let deferred = Promise.defer();
  server.stop(deferred.resolve);
  yield deferred.promise;
});

// This test ends up being a failing meta/global fetch *after we're already logged in*.
add_task(function test_momentary_401_storage_loggedin() {
_("Test a failure for any storage URL after login that's resolved by" +
|
||||
"reassignment.");
|
||||
  let server = yield prepareServer();

  _("First sync to prepare server contents.");
  Service.sync();

  _("Arrange for meta/global to return a 401.");
  let oldHandler = server.toplevelHandlers.storage;
  server.toplevelHandlers.storage = handleReassign;

  function undo() {
    _("Undoing test changes.");
    server.toplevelHandlers.storage = oldHandler;
  }

  do_check_true(Service.isLoggedIn, "already logged in");

  yield syncAndExpectNodeReassignment(server,
                                      "weave:service:sync:error",
                                      undo,
                                      "weave:service:sync:finish",
                                      Service.storageURL + "meta/global");
});

// This test ends up being a failing meta/global fetch *before we've logged in*.
add_task(function test_momentary_401_storage_loggedout() {
  _("Test a failure for any storage URL before login, not just engine parts. " +
    "Resolved by reassignment.");
  let server = yield prepareServer();

  // Return a 401 for all storage requests.
  let oldHandler = server.toplevelHandlers.storage;
  server.toplevelHandlers.storage = handleReassign;

  function undo() {
    _("Undoing test changes.");
    server.toplevelHandlers.storage = oldHandler;
  }

  do_check_false(Service.isLoggedIn, "not already logged in");

  yield syncAndExpectNodeReassignment(server,
                                      "weave:service:login:error",
                                      undo,
                                      "weave:service:sync:finish",
                                      Service.storageURL + "meta/global");
});
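
These tests rely on task-style generators: add_task drives a generator, and each yield of a promise suspends the test until that promise settles. A simplified driver showing the idea (modern function* syntax here; the test files use the older SpiderMonkey generator form, and this is not the real add_task implementation):

function runTask(gen) {
  let it = gen();
  function step(value) {
    let r = it.next(value);
    return r.done ? Promise.resolve(r.value)
                  : Promise.resolve(r.value).then(step);
  }
  return step(undefined);
}

runTask(function* () {
  let server = yield Promise.resolve("server-ready"); // waits for the promise
  console.log(server); // "server-ready"
});
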
@@ -24,6 +24,8 @@ function run_test() {
  Log.repository.getLogger("Sync.SyncScheduler").level = Log.Level.Trace;
  initTestLogging();

  ensureLegacyIdentityManager();

  Service.engineManager.register(RotaryEngine);

  // None of the failures in this file should result in a UI error.

@@ -158,7 +160,7 @@ function syncAndExpectNodeReassignment(server, firstNotification, between,
  yield deferred.promise;
}

add_identity_test(this, function test_momentary_401_engine() {
add_task(function test_momentary_401_engine() {
  _("Test a failure for engine URLs that's resolved by reassignment.");
  let server = yield prepareServer();
  let john = server.user("johndoe");

@@ -210,7 +212,7 @@ add_identity_test(this, function test_momentary_401_engine() {
});

// This test ends up being a failing fetch *after we're already logged in*.
add_identity_test(this, function test_momentary_401_info_collections() {
add_task(function test_momentary_401_info_collections() {
  _("Test a failure for info/collections that's resolved by reassignment.");
  let server = yield prepareServer();

@@ -233,7 +235,32 @@ add_identity_test(this, function test_momentary_401_info_collections() {
    Service.infoURL);
});

add_identity_test(this, function test_momentary_401_storage() {
add_task(function test_momentary_401_storage_loggedin() {
  _("Test a failure for any storage URL, not just engine parts. " +
    "Resolved by reassignment.");
  let server = yield prepareServer();

  _("Performing initial sync to ensure we are logged in.")
  Service.sync();

  // Return a 401 for all storage requests.
  let oldHandler = server.toplevelHandlers.storage;
  server.toplevelHandlers.storage = handleReassign;

  function undo() {
    _("Undoing test changes.");
    server.toplevelHandlers.storage = oldHandler;
  }

  do_check_true(Service.isLoggedIn, "already logged in");
  yield syncAndExpectNodeReassignment(server,
                                      "weave:service:sync:error",
                                      undo,
                                      "weave:service:sync:finish",
                                      Service.storageURL + "meta/global");
});

add_task(function test_momentary_401_storage_loggedout() {
  _("Test a failure for any storage URL, not just engine parts. " +
    "Resolved by reassignment.");
  let server = yield prepareServer();

@@ -247,6 +274,7 @@ add_identity_test(this, function test_momentary_401_storage() {
    server.toplevelHandlers.storage = oldHandler;
  }

  do_check_false(Service.isLoggedIn, "not already logged in");
  yield syncAndExpectNodeReassignment(server,
                                      "weave:service:login:error",
                                      undo,

@@ -254,7 +282,7 @@ add_identity_test(this, function test_momentary_401_storage() {
    Service.storageURL + "meta/global");
});

add_identity_test(this, function test_loop_avoidance_storage() {
add_task(function test_loop_avoidance_storage() {
  _("Test that a repeated failure doesn't result in a sync loop " +
    "if node reassignment cannot resolve the failure.");

@@ -354,7 +382,7 @@ add_identity_test(this, function test_loop_avoidance_storage() {
  yield deferred.promise;
});

add_identity_test(this, function test_loop_avoidance_engine() {
add_task(function test_loop_avoidance_engine() {
  _("Test that a repeated 401 in an engine doesn't result in a sync loop " +
    "if node reassignment cannot resolve the failure.");
  let server = yield prepareServer();

@@ -124,6 +124,7 @@ skip-if = os == "android"
# Firefox Accounts specific tests
[test_fxa_startOver.js]
[test_fxa_service_cluster.js]
[test_fxa_node_reassignment.js]

# Finally, we test each engine.
[test_addons_engine.js]