Mirror of https://github.com/mozilla/gecko-dev.git

Bug 1514780 - Use the Quantum Bar tokenizer in UnifiedComplete. r=adw

Differential Revision: https://phabricator.services.mozilla.com/D16676

--HG--
extra : moz-landing-system : lando

Parent: ed23e847c7
Commit: de59370b24

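The change swaps UnifiedComplete's private whitespace splitter for the Quantum Bar tokenizer, so search tokens become objects carrying a value and a guessed type instead of bare strings. A minimal sketch of the token shape the hunks below rely on; the combined input here is an assumption for illustration, while the per-token expectations come from the tokenizer test changes further down:

    // Each token is {value, type}, where type is one of
    // UrlbarTokenizer.TYPE.{TEXT, POSSIBLE_ORIGIN, POSSIBLE_URL, RESTRICT_*}.
    let {tokens} = UrlbarTokenizer.tokenize({searchString: "test/test 1.2.3.4"});
    // Expected, assuming whitespace splitting as in the tests below:
    // [{value: "test/test", type: UrlbarTokenizer.TYPE.POSSIBLE_URL},
    //  {value: "1.2.3.4",   type: UrlbarTokenizer.TYPE.POSSIBLE_ORIGIN}]
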
@@ -25,8 +25,9 @@ var UrlbarTokenizer = {
   REGEXP_SPACES: /\s+/,

   // Regex used to guess url-like strings.
-  // These are not expected to cover 100% of the cases.
   REGEXP_PROTOCOL: /^[A-Z+.-]+:(\/\/)?(?!\/)/i,
+  // These are not expected to be 100% correct, we accept some user mistypes
+  // and we're unlikely to be able to cover 100% of the cases.
+  REGEXP_LIKE_PROTOCOL: /^[A-Z+.-]+:\/{0,2}(?!\/)/i,
   REGEXP_USERINFO_INVALID_CHARS: /[^\w.~%!$&'()*+,;=:-]/,
   REGEXP_HOSTPORT_INVALID_CHARS: /[^\[\]A-Z0-9.:-]/i,
   REGEXP_HOSTPORT_IP_LIKE: /^[a-f0-9\.\[\]:]+$/i,

@@ -84,7 +85,7 @@ var UrlbarTokenizer = {
     if (this.REGEXP_SPACES.test(token))
       return false;
     // If it starts with something that looks like a protocol, it's likely a url.
-    if (this.REGEXP_PROTOCOL.test(token))
+    if (this.REGEXP_LIKE_PROTOCOL.test(token))
       return true;
     // Guess path and prePath. At this point we should be analyzing strings not
     // having a protocol.

@@ -103,6 +104,11 @@ var UrlbarTokenizer = {
     if (path.length && userinfo.length)
       return true;

+    // If the first character after the slash in the path is a letter, then the
+    // token may be an "abc/def" url.
+    if (/^\/[a-z]/i.test(path)) {
+      return true;
+    }
     // If the path contains special chars, it is likely a url.
     if (["%", "?", "#"].some(c => path.includes(c)))
       return true;

@@ -132,6 +138,9 @@ var UrlbarTokenizer = {
    * @returns {boolean} whether the token looks like an origin.
    */
   looksLikeOrigin(token) {
+    if (token.length == 0) {
+      return false;
+    }
     let atIndex = token.indexOf("@");
     if (atIndex != -1 && this.REGEXP_COMMON_EMAIL.test(token)) {
       // We prefer handling it as an email rather than an origin with userinfo.

@@ -141,12 +150,14 @@ var UrlbarTokenizer = {
     let hostPort = atIndex != -1 ? token.slice(atIndex + 1) : token;
     logger.debug("userinfo", userinfo);
     logger.debug("hostPort", hostPort);
-    if (this.REGEXP_HOSTPORT_IPV4.test(hostPort))
-      return true;
-    if (this.REGEXP_HOSTPORT_IPV6.test(hostPort))
+    if (this.REGEXP_HOSTPORT_IPV4.test(hostPort) ||
+        this.REGEXP_HOSTPORT_IPV6.test(hostPort)) {
       return true;
+    }

     // Check for invalid chars.
-    return !this.REGEXP_USERINFO_INVALID_CHARS.test(userinfo) &&
+    return !this.REGEXP_LIKE_PROTOCOL.test(hostPort) &&
+           !this.REGEXP_USERINFO_INVALID_CHARS.test(userinfo) &&
            !this.REGEXP_HOSTPORT_INVALID_CHARS.test(hostPort) &&
            (!this.REGEXP_HOSTPORT_IP_LIKE.test(hostPort) ||
             !this.REGEXP_HOSTPORT_INVALID_IP.test(hostPort));

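To make the new boolean chain concrete, here is a standalone, hedged sketch of the updated looksLikeOrigin(), using the regexes quoted above; the email check and the REGEXP_HOSTPORT_INVALID_IP refinement are omitted for brevity:

    const REGEXP_LIKE_PROTOCOL = /^[A-Z+.-]+:\/{0,2}(?!\/)/i;
    const REGEXP_USERINFO_INVALID_CHARS = /[^\w.~%!$&'()*+,;=:-]/;
    const REGEXP_HOSTPORT_INVALID_CHARS = /[^\[\]A-Z0-9.:-]/i;

    function looksLikeOriginSketch(token) {
      if (token.length == 0) {
        return false;
      }
      let atIndex = token.indexOf("@");
      let userinfo = atIndex != -1 ? token.slice(0, atIndex) : "";
      let hostPort = atIndex != -1 ? token.slice(atIndex + 1) : token;
      // The new REGEXP_LIKE_PROTOCOL guard is what rejects "http:" here, so a
      // bare (possibly mistyped) protocol no longer counts as an origin.
      return !REGEXP_LIKE_PROTOCOL.test(hostPort) &&
             !REGEXP_USERINFO_INVALID_CHARS.test(userinfo) &&
             !REGEXP_HOSTPORT_INVALID_CHARS.test(hostPort);
    }

    console.log(looksLikeOriginSketch("mozilla.org")); // true
    console.log(looksLikeOriginSketch("http:"));       // false, protocol-like
    console.log(looksLikeOriginSketch("a b"));         // false, invalid chars
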
@@ -237,31 +248,23 @@ function splitString(searchString) {
  * @param {array} tokens
  *        An array of strings, representing search tokens.
  * @returns {array} An array of token objects.
+ * @note restriction characters are only considered if they appear at the start
+ *       or at the end of the tokens list. In case of restriction characters
+ *       conflict, the most external ones win. Leading ones win over trailing
+ *       ones. Discarded restriction characters are considered text.
  */
 function filterTokens(tokens) {
   let filtered = [];
-  let foundRestriction = [];
-  // Tokens that can be combined with others (but not with themselves).
-  // We can have a maximum of 2 tokens, one combinable and one non-combinable.
-  let combinables = new Set([
-    UrlbarTokenizer.TYPE.RESTRICT_TITLE,
-    UrlbarTokenizer.TYPE.RESTRICT_URL,
-  ]);
-  for (let token of tokens) {
+  let restrictions = [];
+  for (let i = 0; i < tokens.length; ++i) {
+    let token = tokens[i];
     let tokenObj = {
       value: token,
       type: UrlbarTokenizer.TYPE.TEXT,
     };
     let restrictionType = CHAR_TO_TYPE_MAP.get(token);
-    let firstRestriction = foundRestriction.length > 0 ? foundRestriction[0] : null;
-    if (tokens.length > 1 &&
-        restrictionType &&
-        !firstRestriction ||
-        (foundRestriction.length == 1 &&
-         (combinables.has(firstRestriction) && !combinables.has(restrictionType)) ||
-         (!combinables.has(firstRestriction) && combinables.has(restrictionType)))) {
-      tokenObj.type = restrictionType;
-      foundRestriction.push(restrictionType);
+    if (restrictionType) {
+      restrictions.push({index: i, type: restrictionType});
     } else if (UrlbarTokenizer.looksLikeOrigin(token)) {
       tokenObj.type = UrlbarTokenizer.TYPE.POSSIBLE_ORIGIN;
     } else if (UrlbarTokenizer.looksLikeUrl(token, {requirePath: true})) {

@@ -269,6 +272,47 @@ function filterTokens(tokens) {
     }
     filtered.push(tokenObj);
   }
+
+  // Handle restriction characters.
+  if (restrictions.length > 0) {
+    // We can apply two kind of restrictions: type (bookmark, search, ...) and
+    // matching (url, title). These kind of restrictions can be combined, but we
+    // can only have one restriction per kind.
+    let matchingRestrictionFound = false;
+    let typeRestrictionFound = false;
+    function assignRestriction(r) {
+      if (r && !(matchingRestrictionFound && typeRestrictionFound)) {
+        if ([UrlbarTokenizer.TYPE.RESTRICT_TITLE,
+             UrlbarTokenizer.TYPE.RESTRICT_URL].includes(r.type)) {
+          if (!matchingRestrictionFound) {
+            matchingRestrictionFound = true;
+            filtered[r.index].type = r.type;
+            return true;
+          }
+        } else if (!typeRestrictionFound) {
+          typeRestrictionFound = true;
+          filtered[r.index].type = r.type;
+          return true;
+        }
+      }
+      return false;
+    }
+
+    // Look at the first token.
+    let found = assignRestriction(restrictions.find(r => r.index == 0));
+    if (found) {
+      // If the first token was assigned, look at the next one.
+      assignRestriction(restrictions.find(r => r.index == 1));
+    }
+    // Then look at the last token.
+    let lastIndex = tokens.length - 1;
+    found = assignRestriction(restrictions.find(r => r.index == lastIndex));
+    if (found) {
+      // If the last token was assigned, look at the previous one.
+      assignRestriction(restrictions.find(r => r.index == lastIndex - 1));
+    }
+  }
+
   logger.info("Filtered Tokens", tokens);
   return filtered;
 }

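For illustration, a runnable sketch of the precedence rules described in the @note: restriction characters count only at the boundaries, the most external ones win, and leading beats trailing. The TYPE constants and the character map below are stand-ins for the real UrlbarTokenizer values, not imports from the module:

    const TYPE = {TEXT: 1, RESTRICT_HISTORY: 2, RESTRICT_BOOKMARK: 3,
                  RESTRICT_TITLE: 4, RESTRICT_URL: 5};
    const CHAR_TO_TYPE = new Map([
      ["^", TYPE.RESTRICT_HISTORY],
      ["*", TYPE.RESTRICT_BOOKMARK],
      ["#", TYPE.RESTRICT_TITLE],
      ["@", TYPE.RESTRICT_URL],
    ]);

    function filterTokensSketch(tokens) {
      let filtered = tokens.map(value => ({value, type: TYPE.TEXT}));
      let restrictions = [];
      for (let i = 0; i < tokens.length; ++i) {
        let type = CHAR_TO_TYPE.get(tokens[i]);
        if (type) {
          restrictions.push({index: i, type});
        }
      }
      // One "matching" (title/url) and one "type" restriction may combine.
      let matchingFound = false;
      let typeFound = false;
      function assign(r) {
        if (!r || (matchingFound && typeFound)) {
          return false;
        }
        let isMatching = r.type == TYPE.RESTRICT_TITLE ||
                         r.type == TYPE.RESTRICT_URL;
        if (isMatching ? matchingFound : typeFound) {
          return false;
        }
        if (isMatching) {
          matchingFound = true;
        } else {
          typeFound = true;
        }
        filtered[r.index].type = r.type;
        return true;
      }
      // Leading restrictions win over trailing ones.
      if (assign(restrictions.find(r => r.index == 0))) {
        assign(restrictions.find(r => r.index == 1));
      }
      let last = tokens.length - 1;
      if (assign(restrictions.find(r => r.index == last))) {
        assign(restrictions.find(r => r.index == last - 1));
      }
      return filtered;
    }

    // Bookmark restriction plus title match: different kinds, both apply.
    console.log(filterTokensSketch(["*", "foo", "#"]).map(t => t.type));
    // -> [3, 1, 4]
    // Two type restrictions: the leading "^" wins, the trailing "*" stays text.
    console.log(filterTokensSketch(["^", "foo", "*"]).map(t => t.type));
    // -> [2, 1, 1]
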
@@ -4,10 +4,8 @@
 add_task(async function test_tokenizer() {
   let testContexts = [
     { desc: "Empty string",
-      searchString: "test",
-      expectedTokens: [
-        { value: "test", type: UrlbarTokenizer.TYPE.POSSIBLE_ORIGIN },
-      ],
+      searchString: "",
+      expectedTokens: [],
     },
     { desc: "Single word string",
       searchString: "test",

@@ -45,6 +43,20 @@ add_task(async function test_tokenizer() {
         { value: UrlbarTokenizer.RESTRICT.BOOKMARK, type: UrlbarTokenizer.TYPE.RESTRICT_BOOKMARK },
       ],
     },
+    { desc: "separate restriction char in the middle",
+      searchString: `test ${UrlbarTokenizer.RESTRICT.BOOKMARK} test`,
+      expectedTokens: [
+        { value: "test", type: UrlbarTokenizer.TYPE.POSSIBLE_ORIGIN },
+        { value: UrlbarTokenizer.RESTRICT.BOOKMARK, type: UrlbarTokenizer.TYPE.TEXT },
+        { value: "test", type: UrlbarTokenizer.TYPE.POSSIBLE_ORIGIN },
+      ],
+    },
+    { desc: "restriction char in the middle",
+      searchString: `test${UrlbarTokenizer.RESTRICT.BOOKMARK}test`,
+      expectedTokens: [
+        { value: `test${UrlbarTokenizer.RESTRICT.BOOKMARK}test`, type: UrlbarTokenizer.TYPE.TEXT },
+      ],
+    },
     { desc: "double boundary restriction char",
       searchString: `${UrlbarTokenizer.RESTRICT.BOOKMARK}test${UrlbarTokenizer.RESTRICT.TITLE}`,
       expectedTokens: [

@@ -69,7 +81,7 @@ add_task(async function test_tokenizer() {
     { desc: "only the boundary restriction char",
       searchString: UrlbarTokenizer.RESTRICT.BOOKMARK,
       expectedTokens: [
-        { value: UrlbarTokenizer.RESTRICT.BOOKMARK, type: UrlbarTokenizer.TYPE.TEXT },
+        { value: UrlbarTokenizer.RESTRICT.BOOKMARK, type: UrlbarTokenizer.TYPE.RESTRICT_BOOKMARK },
       ],
     },
     // Some restriction chars may be # or ?, that are also valid path parts.

@@ -90,8 +102,8 @@ add_task(async function test_tokenizer() {
       searchString: `test ${UrlbarTokenizer.RESTRICT.HISTORY} ${UrlbarTokenizer.RESTRICT.TAG}`,
       expectedTokens: [
         { value: "test", type: UrlbarTokenizer.TYPE.POSSIBLE_ORIGIN },
-        { value: UrlbarTokenizer.RESTRICT.HISTORY, type: UrlbarTokenizer.TYPE.RESTRICT_HISTORY },
-        { value: UrlbarTokenizer.RESTRICT.TAG, type: UrlbarTokenizer.TYPE.TEXT },
+        { value: UrlbarTokenizer.RESTRICT.HISTORY, type: UrlbarTokenizer.TYPE.TEXT },
+        { value: UrlbarTokenizer.RESTRICT.TAG, type: UrlbarTokenizer.TYPE.RESTRICT_TAG },
       ],
     },
     { desc: "multiple boundary restriction chars prefix",

@@ -127,6 +139,18 @@ add_task(async function test_tokenizer() {
         { value: "http://test", type: UrlbarTokenizer.TYPE.POSSIBLE_URL },
       ],
     },
+    { desc: "almost a protocol",
+      searchString: "http:",
+      expectedTokens: [
+        { value: "http:", type: UrlbarTokenizer.TYPE.POSSIBLE_URL },
+      ],
+    },
+    { desc: "almost a protocol 2",
+      searchString: "http:/",
+      expectedTokens: [
+        { value: "http:/", type: UrlbarTokenizer.TYPE.POSSIBLE_URL },
+      ],
+    },
     { desc: "bogus protocol",
       searchString: "http:///",
       expectedTokens: [

@@ -169,6 +193,18 @@ add_task(async function test_tokenizer() {
         { value: "192.2134.1.2", type: UrlbarTokenizer.TYPE.TEXT },
       ],
     },
+    { desc: "ipv4",
+      searchString: "1.2.3.4",
+      expectedTokens: [
+        { value: "1.2.3.4", type: UrlbarTokenizer.TYPE.POSSIBLE_ORIGIN },
+      ],
+    },
+    { desc: "host/path",
+      searchString: "test/test",
+      expectedTokens: [
+        { value: "test/test", type: UrlbarTokenizer.TYPE.POSSIBLE_URL },
+      ],
+    },
   ];

   for (let queryContext of testContexts) {

@@ -36,11 +36,6 @@ const MAXIMUM_ALLOWED_EXTENSION_TIME_MS = 3000;
 // Any remaining remote tabs are added in queue if no other results are found.
 const RECENT_REMOTE_TAB_THRESHOLD_MS = 259200000; // 72 hours.

-// A regex that matches "single word" hostnames for whitelisting purposes.
-// The hostname will already have been checked for general validity, so we
-// don't need to be exhaustive here, so allow dashes anywhere.
-const REGEXP_SINGLEWORD_HOST = new RegExp("^[a-z0-9-]+$", "i");
-
 // Regex used to match userContextId.
 const REGEXP_USER_CONTEXT_ID = /(?:^| )user-context-id:(\d+)/;

@@ -54,10 +49,7 @@ const REGEXP_INSERT_METHOD = /(?:^| )insert-method:(\d+)/;
-const REGEXP_SPACES = /\s+/;
-
 // Regex used to strip prefixes from URLs. See stripPrefix().
-const REGEXP_STRIP_PREFIX = /^[a-zA-Z]+:(?:\/\/)?/;
-
-// Cannot contains spaces or path delims.
-const REGEXP_ORIGIN = /^[^\s\/\?\#]+$/;
+const REGEXP_STRIP_PREFIX = /^[a-z]+:(?:\/){0,2}/i;

 // The result is notified on a delay, to avoid rebuilding the panel at every match.
 const NOTIFYRESULT_DELAY_MS = 16;

@@ -344,15 +336,6 @@ XPCOMUtils.defineLazyModuleGetters(this, {
 XPCOMUtils.defineLazyPreferenceGetter(this, "syncUsernamePref",
                                       "services.sync.username");

-// The special characters below can be typed into the urlbar to either restrict
-// the search to visited history, bookmarked, tagged pages; or force a match on
-// just the title text or url.
-XPCOMUtils.defineLazyGetter(this, "TOKEN_TO_BEHAVIOR_MAP", () => new Map(
-  Object.entries(UrlbarTokenizer.RESTRICT).map(
-    ([type, char]) => [char, type.toLowerCase()]
-  )
-));
-
 function setTimeout(callback, ms) {
   let timer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
   timer.initWithCallback(callback, ms, timer.TYPE_ONE_SHOT);

@@ -409,21 +392,20 @@ XPCOMUtils.defineLazyGetter(this, "ProfileAgeCreatedPromise", async () => {
   return times.created;
 });

-// Helper functions
+// Maps restriction character types to textual behaviors.
+XPCOMUtils.defineLazyGetter(this, "typeToBehaviorMap", () => {
+  return new Map([
+    [UrlbarTokenizer.TYPE.RESTRICT_HISTORY, "history"],
+    [UrlbarTokenizer.TYPE.RESTRICT_BOOKMARK, "bookmark"],
+    [UrlbarTokenizer.TYPE.RESTRICT_TAG, "tag"],
+    [UrlbarTokenizer.TYPE.RESTRICT_OPENPAGE, "openpage"],
+    [UrlbarTokenizer.TYPE.RESTRICT_SEARCH, "search"],
+    [UrlbarTokenizer.TYPE.RESTRICT_TITLE, "title"],
+    [UrlbarTokenizer.TYPE.RESTRICT_URL, "url"],
+  ]);
+});

-/**
- * Generates the tokens used in searching from a given string.
- *
- * @param searchString
- *        The string to generate tokens from.
- * @return an array of tokens.
- * @note Calling split on an empty string will return an array containing one
- *       empty string. We don't want that, as it'll break our logic, so return
- *       an empty array then.
- */
-function getUnfilteredSearchTokens(searchString) {
-  return searchString.length ? searchString.split(REGEXP_SPACES) : [];
-}
+// Helper functions

 /**
  * Strips the prefix from a URL and returns the prefix and the remainder of the

@@ -526,14 +508,6 @@ function looksLikeUrl(str, ignoreAlphanumericHosts = false) {
           str.includes(".")));
 }

-/**
- * Returns whether the passed in string looks like an origin.
- */
-function looksLikeOrigin(str) {
-  // Single word not including path delimiters.
-  return REGEXP_ORIGIN.test(str);
-}
-
 /**
  * Returns the portion of a string starting at the index where another string
  * begins.

@@ -600,8 +574,10 @@ function Search(searchString, searchParam, autocompleteListener,
   // We want to store the original string for case sensitive searches.
   this._originalSearchString = searchString;
   this._trimmedOriginalSearchString = searchString.trim();
-  let [prefix, suffix] = stripPrefix(this._trimmedOriginalSearchString);
-  this._searchString = Services.textToSubURI.unEscapeURIForUI("UTF-8", suffix);
+  let unescapedSearchString =
+    Services.textToSubURI.unEscapeURIForUI("UTF-8", this._trimmedOriginalSearchString);
+  let [prefix, suffix] = stripPrefix(unescapedSearchString);
+  this._searchString = suffix;
   this._strippedPrefix = prefix.toLowerCase();

   this._matchBehavior = Ci.mozIPlacesAutoComplete.MATCH_BOUNDARY;

@@ -627,21 +603,25 @@ function Search(searchString, searchParam, autocompleteListener,
                               parseInt(userContextId[1], 10) :
                               Ci.nsIScriptSecurityManager.DEFAULT_USER_CONTEXT_ID;

-  let unfilteredTokens = getUnfilteredSearchTokens(this._searchString);
+  // Use the original string here, not the stripped one, so the tokenizer can
+  // properly recognize token types.
+  let {tokens} = UrlbarTokenizer.tokenize({searchString: unescapedSearchString});

-  // We handle any leading restriction character specially, in particular for
-  // a search restriction we also handle the case where there's no space before
-  // the query, like "?porcupine".
+  // This allows to handle a leading restriction character specially.
   this._leadingRestrictionToken = null;
-  if (unfilteredTokens.length > 1 &&
-      this._trimmedOriginalSearchString.startsWith(unfilteredTokens[0]) &&
-      Object.values(UrlbarTokenizer.RESTRICT).includes(unfilteredTokens[0])) {
-    this._leadingRestrictionToken = unfilteredTokens[0];
-  } else if (this._trimmedOriginalSearchString.startsWith(UrlbarTokenizer.RESTRICT.SEARCH)) {
-    this._leadingRestrictionToken = UrlbarTokenizer.RESTRICT.SEARCH;
+  if (tokens.length > 0) {
+    if (UrlbarTokenizer.isRestrictionToken(tokens[0]) &&
+        (tokens.length > 1 || tokens[0].type == UrlbarTokenizer.TYPE.RESTRICT_SEARCH)) {
+      this._leadingRestrictionToken = tokens[0].value;
+    }
+    // Check if the first token has a strippable prefix and remove it, but don't
+    // create an empty token.
+    if (prefix && tokens[0].value.length > prefix.length) {
+      tokens[0].value = tokens[0].value.substring(prefix.length);
+    }
   }

-  this._searchTokens = this.filterTokens(unfilteredTokens);
+  this._searchTokens = this.filterTokens(tokens);

   // The heuristic token is the first filtered search token, but only when it's
   // actually the first thing in the search string. If a prefix or restriction

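A hedged sketch of the leading-restriction rule above, with the token objects written out by hand; in the real code they come from UrlbarTokenizer.tokenize() and isRestrictionToken() lives on the tokenizer, and the TYPE values here are stand-ins:

    const TYPE = {TEXT: 1, RESTRICT_SEARCH: 2, RESTRICT_BOOKMARK: 3};
    const RESTRICTS = new Set([TYPE.RESTRICT_SEARCH, TYPE.RESTRICT_BOOKMARK]);
    const isRestrictionToken = t => RESTRICTS.has(t.type);

    function leadingRestrictionToken(tokens) {
      if (tokens.length > 0 &&
          isRestrictionToken(tokens[0]) &&
          (tokens.length > 1 || tokens[0].type == TYPE.RESTRICT_SEARCH)) {
        return tokens[0].value;
      }
      return null;
    }

    // "? porcupine": a leading search restriction followed by a query.
    console.log(leadingRestrictionToken([
      {value: "?", type: TYPE.RESTRICT_SEARCH},
      {value: "porcupine", type: TYPE.TEXT},
    ])); // "?"
    // A lone "?" still qualifies via the explicit RESTRICT_SEARCH case.
    console.log(leadingRestrictionToken(
      [{value: "?", type: TYPE.RESTRICT_SEARCH}])); // "?"
    // A lone non-search restriction token does not.
    console.log(leadingRestrictionToken(
      [{value: "*", type: TYPE.RESTRICT_BOOKMARK}])); // null
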
@@ -650,11 +630,9 @@ function Search(searchString, searchParam, autocompleteListener,
   // keyword, a search engine alias, an extension keyword, or simply a URL or
   // part of the search string the user has typed. We won't know until we
   // create the heuristic result.
-  this._heuristicToken =
-    this._searchTokens[0] &&
-    this._trimmedOriginalSearchString.startsWith(this._searchTokens[0]) ?
-    this._searchTokens[0] :
-    null;
+  let firstToken = this._searchTokens.length > 0 && this._searchTokens[0].value;
+  this._heuristicToken = firstToken &&
+    this._trimmedOriginalSearchString.startsWith(firstToken) ? firstToken : null;

   this._keywordSubstitute = null;

@@ -768,21 +746,28 @@ Search.prototype = {

   /**
    * Given an array of tokens, this function determines which query should be
-   * ran. It also removes any special search tokens. The given array of tokens
-   * is modified in place and returned.
+   * ran. It also removes any special search tokens.
    *
    * @param tokens
-   *        An array of search tokens. This array is modified in place.
-   * @return The given array of tokens, modified to remove special search tokens.
+   *        An array of search tokens.
+   * @return A new, filtered array of tokens.
    */
   filterTokens(tokens) {
     let foundToken = false;
     // Set the proper behavior while filtering tokens.
-    for (let i = tokens.length - 1; i >= 0; i--) {
-      let behavior = TOKEN_TO_BEHAVIOR_MAP.get(tokens[i]);
+    let filtered = [];
+    for (let token of tokens) {
+      if (!UrlbarTokenizer.isRestrictionToken(token)) {
+        filtered.push(token);
+        continue;
+      }
+      let behavior = typeToBehaviorMap.get(token.type);
+      if (!behavior) {
+        throw new Error(`Unknown token type ${token.type}`);
+      }
       // Don't remove the token if it didn't match, or if it's an action but
       // actions are not enabled.
-      if (behavior && (behavior != "openpage" || this._enableActions)) {
+      if (behavior != "openpage" || this._enableActions) {
         // Don't use the suggest preferences if it is a token search and
         // set the restrict bit to 1 (to intersect the search results).
         if (!foundToken) {

@@ -792,18 +777,15 @@ Search.prototype = {
           this.setBehavior("restrict");
         }
         this.setBehavior(behavior);
-        tokens.splice(i, 1);
       }
     }

     // Set the right JavaScript behavior based on our preference. Note that the
     // preference is whether or not we should filter JavaScript, and the
     // behavior is if we should search it or not.
     if (!UrlbarPrefs.get("filter.javascript")) {
       this.setBehavior("javascript");
     }

-    return tokens;
+    return filtered;
   },

   /**

@@ -967,7 +949,7 @@ Search.prototype = {
         !this._inPrivateWindow) {
       let query =
         this._searchEngineAliasMatch ? this._searchEngineAliasMatch.query :
-        substringAt(this._originalSearchString, this._searchTokens[0]);
+        substringAt(this._originalSearchString, this._searchTokens[0].value);
       if (query) {
         // Limit the string sent for search suggestions to a maximum length.
         query = query.substr(0, UrlbarPrefs.get("maxCharsForSearchSuggestions"));

@@ -1417,8 +1399,8 @@ Search.prototype = {
     // The first token may be a whitelisted host.
     if (this._searchTokens.length == 1 &&
-        REGEXP_SINGLEWORD_HOST.test(this._searchTokens[0]) &&
-        Services.uriFixup.isDomainWhitelisted(this._searchTokens[0], -1)) {
+        this._searchTokens[0].type == UrlbarTokenizer.TYPE.POSSIBLE_ORIGIN &&
+        Services.uriFixup.isDomainWhitelisted(this._searchTokens[0].value, -1)) {
       return true;
     }

@@ -1429,9 +1411,14 @@ Search.prototype = {
       return true;
     }

-    // Disallow fetching search suggestions for strings looking like URLs, to
-    // avoid disclosing information about networks or passwords.
-    return this._searchTokens.some(looksLikeUrl);
+    // Disallow fetching search suggestions for strings looking like URLs, or
+    // non-alphanumeric origins, to avoid disclosing information about networks
+    // or passwords.
+    return this._searchTokens.some(t => {
+      return t.type == UrlbarTokenizer.TYPE.POSSIBLE_URL ||
+             (t.type == UrlbarTokenizer.TYPE.POSSIBLE_ORIGIN &&
+              !/^[a-z0-9-]+$/i.test(t.value));
+    });
   },

   async _matchKnownUrl(conn) {

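The suggestion gate reads as a standalone predicate; a sketch with the TYPE constants stubbed in (the regex is the one from the hunk above):

    const TYPE = {TEXT: 1, POSSIBLE_ORIGIN: 2, POSSIBLE_URL: 3};

    function blocksSuggestions(tokens) {
      return tokens.some(t => {
        return t.type == TYPE.POSSIBLE_URL ||
               (t.type == TYPE.POSSIBLE_ORIGIN && !/^[a-z0-9-]+$/i.test(t.value));
      });
    }

    // A plain single word may still hit the suggestions endpoint.
    console.log(blocksSuggestions(
      [{value: "mozilla", type: TYPE.POSSIBLE_ORIGIN}])); // false
    // Dots make it a non-alphanumeric origin, e.g. an intranet IP: blocked.
    console.log(blocksSuggestions(
      [{value: "192.168.0.1", type: TYPE.POSSIBLE_ORIGIN}])); // true
    // Url-like tokens never reach the network.
    console.log(blocksSuggestions(
      [{value: "test/test", type: TYPE.POSSIBLE_URL}])); // true
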
@@ -1441,7 +1428,7 @@ Search.prototype = {
     // Otherwise treat it as a possible URL. When the string has only one slash
     // at the end, we still treat it as an URL.
     let query, params;
-    if (looksLikeOrigin(this._searchString)) {
+    if (UrlbarTokenizer.looksLikeOrigin(this._searchString)) {
       [query, params] = this._originQuery;
     } else {
       [query, params] = this._urlQuery;

@@ -1539,7 +1526,7 @@ Search.prototype = {
       searchStr = searchStr.slice(0, -1);
     }
     // If the search string looks more like a url than a domain, bail out.
-    if (!looksLikeOrigin(searchStr)) {
+    if (!UrlbarTokenizer.looksLikeOrigin(searchStr)) {
       return false;
     }

@@ -1760,6 +1747,11 @@ Search.prototype = {
   // TODO (bug 1054814): Use visited URLs to inform which scheme to use, if the
   // scheme isn't specificed.
   _matchUnknownUrl() {
+    if (!this._searchString && this._strippedPrefix) {
+      // The user just typed a stripped protocol, don't build a non-sense url
+      // like http://http/ for it.
+      return false;
+    }
     let flags = Ci.nsIURIFixup.FIXUP_FLAG_FIX_SCHEME_TYPOS |
                 Ci.nsIURIFixup.FIXUP_FLAG_ALLOW_KEYWORD_LOOKUP;
     let fixupInfo = null;

@@ -1879,7 +1871,7 @@ Search.prototype = {
     // when searching for "Firefox".
     let terms = parseResult.terms.toLowerCase();
     if (this._searchTokens.length > 0 &&
-        this._searchTokens.every(token => !terms.includes(token))) {
+        this._searchTokens.every(token => !terms.includes(token.value))) {
       return;
     }

@@ -2287,9 +2279,9 @@ Search.prototype = {
    * to the keyword search rather than searching for the literal keyword.
    */
   get _keywordSubstitutedSearchString() {
-    let tokens = this._searchTokens;
+    let tokens = this._searchTokens.map(t => t.value);
     if (this._keywordSubstitute) {
-      tokens = [this._keywordSubstitute, ...this._searchTokens.slice(1)];
+      tokens = [this._keywordSubstitute, ...tokens.slice(1)];
     }
     return tokens.join(" ");
   },

@@ -921,11 +921,7 @@ add_task(async function avoid_http_url_suggestions() {
     search: "ftp:/",
     searchParam: "enable-actions",
     matches: [
-      {
-        uri: makeActionURI("visiturl", { url: "http://ftp/", input: "ftp:/" }),
-        style: [ "action", "visiturl", "heuristic" ],
-        title: "http://ftp/",
-      },
+      makeSearchMatch("ftp:/", { engineName: ENGINE_NAME, heuristic: true }),
     ],
   });

@@ -987,11 +983,7 @@ add_task(async function avoid_http_url_suggestions() {
     search: "http:/",
     searchParam: "enable-actions",
     matches: [
-      {
-        uri: makeActionURI("visiturl", { url: "http://http/", input: "http:/" }),
-        style: [ "action", "visiturl", "heuristic" ],
-        title: "http://http/",
-      },
+      makeSearchMatch("http:/", { engineName: ENGINE_NAME, heuristic: true }),
     ],
   });

@@ -999,11 +991,7 @@
     search: "https:/",
     searchParam: "enable-actions",
     matches: [
-      {
-        uri: makeActionURI("visiturl", { url: "http://https/", input: "https:/" }),
-        style: [ "action", "visiturl", "heuristic" ],
-        title: "http://https/",
-      },
+      makeSearchMatch("https:/", { engineName: ENGINE_NAME, heuristic: true }),
     ],
   });

@@ -45,7 +45,8 @@ add_task(async function test_special_searches() {
   await addBookmark( { uri: uri11, title: "title", tags: [ "foo.bar" ] } );
   await addBookmark( { uri: uri12, title: "foo.bar", tags: [ "foo.bar" ] } );

-  // Test restricting searches
+  // Test restricting searches.
+
   info("History restrict");
   await check_autocomplete({
     search: UrlbarTokenizer.RESTRICT.HISTORY,

@@ -79,7 +80,6 @@ add_task(async function test_special_searches() {
              { uri: uri12, title: "foo.bar", tags: [ "foo.bar" ], style: [ "tag" ] } ],
   });

-  // Test specials as any word position
   info("Special as first word");
   await check_autocomplete({
     search: `${UrlbarTokenizer.RESTRICT.HISTORY} foo bar`,

@@ -90,16 +90,6 @@ add_task(async function test_special_searches() {
              { uri: uri11, title: "title", tags: [ "foo.bar" ], style: [ "tag" ] } ],
   });

-  info("Special as middle word");
-  await check_autocomplete({
-    search: `foo ${UrlbarTokenizer.RESTRICT.HISTORY} bar`,
-    matches: [ { uri: uri2, title: "foo.bar" },
-               { uri: uri3, title: "title" },
-               { uri: uri4, title: "foo.bar" },
-               { uri: uri6, title: "foo.bar" },
-               { uri: uri11, title: "title", tags: [ "foo.bar" ], style: [ "tag" ] } ],
-  });
-
   info("Special as last word");
   await check_autocomplete({
     search: `foo bar ${UrlbarTokenizer.RESTRICT.HISTORY}`,

@@ -110,7 +100,8 @@ add_task(async function test_special_searches() {
              { uri: uri11, title: "title", tags: [ "foo.bar" ], style: [ "tag" ] } ],
   });

-  // Test restricting and matching searches with a term
+  // Test restricting and matching searches with a term.
+
   info(`foo ${UrlbarTokenizer.RESTRICT.HISTORY} -> history`);
   await check_autocomplete({
     search: `foo ${UrlbarTokenizer.RESTRICT.HISTORY}`,

@@ -167,12 +158,6 @@ add_task(async function test_special_searches() {
   });

   // Test various pairs of special searches
-  info(`foo ${UrlbarTokenizer.RESTRICT.HISTORY} ${UrlbarTokenizer.RESTRICT.BOOKMARK} -> history, is star`);
-  await check_autocomplete({
-    search: `foo ${UrlbarTokenizer.RESTRICT.HISTORY} ${UrlbarTokenizer.RESTRICT.BOOKMARK}`,
-    matches: [ { uri: uri6, title: "foo.bar", style: [ "bookmark" ] },
-               { uri: uri11, title: "title", tags: [ "foo.bar" ], style: [ "bookmark-tag" ] } ],
-  });
-
   info(`foo ${UrlbarTokenizer.RESTRICT.HISTORY} ${UrlbarTokenizer.RESTRICT.TITLE} -> history, in title`);
   await check_autocomplete({

@@ -191,12 +176,6 @@ add_task(async function test_special_searches() {
              { uri: uri11, title: "title", tags: [ "foo.bar" ], style: [ "tag" ] } ],
   });

-  info(`foo ${UrlbarTokenizer.RESTRICT.HISTORY} ${UrlbarTokenizer.RESTRICT.TAG} -> history, is tag`);
-  await check_autocomplete({
-    search: `foo ${UrlbarTokenizer.RESTRICT.HISTORY} ${UrlbarTokenizer.RESTRICT.TAG}`,
-    matches: [ { uri: uri11, title: "title", tags: [ "foo.bar" ], style: [ "tag" ] } ],
-  });
-
   info(`foo ${UrlbarTokenizer.RESTRICT.BOOKMARK} ${UrlbarTokenizer.RESTRICT.TITLE} -> is star, in title`);
   await check_autocomplete({
     search: `foo ${UrlbarTokenizer.RESTRICT.BOOKMARK} ${UrlbarTokenizer.RESTRICT.TITLE}`,

@@ -217,24 +196,6 @@ add_task(async function test_special_searches() {
              { uri: uri12, title: "foo.bar", tags: [ "foo.bar" ], style: [ "bookmark-tag" ] } ],
   });

-  info(`foo ${UrlbarTokenizer.RESTRICT.BOOKMARK} ${UrlbarTokenizer.RESTRICT.TAG} -> same as ${UrlbarTokenizer.RESTRICT.TAG}`);
-  await check_autocomplete({
-    search: `foo ${UrlbarTokenizer.RESTRICT.BOOKMARK} ${UrlbarTokenizer.RESTRICT.TAG}`,
-    matches: [ { uri: uri9, title: "title", tags: [ "foo.bar" ], style: [ "bookmark-tag" ] },
-               { uri: uri10, title: "foo.bar", tags: [ "foo.bar" ], style: [ "bookmark-tag" ] },
-               { uri: uri11, title: "title", tags: [ "foo.bar" ], style: [ "bookmark-tag" ] },
-               { uri: uri12, title: "foo.bar", tags: [ "foo.bar" ], style: [ "bookmark-tag" ] } ],
-  });
-
-  info(`foo ${UrlbarTokenizer.RESTRICT.TITLE} ${UrlbarTokenizer.RESTRICT.URL} -> in title, in url`);
-  await check_autocomplete({
-    search: `foo ${UrlbarTokenizer.RESTRICT.TITLE} ${UrlbarTokenizer.RESTRICT.URL}`,
-    matches: [ { uri: uri4, title: "foo.bar" },
-               { uri: uri8, title: "foo.bar", style: [ "bookmark" ] },
-               { uri: uri11, title: "title", tags: [ "foo.bar" ], style: [ "tag" ] },
-               { uri: uri12, title: "foo.bar", tags: [ "foo.bar" ], style: [ "tag" ] } ],
-  });
-
   info(`foo ${UrlbarTokenizer.RESTRICT.TITLE} ${UrlbarTokenizer.RESTRICT.TAG} -> in title, is tag`);
   await check_autocomplete({
     search: `foo ${UrlbarTokenizer.RESTRICT.TITLE} ${UrlbarTokenizer.RESTRICT.TAG}`,

@@ -251,6 +212,39 @@ add_task(async function test_special_searches() {
              { uri: uri12, title: "foo.bar", tags: [ "foo.bar" ], style: [ "tag" ] } ],
   });

+  // Test conflicting restrictions.
+
+  info(`conflict ${UrlbarTokenizer.RESTRICT.TITLE} ${UrlbarTokenizer.RESTRICT.URL} -> url wins`);
+  await PlacesTestUtils.addVisits([
+    { uri: `http://conflict.com/${UrlbarTokenizer.RESTRICT.TITLE}`, title: "test" },
+    { uri: "http://conflict.com/", title: `test${UrlbarTokenizer.RESTRICT.TITLE}` },
+  ]);
+  await check_autocomplete({
+    search: `conflict ${UrlbarTokenizer.RESTRICT.TITLE} ${UrlbarTokenizer.RESTRICT.URL}`,
+    matches: [
+      { uri: `http://conflict.com/${UrlbarTokenizer.RESTRICT.TITLE}`, title: "test" },
+    ],
+  });
+
+  info(`conflict ${UrlbarTokenizer.RESTRICT.HISTORY} ${UrlbarTokenizer.RESTRICT.BOOKMARK} -> bookmark wins`);
+  await addBookmark( { uri: "http://bookmark.conflict.com/", title: `conflict ${UrlbarTokenizer.RESTRICT.HISTORY}` } );
+  await check_autocomplete({
+    search: `conflict ${UrlbarTokenizer.RESTRICT.HISTORY} ${UrlbarTokenizer.RESTRICT.BOOKMARK}`,
+    matches: [
+      { uri: "http://bookmark.conflict.com/", title: `conflict ${UrlbarTokenizer.RESTRICT.HISTORY}`, style: [ "bookmark" ] },
+    ],
+  });
+
+  info(`conflict ${UrlbarTokenizer.RESTRICT.BOOKMARK} ${UrlbarTokenizer.RESTRICT.TAG} -> tag wins`);
+  await addBookmark( { uri: "http://tag.conflict.com/", title: `conflict ${UrlbarTokenizer.RESTRICT.BOOKMARK}`, tags: [ "one" ] } );
+  await addBookmark( { uri: "http://nontag.conflict.com/", title: `conflict ${UrlbarTokenizer.RESTRICT.BOOKMARK}` } );
+  await check_autocomplete({
+    search: `conflict ${UrlbarTokenizer.RESTRICT.BOOKMARK} ${UrlbarTokenizer.RESTRICT.TAG}`,
+    matches: [
+      { uri: "http://tag.conflict.com/", title: `conflict ${UrlbarTokenizer.RESTRICT.BOOKMARK}`, tags: [ "one" ], style: [ "tag" ] },
+    ],
+  });
+
   // Disable autoFill for the next tests, see test_autoFill_default_behavior.js
   // for specific tests.
   Services.prefs.setBoolPref("browser.urlbar.autoFill", false);

@@ -115,9 +115,9 @@ add_task(async function test_escape() {
     ],
   });

-  info("Match word boundaries '()_+' that are among word boundaries");
+  info("Match word boundaries '()_' that are among word boundaries");
   await check_autocomplete({
-    search: "()_+",
+    search: "()_",
     checkSorting: true,
     matches: [
       { uri: "http://crazytitle/", title: "!@#$%^&*()_+{}|:<>?word" },