diff --git a/browser/base/content/pageinfo/pageInfo.js b/browser/base/content/pageinfo/pageInfo.js
index b944ce9468bf..c4ec836ec673 100644
--- a/browser/base/content/pageinfo/pageInfo.js
+++ b/browser/base/content/pageinfo/pageInfo.js
@@ -928,7 +928,7 @@ function makePreview(row)
   var imageContainer = document.getElementById("theimagecontainer");
   var oldImage = document.getElementById("thepreviewimage");
 
-  const regex = /^(https?|ftp|file|gopher|about|chrome|resource):/;
+  const regex = /^(https?|ftp|file|about|chrome|resource):/;
   var isProtocolAllowed = regex.test(url);
   if (/^data:/.test(url) && /^image\//.test(mimeType))
     isProtocolAllowed = true;
diff --git a/browser/components/migration/src/nsDogbertProfileMigrator.cpp b/browser/components/migration/src/nsDogbertProfileMigrator.cpp
index 224a826cbf2c..5dd0b667eccb 100644
--- a/browser/components/migration/src/nsDogbertProfileMigrator.cpp
+++ b/browser/components/migration/src/nsDogbertProfileMigrator.cpp
@@ -407,8 +407,6 @@ nsDogbertProfileMigrator::PrefTransform gTransforms[] = {
   { "network.proxy.autoconfig_url", 0, F(GetString), F(SetString), PR_FALSE, { -1 } },
   { "network.proxy.ftp", 0, F(GetString), F(SetString), PR_FALSE, { -1 } },
   { "network.proxy.ftp_port", 0, F(GetInt), F(SetInt), PR_FALSE, { -1 } },
-  { "network.proxy.gopher", 0, F(GetString), F(SetString), PR_FALSE, { -1 } },
-  { "network.proxy.gopher_port", 0, F(GetInt), F(SetInt), PR_FALSE, { -1 } },
   { "network.proxy.http", 0, F(GetString), F(SetString), PR_FALSE, { -1 } },
   { "network.proxy.http_port", 0, F(GetInt), F(SetInt), PR_FALSE, { -1 } },
   { "network.proxy.ssl", 0, F(GetString), F(SetString), PR_FALSE, { -1 } },
diff --git a/browser/components/migration/src/nsIEProfileMigrator.cpp b/browser/components/migration/src/nsIEProfileMigrator.cpp
index 42d5fe8c01c1..4b8250d64625 100644
--- a/browser/components/migration/src/nsIEProfileMigrator.cpp
+++ b/browser/components/migration/src/nsIEProfileMigrator.cpp
@@ -2151,8 +2151,6 @@ nsIEProfileMigrator::CopyProxyPreferences(nsIPrefBranch* aPrefs)
     ProxyData data[] = {
       { "ftp=", 4, PR_FALSE, "network.proxy.ftp",
        "network.proxy.ftp_port" },
-      { "gopher=", 7, PR_FALSE, "network.proxy.gopher",
-       "network.proxy.gopher_port" },
       { "http=", 5, PR_FALSE, "network.proxy.http",
        "network.proxy.http_port" },
       { "https=", 6, PR_FALSE, "network.proxy.ssl",
@@ -2163,7 +2161,7 @@ nsIEProfileMigrator::CopyProxyPreferences(nsIPrefBranch* aPrefs)
 
     PRInt32 startIndex = 0, count = 0;
     PRBool foundSpecificProxy = PR_FALSE;
-    for (PRUint32 i = 0; i < 5; ++i) {
+    for (PRUint32 i = 0; i < NS_ARRAY_LENGTH(data); ++i) {
       PRInt32 offset = buf.Find(NS_ConvertASCIItoUTF16(data[i].prefix));
       if (offset >= 0) {
         foundSpecificProxy = PR_TRUE;
@@ -2186,7 +2184,7 @@ nsIEProfileMigrator::CopyProxyPreferences(nsIPrefBranch* aPrefs)
     // No proxy config for any specific type was found, assume
     // the ProxyServer value is of the form host:port and that
    // it applies to all protocols.
-    for (PRUint32 i = 0; i < 5; ++i)
+    for (PRUint32 i = 0; i < NS_ARRAY_LENGTH(data); ++i)
       SetProxyPref(buf, data[i].hostPref, data[i].portPref, aPrefs);
     aPrefs->SetBoolPref("network.proxy.share_proxy_settings", PR_TRUE);
   }
diff --git a/browser/components/migration/src/nsOperaProfileMigrator.cpp b/browser/components/migration/src/nsOperaProfileMigrator.cpp
index 4dbb0f88d869..d3c9722663c4 100644
--- a/browser/components/migration/src/nsOperaProfileMigrator.cpp
+++ b/browser/components/migration/src/nsOperaProfileMigrator.cpp
@@ -486,12 +486,12 @@ nsOperaProfileMigrator::CopyProxySettings(nsINIParser &aParser,
   PRInt32 networkProxyType = 0;
 
-  const char* protocols[4] = { "HTTP", "HTTPS", "FTP", "GOPHER" };
-  const char* protocols_l[4] = { "http", "https", "ftp", "gopher" };
+  const char* protocols[] = { "HTTP", "HTTPS", "FTP" };
+  const char* protocols_l[] = { "http", "https", "ftp" };
   char toggleBuf[15], serverBuf[20], serverPrefBuf[20], serverPortPrefBuf[25];
   PRInt32 enabled;
 
-  for (PRUint32 i = 0; i < 4; ++i) {
+  for (PRUint32 i = 0; i < NS_ARRAY_LENGTH(protocols); ++i) {
     sprintf(toggleBuf, "Use %s", protocols[i]);
     GetInteger(aParser, "Proxy", toggleBuf, &enabled);
     if (enabled) {
diff --git a/browser/components/migration/src/nsSeamonkeyProfileMigrator.cpp b/browser/components/migration/src/nsSeamonkeyProfileMigrator.cpp
index 40b4483d1002..3c45e7a4fcc2 100644
--- a/browser/components/migration/src/nsSeamonkeyProfileMigrator.cpp
+++ b/browser/components/migration/src/nsSeamonkeyProfileMigrator.cpp
@@ -386,8 +386,6 @@ nsSeamonkeyProfileMigrator::PrefTransform gTransforms[] = {
   MAKESAMETYPEPREFTRANSFORM("network.proxy.ssl_port", Int),
   MAKESAMETYPEPREFTRANSFORM("network.proxy.socks", String),
   MAKESAMETYPEPREFTRANSFORM("network.proxy.socks_port", Int),
-  MAKESAMETYPEPREFTRANSFORM("network.proxy.gopher", String),
-  MAKESAMETYPEPREFTRANSFORM("network.proxy.gopher_port", Int),
   MAKESAMETYPEPREFTRANSFORM("network.proxy.no_proxies_on", String),
   MAKESAMETYPEPREFTRANSFORM("network.proxy.autoconfig_url", String),
   MAKESAMETYPEPREFTRANSFORM("browser.display.foreground_color", String),
diff --git a/browser/components/preferences/connection.js b/browser/components/preferences/connection.js
index 7b942c05a205..723ce45427e2 100644
--- a/browser/components/preferences/connection.js
+++ b/browser/components/preferences/connection.js
@@ -52,7 +52,7 @@ var gConnectionsDialog = {
     var httpProxyPortPref = document.getElementById("network.proxy.http_port");
     var shareProxiesPref = document.getElementById("network.proxy.share_proxy_settings");
     if (shareProxiesPref.value) {
-      var proxyPrefs = ["ssl", "ftp", "socks", "gopher"];
+      var proxyPrefs = ["ssl", "ftp", "socks"];
       for (var i = 0; i < proxyPrefs.length; ++i) {
         var proxyServerURLPref = document.getElementById("network.proxy." + proxyPrefs[i]);
         var proxyPortPref = document.getElementById("network.proxy." + proxyPrefs[i] + "_port");
@@ -134,7 +134,7 @@
   {
     var proxyTypePref = document.getElementById("network.proxy.type");
     var shareProxiesPref = document.getElementById("network.proxy.share_proxy_settings");
-    var proxyPrefs = ["ssl", "ftp", "socks", "gopher"];
+    var proxyPrefs = ["ssl", "ftp", "socks"];
     for (var i = 0; i < proxyPrefs.length; ++i) {
       var proxyServerURLPref = document.getElementById("network.proxy." + proxyPrefs[i]);
       var proxyPortPref = document.getElementById("network.proxy." + proxyPrefs[i] + "_port");
diff --git a/browser/components/preferences/connection.xul b/browser/components/preferences/connection.xul
index 8212c20e8b81..933ede5de4f7 100644
--- a/browser/components/preferences/connection.xul
+++ b/browser/components/preferences/connection.xul
@@ -70,8 +70,6 @@
-
-
@@ -89,8 +87,6 @@
-
-
diff --git a/netwerk/streamconv/converters/nsIndexedToHTML.cpp b/netwerk/streamconv/converters/nsIndexedToHTML.cpp
--- a/netwerk/streamconv/converters/nsIndexedToHTML.cpp
+++ b/netwerk/streamconv/converters/nsIndexedToHTML.cpp
 \n");
-    if (!isSchemeGopher) {
-        buffer.AppendLiteral("\n");
-    }
     buffer.AppendLiteral("
     innerUri = NS_GetInnermostURI(uri);
     if (!innerUri)
@@ -525,7 +511,7 @@ nsIndexedToHTML::OnStartRequest(nsIRequest* request, nsISupports *aContext) {
     }
     buffer.AppendLiteral("\" />\n");
 
-    // Anything but a gopher url needs to end in a /,
+    // Everything needs to end in a /,
     // otherwise we end up linking to file:///foo/dirfile
 
     if (!mTextToSubURI) {
@@ -644,35 +630,33 @@
     buffer.AppendLiteral("<table>\n");
 
-    if (!isSchemeGopher) {
-        nsXPIDLString columnText;
+    nsXPIDLString columnText;
 
-        buffer.AppendLiteral(" <thead>\n"
-                             " <tr>\n"
-                             " <th>");
+    buffer.AppendLiteral(" <thead>\n"
+                         " <tr>\n"
+                         " <th>");
 
-        rv = mBundle->GetStringFromName(NS_LITERAL_STRING("DirColName").get(),
-                                        getter_Copies(columnText));
-        if (NS_FAILED(rv)) return rv;
-        AppendNonAsciiToNCR(columnText, buffer);
-        buffer.AppendLiteral("</th>\n"
-                             " <th>");
+    rv = mBundle->GetStringFromName(NS_LITERAL_STRING("DirColName").get(),
+                                    getter_Copies(columnText));
+    if (NS_FAILED(rv)) return rv;
+    AppendNonAsciiToNCR(columnText, buffer);
+    buffer.AppendLiteral("</th>\n"
+                         " <th>");
 
-        rv = mBundle->GetStringFromName(NS_LITERAL_STRING("DirColSize").get(),
-                                        getter_Copies(columnText));
-        if (NS_FAILED(rv)) return rv;
-        AppendNonAsciiToNCR(columnText, buffer);
-        buffer.AppendLiteral("</th>\n"
-                             " <th colspan=\"2\">");
+    rv = mBundle->GetStringFromName(NS_LITERAL_STRING("DirColSize").get(),
+                                    getter_Copies(columnText));
+    if (NS_FAILED(rv)) return rv;
+    AppendNonAsciiToNCR(columnText, buffer);
+    buffer.AppendLiteral("</th>\n"
+                         " <th colspan=\"2\">");
 
-        rv = mBundle->GetStringFromName(NS_LITERAL_STRING("DirColMTime").get(),
-                                        getter_Copies(columnText));
-        if (NS_FAILED(rv)) return rv;
-        AppendNonAsciiToNCR(columnText, buffer);
-        buffer.AppendLiteral("</th>\n"
-                             " </tr>\n"
-                             " </thead>\n");
-    }
+    rv = mBundle->GetStringFromName(NS_LITERAL_STRING("DirColMTime").get(),
+                                    getter_Copies(columnText));
+    if (NS_FAILED(rv)) return rv;
+    AppendNonAsciiToNCR(columnText, buffer);
+    buffer.AppendLiteral("</th>\n"
+                         " </tr>\n"
+                         " </thead>\n");
     buffer.AppendLiteral(" <tbody>\n");
 
     // Push buffer to the listener now, so the initial HTML will not
@@ -845,32 +829,28 @@ nsIndexedToHTML::OnIndexAvailable(nsIRequest *aRequest,
         rv = channel->GetURI(getter_AddRefs(uri));
         if (NS_FAILED(rv)) return rv;
 
-        // No need to do this for Gopher, as the table has only one column in that case
-        PRBool isSchemeGopher = PR_FALSE;
-        if (!(NS_SUCCEEDED(uri->SchemeIs("gopher", &isSchemeGopher)) && isSchemeGopher)) {
-            //XXX this potentially truncates after a combining char (bug 391472)
-            nsXPIDLString descriptionAffix;
-            descriptionAffix.Assign(description);
-            descriptionAffix.Cut(0, descriptionAffix.Length() - 25);
-            if (NS_IS_LOW_SURROGATE(descriptionAffix.First()))
-                descriptionAffix.Cut(0, 1);
-            description.Truncate(PR_MIN(71, description.Length() - 28));
-            if (NS_IS_HIGH_SURROGATE(description.Last()))
-                description.Truncate(description.Length() - 1);
+        //XXX this potentially truncates after a combining char (bug 391472)
+        nsXPIDLString descriptionAffix;
+        descriptionAffix.Assign(description);
+        descriptionAffix.Cut(0, descriptionAffix.Length() - 25);
+        if (NS_IS_LOW_SURROGATE(descriptionAffix.First()))
+            descriptionAffix.Cut(0, 1);
+        description.Truncate(PR_MIN(71, description.Length() - 28));
+        if (NS_IS_HIGH_SURROGATE(description.Last()))
+            description.Truncate(description.Length() - 1);
 
-            escapedShort.Adopt(nsEscapeHTML2(description.get(), description.Length()));
+        escapedShort.Adopt(nsEscapeHTML2(description.get(), description.Length()));
 
-            escapedShort.Append(mEscapedEllipsis);
-            // add ZERO WIDTH SPACE (U+200B) for wrapping
-            escapedShort.AppendLiteral("&#8203;");
-            nsString tmp;
-            tmp.Adopt(nsEscapeHTML2(descriptionAffix.get(), descriptionAffix.Length()));
-            escapedShort.Append(tmp);
+        escapedShort.Append(mEscapedEllipsis);
+        // add ZERO WIDTH SPACE (U+200B) for wrapping
+        escapedShort.AppendLiteral("&#8203;");
+        nsString tmp;
+        tmp.Adopt(nsEscapeHTML2(descriptionAffix.get(), descriptionAffix.Length()));
+        escapedShort.Append(tmp);
 
-            pushBuffer.AppendLiteral(" title=\"");
-            pushBuffer.Append(escaped);
-            pushBuffer.AppendLiteral("\"");
-        }
+        pushBuffer.AppendLiteral(" title=\"");
+        pushBuffer.Append(escaped);
+        pushBuffer.AppendLiteral("\"");
     }
     if (escapedShort.IsEmpty())
         escapedShort.Assign(escaped);
@@ -908,7 +888,7 @@ nsIndexedToHTML::OnIndexAvailable(nsIRequest *aRequest,
     // now minimally re-escape the location...
     PRUint32 escFlags;
-    // for some protocols, like gopher, we expect the location to be absolute.
+    // for some protocols, we expect the location to be absolute.
     // if so, and if the location indeed appears to be a valid URI, then go
     // ahead and treat it like one.
     if (mExpectAbsLoc &&
diff --git a/toolkit/toolkit-makefiles.sh b/toolkit/toolkit-makefiles.sh
index 6e80a8b31f27..bda9520afc50 100644
--- a/toolkit/toolkit-makefiles.sh
+++ b/toolkit/toolkit-makefiles.sh
@@ -374,7 +374,6 @@ MAKEFILES_netwerk="
   netwerk/protocol/data/Makefile
   netwerk/protocol/file/Makefile
   netwerk/protocol/ftp/Makefile
-  netwerk/protocol/gopher/Makefile
   netwerk/protocol/http/Makefile
   netwerk/protocol/res/Makefile
   netwerk/protocol/viewsource/Makefile
diff --git a/xpfe/components/directory/nsDirectoryViewer.cpp b/xpfe/components/directory/nsDirectoryViewer.cpp
index b140e83b096d..5b2234d7c432 100644
--- a/xpfe/components/directory/nsDirectoryViewer.cpp
+++ b/xpfe/components/directory/nsDirectoryViewer.cpp
@@ -47,7 +47,7 @@
   http://www.mozilla.org/projects/netlib/dirindexformat.html
 
   One added change is for a description entry, for when the
-  target does not match the filename (ie gopher)
+  target does not match the filename
 
 */
@@ -107,7 +107,6 @@ static NS_DEFINE_CID(kRDFServiceCID, NS_RDFSERVICE_CID);
 
 // Various protocols we have to special case
 static const char kFTPProtocol[] = "ftp://";
-static const char kGopherProtocol[] = "gopher://";
 
 //----------------------------------------------------------------------
 //
@@ -428,19 +427,12 @@ nsHTTPIndex::OnIndexAvailable(nsIRequest* aRequest, nsISupports *aContext,
   // we found the filename; construct a resource for its entry
   nsCAutoString entryuriC(baseStr);
 
-  // gopher resources don't point to an entry in the same directory
-  // like ftp uris. So the entryuriC is just a unique string, while
-  // the URL attribute is the destination of this element
-  // The naming scheme for the attributes is taken from the bookmarks
   nsXPIDLCString filename;
   nsresult rv = aIndex->GetLocation(getter_Copies(filename));
   if (NS_FAILED(rv)) return rv;
   entryuriC.Append(filename);
 
   // if its a directory, make sure it ends with a trailing slash.
-  // This doesn't matter for gopher, (where directories don't have
-  // to end in a trailing /), because the filename is used for the URL
-  // attribute.
   PRUint32 type;
   rv = aIndex->GetType(&type);
   if (NS_FAILED(rv))
@@ -464,14 +456,7 @@ nsHTTPIndex::OnIndexAvailable(nsIRequest* aRequest, nsISupports *aContext,
     nsCOMPtr<nsIRDFLiteral> lit;
     nsString str;
 
-    // For gopher, the target is the filename. We still have to do all
-    // the above string manipulation though, because we need the entryuric
-    // as the key for the RDF data source
-    if (!strncmp(entryuriC.get(), kGopherProtocol, sizeof(kGopherProtocol)-1))
-      str.AssignWithConversion(filename);
-    else {
-      str.AssignWithConversion(entryuriC.get());
-    }
+    str.AssignWithConversion(entryuriC.get());
 
     rv = mDirRDF->GetLiteral(str.get(), getter_AddRefs(lit));
@@ -789,7 +774,6 @@ void nsHTTPIndex::GetDestination(nsIRDFResource* r, nsXPIDLCString& dest) {
 // get double the # of answers we really want... also, "rdf:file" is
 // less expensive in terms of both memory usage as well as speed
-// We also handle gopher now
 // We use an rdf attribute to mark if this is a container or not.
@@ -809,8 +793,6 @@ nsHTTPIndex::isWellknownContainerURI(nsIRDFResource *r)
   }
   else {
     nsXPIDLCString uri;
-    // For gopher, we need to follow the URL attribute to get the
-    // real destination
     GetDestination(r,uri);
     if ((uri.get()) && (!strncmp(uri, kFTPProtocol, sizeof(kFTPProtocol) - 1))) {
@@ -818,23 +800,6 @@ nsHTTPIndex::isWellknownContainerURI(nsIRDFResource *r)
        isContainerFlag = PR_TRUE;
      }
    }
-
-    // A gopher url is of the form:
-    // gopher://example.com/xFileNameToGet
-    // where x is a single character representing the type of file
-    // 1 is a directory, and 7 is a search.
-    // Searches will cause a dialog to be popped up (asking the user what
-    // to search for), and so even though searches return a directory as a
-    // result, don't treat it as a directory here.
-
-    // The isContainerFlag test above will correctly handle this when a
-    // search url is passed in as the baseuri
-    if ((uri.get()) &&
-        (!strncmp(uri,kGopherProtocol, sizeof(kGopherProtocol)-1))) {
-      char* pos = PL_strchr(uri+sizeof(kGopherProtocol)-1, '/');
-      if (!pos || pos[1] == '\0' || pos[1] == '1')
-        isContainerFlag = PR_TRUE;
-    }
  }
  return isContainerFlag;
 }
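
A note on the migrator hunks above: each hard-coded loop bound (the literal 5 in nsIEProfileMigrator, the literal 4 in nsOperaProfileMigrator) is replaced with NS_ARRAY_LENGTH(...), so deleting the gopher rows from the tables cannot leave a stale count behind. The following is a minimal, self-contained sketch of that idiom; the ProxyData fields and the ARRAY_LENGTH macro here are simplified stand-ins, not the exact Mozilla declarations.

    #include <cstdio>

    // Simplified stand-in for Mozilla's NS_ARRAY_LENGTH macro:
    // element count of a statically sized array.
    #define ARRAY_LENGTH(arr) (sizeof(arr) / sizeof((arr)[0]))

    // Hypothetical, trimmed-down version of the ProxyData table.
    struct ProxyData {
        const char* prefix;    // e.g. "http=" inside the IE ProxyServer value
        const char* hostPref;  // pref that receives the proxy host
        const char* portPref;  // pref that receives the proxy port
    };

    static const ProxyData kData[] = {
        { "ftp=",   "network.proxy.ftp",   "network.proxy.ftp_port"   },
        { "http=",  "network.proxy.http",  "network.proxy.http_port"  },
        { "https=", "network.proxy.ssl",   "network.proxy.ssl_port"   },
        { "socks=", "network.proxy.socks", "network.proxy.socks_port" },
    };

    int main() {
        // The bound tracks the table automatically: removing an entry
        // shrinks the loop, with no literal count left to forget.
        for (std::size_t i = 0; i < ARRAY_LENGTH(kData); ++i)
            std::printf("%-7s -> %s / %s\n", kData[i].prefix,
                        kData[i].hostPref, kData[i].portPref);
        return 0;
    }

The same reasoning is behind dropping the explicit [4] sizes on the Opera migrator's protocols arrays: the initializer list alone now determines how many entries the loop visits.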
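
The surviving nsDirectoryViewer code keeps the test !strncmp(uri, kFTPProtocol, sizeof(kFTPProtocol) - 1) as the remaining special case in isWellknownContainerURI. Below is a standalone sketch of that prefix-match idiom; the sample URIs are made up for illustration, and the - 1 drops the string literal's terminating NUL so only the visible "ftp://" characters are compared.

    #include <cstdio>
    #include <cstring>

    static const char kFTPProtocol[] = "ftp://";

    // True when uri begins with the ftp:// scheme prefix.
    // sizeof(kFTPProtocol) includes the trailing NUL, hence the - 1.
    static bool IsFtpUri(const char* uri) {
        return uri && std::strncmp(uri, kFTPProtocol, sizeof(kFTPProtocol) - 1) == 0;
    }

    int main() {
        const char* samples[] = { "ftp://ftp.example.org/pub/", "http://example.org/index" };
        for (const char* s : samples)
            std::printf("%s -> %s\n", s, IsFtpUri(s) ? "ftp (container candidate)" : "not ftp");
        return 0;
    }

With the gopher heuristic removed, this ftp:// prefix check is what the remaining code relies on when deciding whether a directory-listing entry can itself be treated as a container.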