Bug 963216 - Uplift Add-on SDK

Erik Vold 2014-01-24 15:51:44 -08:00
Parent d1f2ed3875
Commit 54cf406014
79 changed files with 882 additions and 6139 deletions

View file

@@ -6,7 +6,8 @@ Before proceeding, please make sure you've installed Python 2.5,
http://python.org/download/
Note that Python 3.0 and 3.1 are not supported in this release.
Note that Python 3 is not supported on any platform, and Python 2.7.6
is not supported on Windows.
For Windows users, MozillaBuild (https://wiki.mozilla.org/MozillaBuild)
will install the correct version of Python and the MSYS package, which
@@ -22,12 +23,12 @@ Windows users using cmd.exe should instead run:
bin\activate.bat
Then go to https://addons.mozilla.org/developers/docs/sdk/latest/dev-guide to
browse the SDK documentation.
Then go to https://developer.mozilla.org/en-US/Add-ons/SDK/
to browse the SDK documentation.
If you get an error when running cfx or have any other problems getting
started, see the "Troubleshooting" guide at:
https://addons.mozilla.org/en-US/developers/docs/sdk/latest/dev-guide/tutorials/troubleshooting.html
https://developer.mozilla.org/en-US/Add-ons/SDK/Tutorials/Troubleshooting
Bugs
-------

addon-sdk/source/app-extension/bootstrap.js (vendored)
View file

@@ -221,6 +221,8 @@ function startup(data, reasonCode) {
resultFile: options.resultFile,
// Arguments passed as --static-args
staticArgs: options.staticArgs,
// Add-on preferences branch name
preferencesBranch: options.preferencesBranch,
// Arguments related to test runner.
modules: {

View file

@@ -1,6 +1,6 @@
"use strict";
var method = require("method/core")
var method = require("../method/core")
// Method is designed to work with data structures representing application
// state. Calling it with a state should return object representing `delta`

View file

@@ -1,6 +1,6 @@
"use strict";
var method = require("method/core")
var method = require("../method/core")
var rebase = require("./rebase")
// Method is designed to work with data structures representing application

View file

@@ -8,7 +8,7 @@ module.metadata = {
};
const { Ci } = require("chrome");
const method = require("method/core");
const method = require("../../method/core");
const { add, remove, iterator } = require("../lang/weak-set");
let getTargetWindow = method("getTargetWindow");

View file

@@ -10,7 +10,7 @@ module.metadata = {
let { merge } = require('../util/object');
let assetsURI = require('../self').data.url();
let isArray = Array.isArray;
let method = require('method/core');
let method = require('../../method/core');
function isAddonContent({ contentURL }) {
return typeof(contentURL) === 'string' && contentURL.indexOf(assetsURI) === 0;

View file

@@ -9,7 +9,7 @@ module.metadata = {
const { CC } = require('chrome');
const { id, name, prefixURI, rootURI, metadata,
version, loadReason } = require('@loader/options');
version, loadReason, preferencesBranch } = require('@loader/options');
const { readURISync } = require('./net/url');
@@ -24,16 +24,16 @@ const uri = (path="") =>
// associated unique URI string that can be used for that.
exports.uri = 'addon:' + id;
exports.id = id;
exports.preferencesBranch = preferencesBranch || id;
exports.name = name;
exports.loadReason = loadReason;
exports.version = version;
// If `rootURI` is jar:file://...!/ then add-on is packed.
exports.packed = rootURI.indexOf('jar:') === 0
exports.packed = (rootURI || '').indexOf('jar:') === 0;
exports.data = Object.freeze({
url: uri,
load: function read(path) {
return readURISync(uri(path));
}
});
exports.isPrivateBrowsingSupported = ((metadata.permissions || {})['private-browsing'] === true) ?
true : false;
exports.isPrivateBrowsingSupported = ((metadata || {}).permissions || {})['private-browsing'] === true;

View file

@@ -9,10 +9,10 @@ module.metadata = {
const { emit, off } = require("./event/core");
const { PrefsTarget } = require("./preferences/event-target");
const { id } = require("./self");
const { preferencesBranch, id } = require("./self");
const { on } = require("./system/events");
const ADDON_BRANCH = "extensions." + id + ".";
const ADDON_BRANCH = "extensions." + preferencesBranch + ".";
const BUTTON_PRESSED = id + "-cmdPressed";
const target = PrefsTarget({ branchName: ADDON_BRANCH });

View file

@@ -142,6 +142,7 @@ function create(options) {
node.setAttribute('label', label);
node.setAttribute('tooltiptext', label);
node.setAttribute('image', image);
node.setAttribute('sdk-button', 'true');
views.set(id, {
area: this.currentArea,

View file

@@ -7,7 +7,7 @@ module.metadata = {
'stability': 'experimental'
};
const method = require('method/core');
const method = require('../../method/core');
const { uuid } = require('../util/uuid');
// NOTE: use lang/functional memoize when it is updated to use WeakMap

View file

@@ -3,7 +3,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
'use strict';
const method = require('method/core');
const method = require('../../../method/core');
exports.show = method('show');
exports.hide = method('hide');

View file

@@ -3,6 +3,6 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
'use strict';
const method = require('method/core');
const method = require('../../../method/core');
exports.isShowing = method('isShowing');

View file

@@ -9,7 +9,7 @@ module.metadata = {
};
var { Ci } = require("chrome");
var method = require("method/core");
var method = require("../../method/core");
// Returns DOM node associated with a view for
// the given `value`. If `value` has no view associated

View file

@@ -806,6 +806,8 @@ def run(arguments=sys.argv[1:], target_cfg=None, pkg_cfg=None,
if options.templatedir:
app_extension_dir = os.path.abspath(options.templatedir)
elif os.path.exists(os.path.join(options.pkgdir, "app-extension")):
app_extension_dir = os.path.join(options.pkgdir, "app-extension")
else:
mydir = os.path.dirname(os.path.abspath(__file__))
app_extension_dir = os.path.join(mydir, "../../app-extension")
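The lookup above resolves the add-on template directory in three steps. A standalone sketch of that precedence (an illustrative helper, not the cuddlefish code itself; argument names are assumptions):

import os

def resolve_app_extension_dir(templatedir, pkgdir, cuddlefish_dir):
    # 1. An explicit --templatedir always wins.
    if templatedir:
        return os.path.abspath(templatedir)
    # 2. Otherwise use a package-local "app-extension" directory, if present.
    candidate = os.path.join(pkgdir, "app-extension")
    if os.path.exists(candidate):
        return candidate
    # 3. Fall back to the app-extension template shipped with the SDK.
    return os.path.join(cuddlefish_dir, "../../app-extension")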

View file

@@ -1,4 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

View file

@@ -1,392 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys, re, textwrap
VERSION = 4
class ParseError(Exception):
# args[1] is the line number that caused the problem
def __init__(self, why, lineno):
self.why = why
self.lineno = lineno
def __str__(self):
return ("ParseError: the JS API docs were unparseable on line %d: %s" %
(self.lineno, self.why))
class Accumulator:
def __init__(self, holder, firstline):
self.holder = holder
self.firstline = firstline
self.otherlines = []
def addline(self, line):
self.otherlines.append(line)
def finish(self):
# take a list of strings like:
# "initial stuff" (this is in firstline)
# " more stuff" (this is in lines[0])
# " yet more stuff"
# " indented block"
# " indented block"
# " nonindented stuff" (lines[-1])
#
# calculate the indentation level by looking at all but the first
# line, and removing the whitespace they all have in common. Then
# join the results with newlines and return a single string.
pieces = []
if self.firstline:
pieces.append(self.firstline)
if self.otherlines:
pieces.append(textwrap.dedent("\n".join(self.otherlines)))
self.holder["description"] = "\n".join(pieces)
class APIParser:
def parse(self, lines, lineno):
api = {"line_number": lineno + 1}
# assign the name from the first line, of the form "<api name="API_NAME">"
title_line = lines[lineno].rstrip("\n")
api["name"] = self._parse_title_line(title_line, lineno + 1)
lineno += 1
# finished with the first line, assigned the name
working_set = self._initialize_working_set()
props = []
currentPropHolder = api
# fetch the next line, of the form "@tag [name] {datatype} description"
# and parse it into tag, info, description
tag, info, firstline = self._parseTypeLine(lines[lineno], lineno + 1)
api["type"] = tag
# if this API element is a property then datatype must be set
if tag == 'property':
api['datatype'] = info['datatype']
# info is ignored
currentAccumulator = Accumulator(api, firstline)
lineno += 1
while (lineno) < len(lines):
line = lines[lineno].rstrip("\n")
# accumulate any multiline descriptive text belonging to
# the preceding "@" section
if self._is_description_line(line):
currentAccumulator.addline(line)
else:
currentAccumulator.finish()
if line.startswith("<api"):
# then we should recursively handle a nested element
nested_api, lineno = self.parse(lines, lineno)
self._update_working_set(nested_api, working_set)
elif line.startswith("</api"):
# then we have finished parsing this api element
currentAccumulator.finish()
if props and currentPropHolder:
currentPropHolder["props"] = props
self._assemble_api_element(api, working_set)
return api, lineno
else:
# then we are looking at a subcomponent of an <api> element
tag, info, desc = self._parseTypeLine(line, lineno + 1)
currentAccumulator = Accumulator(info, desc)
if tag == "prop":
# build up props[]
props.append(info)
elif tag == "returns":
# close off the @prop list
if props and currentPropHolder:
currentPropHolder["props"] = props
props = []
api["returns"] = info
currentPropHolder = info
elif tag == "param":
# close off the @prop list
if props and currentPropHolder:
currentPropHolder["props"] = props
props = []
working_set["params"].append(info)
currentPropHolder = info
elif tag == "argument":
# close off the @prop list
if props and currentPropHolder:
currentPropHolder["props"] = props
props = []
working_set["arguments"].append(info)
currentPropHolder = info
else:
raise ParseError("unknown '@' section header %s in \
'%s'" % (tag, line), lineno + 1)
lineno += 1
raise ParseError("closing </api> tag not found for <api name=\"" +
api["name"] + "\">", lineno + 1)
def _parse_title_line(self, title_line, lineno):
if "name" not in title_line:
raise ParseError("Opening <api> tag must have a name attribute.",
lineno)
m = re.search("name=['\"]{0,1}([-\w\.]*?)['\"]", title_line)
if not m:
raise ParseError("No value for name attribute found in "
"opening <api> tag.", lineno)
return m.group(1)
def _is_description_line(self, line):
return not ( (line.lstrip().startswith("@")) or
(line.lstrip().startswith("<api")) or
(line.lstrip().startswith("</api")) )
def _initialize_working_set(self):
# working_set accumulates api elements
# that might belong to a parent api element
working_set = {}
working_set["constructors"] = []
working_set["methods"] = []
working_set["properties"] = []
working_set["params"] = []
working_set["events"] = []
working_set["arguments"] = []
return working_set
def _update_working_set(self, nested_api, working_set):
# add this api element to whichever list is appropriate
if nested_api["type"] == "constructor":
working_set["constructors"].append(nested_api)
if nested_api["type"] == "method":
working_set["methods"].append(nested_api)
if nested_api["type"] == "property":
working_set["properties"].append(nested_api)
if nested_api["type"] == "event":
working_set["events"].append(nested_api)
def _assemble_signature(self, api_element, params):
signature = api_element["name"] + "("
if len(params) > 0:
signature += params[0]["name"]
for param in params[1:]:
signature += ", " + param["name"]
signature += ")"
api_element["signature"] = signature
def _assemble_api_element(self, api_element, working_set):
# if any of this working set's lists are non-empty,
# add it to the current api element
if (api_element["type"] == "constructor") or \
(api_element["type"] == "function") or \
(api_element["type"] == "method"):
self._assemble_signature(api_element, working_set["params"])
if len(working_set["params"]) > 0:
api_element["params"] = working_set["params"]
if len(working_set["properties"]) > 0:
api_element["properties"] = working_set["properties"]
if len(working_set["constructors"]) > 0:
api_element["constructors"] = working_set["constructors"]
if len(working_set["methods"]) > 0:
api_element["methods"] = working_set["methods"]
if len(working_set["events"]) > 0:
api_element["events"] = working_set["events"]
if len(working_set["arguments"]) > 0:
api_element["arguments"] = working_set["arguments"]
def _validate_info(self, tag, info, line, lineno):
if tag == 'property':
if not 'datatype' in info:
raise ParseError("No type found for @property.", lineno)
elif tag == "param":
if info.get("required", False) and "default" in info:
raise ParseError(
"required parameters should not have defaults: '%s'"
% line, lineno)
elif tag == "prop":
if "datatype" not in info:
raise ParseError("@prop lines must include {type}: '%s'" %
line, lineno)
if "name" not in info:
raise ParseError("@prop lines must provide a name: '%s'" %
line, lineno)
def _parseTypeLine(self, line, lineno):
# handle these things:
# @method
# @returns description
# @returns {string} description
# @param NAME {type} description
# @param NAME
# @prop NAME {type} description
# @prop NAME
# returns:
# tag: type of api element
# info: linenumber, required, default, name, datatype
# description
info = {"line_number": lineno}
line = line.rstrip("\n")
pieces = line.split()
if not pieces:
raise ParseError("line is too short: '%s'" % line, lineno)
if not pieces[0].startswith("@"):
raise ParseError("type line should start with @: '%s'" % line,
lineno)
tag = pieces[0][1:]
skip = 1
expect_name = tag in ("param", "prop")
if len(pieces) == 1:
description = ""
else:
if pieces[1].startswith("{"):
# NAME is missing, pieces[1] is TYPE
pass
else:
if expect_name:
info["required"] = not pieces[1].startswith("[")
name = pieces[1].strip("[ ]")
if "=" in name:
name, info["default"] = name.split("=")
info["name"] = name
skip += 1
if len(pieces) > skip and pieces[skip].startswith("{"):
info["datatype"] = pieces[skip].strip("{ }")
skip += 1
# we've got the metadata, now extract the description
pieces = line.split(None, skip)
if len(pieces) > skip:
description = pieces[skip]
else:
description = ""
self._validate_info(tag, info, line, lineno)
return tag, info, description
def parse_hunks(text):
# return a list of tuples. Each is one of:
# ("raw", string) : non-API blocks
# ("api-json", dict) : API blocks
yield ("version", VERSION)
lines = text.splitlines(True)
line_number = 0
markdown_string = ""
while line_number < len(lines):
line = lines[line_number]
if line.startswith("<api"):
if len(markdown_string) > 0:
yield ("markdown", markdown_string)
markdown_string = ""
api, line_number = APIParser().parse(lines, line_number)
# this business with 'leftover' is a horrible thing to do,
# and exists only to collect the \n after the closing /api tag.
# It's not needed probably, except to help keep compatibility
# with the previous behaviour
leftover = lines[line_number].lstrip("</api>")
if len(leftover) > 0:
markdown_string += leftover
line_number = line_number + 1
yield ("api-json", api)
else:
markdown_string += line
line_number = line_number + 1
if len(markdown_string) > 0:
yield ("markdown", markdown_string)
class TestRenderer:
# render docs for test purposes
def getm(self, d, key):
return d.get(key, "<MISSING>")
def join_lines(self, text):
return " ".join([line.strip() for line in text.split("\n")])
def render_prop(self, p):
s = "props[%s]: " % self.getm(p, "name")
pieces = []
for k in ("type", "description", "required", "default"):
if k in p:
pieces.append("%s=%s" % (k, self.join_lines(str(p[k]))))
return s + ", ".join(pieces)
def render_param(self, p):
pieces = []
for k in ("name", "type", "description", "required", "default"):
if k in p:
pieces.append("%s=%s" % (k, self.join_lines(str(p[k]))))
yield ", ".join(pieces)
for prop in p.get("props", []):
yield " " + self.render_prop(prop)
def render_method(self, method):
yield "name= %s" % self.getm(method, "name")
yield "type= %s" % self.getm(method, "type")
yield "description= %s" % self.getm(method, "description")
signature = method.get("signature")
if signature:
yield "signature= %s" % self.getm(method, "signature")
params = method.get("params", [])
if params:
yield "parameters:"
for p in params:
for pline in self.render_param(p):
yield " " + pline
r = method.get("returns", None)
if r:
yield "returns:"
if "type" in r:
yield " type= %s" % r["type"]
if "description" in r:
yield " description= %s" % self.join_lines(r["description"])
props = r.get("props", [])
for p in props:
yield " " + self.render_prop(p)
def format_api(self, api):
for mline in self.render_method(api):
yield mline
constructors = api.get("constructors", [])
if constructors:
yield "constructors:"
for m in constructors:
for mline in self.render_method(m):
yield " " + mline
methods = api.get("methods", [])
if methods:
yield "methods:"
for m in methods:
for mline in self.render_method(m):
yield " " + mline
properties = api.get("properties", [])
if properties:
yield "properties:"
for p in properties:
yield " " + self.render_prop(p)
def render_docs(self, docs_json, outf=sys.stdout):
for (t,data) in docs_json:
if t == "api-json":
for line in self.format_api(data):
line = line.rstrip("\n")
outf.write("API: " + line + "\n")
else:
for line in str(data).split("\n"):
outf.write("MD :" + line + "\n")
def hunks_to_dict(docs_json):
exports = {}
for (t,data) in docs_json:
if t != "api-json":
continue
if data["name"]:
exports[data["name"]] = data
return exports
if __name__ == "__main__":
json = False
if sys.argv[1] == "--json":
json = True
del sys.argv[1]
docs_text = open(sys.argv[1]).read()
docs_parsed = list(parse_hunks(docs_text))
if json:
import simplejson
print simplejson.dumps(docs_parsed, indent=2)
else:
TestRenderer().render_docs(docs_parsed)

View file

@@ -1,301 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys, os
import markdown
import apiparser
# list of all the 'class' and 'id' attributes assigned to
# <div> and <span> tags by the renderer.
API_REFERENCE = 'api_reference'
MODULE_API_DOCS_CLASS = 'module_api_docs'
MODULE_API_DOCS_ID = '_module_api_docs'
API_HEADER = 'api_header'
API_NAME = 'api_name'
API_COMPONENT_GROUP = 'api_component_group'
API_COMPONENT = 'api_component'
DATATYPE = 'datatype'
RETURNS = 'returns'
PARAMETER_SET = 'parameter_set'
MODULE_DESCRIPTION = 'module_description'
HTML_HEADER = '''
<!DOCTYPE html>\n
<html>\n
<head>\n
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />\n
<base target="_blank"/>\n
<link rel="stylesheet" type="text/css" media="all"\n
href="../../../css/base.css" />\n
<link rel="stylesheet" type="text/css" media="all"\n
href="../../../css/apidocs.css" />\n
<title>Add-on SDK Documentation</title>\n
<style type="text/css">\n
body {\n
border: 50px solid #FFFFFF;\n
}\n
</style>\n
\n
<script type="text/javascript">\n
function rewrite_links() {\n
var images = document.getElementsByTagName("img");\n
for (var i = 0; i < images.length; i++) {\n
var before = images[i].src.split("packages/")[0];\n
var after = images[i].src.split("/docs")[1];\n
images[i].src = before + after;\n
}\n
}\n
</script>\n
</head>\n
\n
<body onload = "rewrite_links()">\n'''
HTML_FOOTER = '''
</body>\n
\n
</html>\n'''
def indent(text_in):
text_out = ''
lines = text_in.splitlines(True)
indentation_level = 0
indentation_depth = 2
for line in lines:
if (line.startswith('<div')):
text_out += ((' ' * indentation_depth) * indentation_level) + line
if not '</div>' in line:
indentation_level += 1
else:
if (line.startswith('</div>')):
indentation_level -= 1
text_out += ((' ' * indentation_depth) * indentation_level) + line
return text_out
def tag_wrap_id(text, classname, id, tag = 'div'):
return ''.join(['\n<'+ tag + ' id="', id, '" class="', \
classname, '">\n', text + '\n</' + tag +'>\n'])
def tag_wrap(text, classname, tag = 'div', inline = False):
if inline:
return ''.join(['\n<' + tag + ' class="', classname, '">', \
text, '</'+ tag + '>\n'])
else:
return ''.join(['\n<' + tag + ' class="', classname, '">', \
text, '\n</'+ tag + '>\n'])
def tag_wrap_inline(text, classname, tag = 'div'):
return ''.join(['\n<' + tag + ' class="', classname, '">', \
text, '</'+ tag + '>\n'])
def span_wrap(text, classname):
return ''.join(['<span class="', classname, '">', \
text, '</span>'])
class API_Renderer(object):
def __init__(self, json, tag):
self.name = json.get('name', None)
self.tag = tag
self.description = json.get('description', '')
self.json = json
def render_name(self):
raise Exception('not implemented in this class')
def render_description(self):
return markdown.markdown(self.description)
def render_subcomponents(self):
raise Exception('not implemented in this class')
def get_tag(self):
return self.tag
class Class_Doc(API_Renderer):
def __init__(self, json, tag):
API_Renderer.__init__(self, json, tag)
def render_name(self):
return self.name
def render_subcomponents(self):
return render_object_contents(self.json, 'h5', 'h6')
class Event_Doc(API_Renderer):
def __init__(self, json, tag):
API_Renderer.__init__(self, json, tag)
self.arguments_json = json.get('arguments', None)
def render_name(self):
return self.name
def render_subcomponents(self):
if not self.arguments_json:
return ''
text = ''.join([render_comp(Argument_Doc(argument_json, 'div')) \
for argument_json in self.arguments_json])
return tag_wrap(text, PARAMETER_SET)
class Argument_Doc(API_Renderer):
def __init__(self, json, tag):
API_Renderer.__init__(self, json, tag)
self.datatype = json.get('datatype', None)
def render_name(self):
return span_wrap(self.datatype, DATATYPE)
def render_subcomponents(self):
return ''
class Function_Doc(API_Renderer):
def __init__(self, json, tag):
API_Renderer.__init__(self, json, tag)
self.signature = json['signature']
self.returns = json.get('returns', None)
self.parameters_json = json.get('params', None)
def render_name(self):
return self.signature
def render_subcomponents(self):
return self._render_parameters() + self._render_returns()
def _render_parameters(self):
if not self.parameters_json:
return ''
text = ''.join([render_comp(Parameter_Doc(parameter_json, 'div')) \
for parameter_json in self.parameters_json])
return tag_wrap(text, PARAMETER_SET)
def _render_returns(self):
if not self.returns:
return ''
text = 'Returns: ' + span_wrap(self.returns['datatype'], DATATYPE)
text += markdown.markdown(self.returns['description'])
return tag_wrap(text, RETURNS)
class Property_Doc(API_Renderer):
def __init__(self, json, tag):
API_Renderer.__init__(self, json, tag)
self.datatype = json.get('datatype', None)
self.required = json.get('required', True)
self.default = json.get('default', False)
def render_name(self):
rendered = self.name
if self.default:
rendered = rendered + " = " + self.default
if self.datatype:
rendered = rendered + ' : ' + span_wrap(self.datatype, DATATYPE)
if not self.required:
rendered = '[ ' + rendered + ' ]'
return rendered
def render_subcomponents(self):
return render_object_contents(self.json)
class Parameter_Doc(Property_Doc):
def __init__(self, json, tag):
Property_Doc.__init__(self, json, tag)
self.properties_json = json.get('props', None)
def render_subcomponents(self):
if not self.properties_json:
return ''
text = ''.join([render_comp(Property_Doc(property_json, 'div')) \
for property_json in self.properties_json])
return text
def render_object_contents(json, tag = 'div', comp_tag = 'div'):
ctors = json.get('constructors', None)
text = render_comp_group(ctors, 'Constructors', Function_Doc, tag, comp_tag)
methods = json.get('methods', None)
text += render_comp_group(methods, 'Methods', Function_Doc, tag, comp_tag)
properties = json.get('properties', None)
text += render_comp_group(properties, 'Properties', Property_Doc, tag, comp_tag)
events = json.get('events', None)
text += render_comp_group(events, 'Events', Event_Doc, tag, comp_tag)
return text
def render_comp(component):
# a component is wrapped inside a single div marked 'API_COMPONENT'
# containing:
# 1) the component name, marked 'API_NAME'
text = tag_wrap(component.render_name(), API_NAME, component.get_tag(), True)
# 2) the component description
text += component.render_description()
# 3) the component contents
text += component.render_subcomponents()
return tag_wrap(text, API_COMPONENT)
def render_comp_group(group, group_name, ctor, tag = 'div', comp_tag = 'div'):
if not group:
return ''
# component group is a list of components in a single div called
# 'API_COMPONENT_GROUP' containing:
# 1) a title for the group marked with 'API_HEADER'
text = tag_wrap(group_name, API_HEADER, tag, True)
# 2) each component
text += ''.join([render_comp(ctor(api, comp_tag)) for api in group])
return tag_wrap(text, API_COMPONENT_GROUP)
def render_descriptions(descriptions_md):
text = ''.join([description_md for description_md in descriptions_md])
return tag_wrap(markdown.markdown(text), MODULE_DESCRIPTION)
def render_api_reference(api_docs):
if (len(api_docs) == 0):
return ''
# at the top level api reference is in a single div marked 'API_REFERENCE',
# containing:
# 1) a title 'API Reference' marked with 'API_HEADER'
text = tag_wrap('API Reference', API_HEADER, 'h2', True)
# 2) a component group called 'Classes' containing any class elements
classes = [api for api in api_docs if api['type'] == 'class']
text += render_comp_group(classes, 'Classes', Class_Doc, 'h3', 'h4')
# 3) a component group called 'Functions' containing any global functions
functions = [api for api in api_docs if api['type'] == 'function']
text += render_comp_group(functions, 'Functions', Function_Doc, 'h3', 'h4')
# 4) a component group called 'Properties' containing any global properties
properties = [api for api in api_docs if api['type'] == 'property']
text += render_comp_group(properties, 'Properties', Property_Doc, 'h3', 'h4')
# 5) a component group called 'Events' containing any global events
events = [api for api in api_docs if api['type'] == 'event']
text += render_comp_group(events, 'Events', Event_Doc, 'h3', 'h4')
return tag_wrap(text, API_REFERENCE)
# take the JSON output of apiparser
# return the HTML DIV containing the rendered component
def json_to_div(json, markdown_filename, module_name):
descriptions = [hunk[1] for hunk in json if hunk[0]=='markdown']
api_docs = [hunk[1] for hunk in json if hunk[0]=='api-json']
text = "<h1>" + module_name + "</h1>"
text += render_descriptions(descriptions)
text += render_api_reference(api_docs)
text = tag_wrap_id(text, MODULE_API_DOCS_CLASS, \
module_name + MODULE_API_DOCS_ID)
return text.encode('utf8')
# take the JSON output of apiparser
# return standalone HTML containing the rendered component
def json_to_html(json, markdown_filename):
return indent(HTML_HEADER + \
json_to_div(json, markdown_filename) + HTML_FOOTER)
# take the name of a Markdown file
# return the HTML DIV containing the rendered component
def md_to_div(markdown_filename, module_name):
markdown_contents = open(markdown_filename).read().decode('utf8')
json = list(apiparser.parse_hunks(markdown_contents))
return json_to_div(json, markdown_filename, module_name)
# take the name of a Markdown file
# return standalone HTML containing the rendered component
def md_to_html(markdown_filename, module_name):
return indent(HTML_HEADER + md_to_div(markdown_filename, module_name) + HTML_FOOTER)
if __name__ == '__main__':
if (len(sys.argv) == 0):
print 'Supply the name of a docs file to parse'
else:
print md_to_html(sys.argv[1])

View file

@@ -1,164 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys, os, re, simplejson
class DocumentationItemInfo(object):
def __init__(self, env_root, md_path, filename):
self.env_root = env_root
# full path to MD file, without filename
self.source_path = md_path
# MD filename
self.source_filename = filename
def env_root(self):
return self.env_root
def source_path(self):
return self.source_path
def source_filename(self):
return self.source_filename
def base_filename(self):
return self.source_filename[:-len(".md")]
def source_path_and_filename(self):
return os.sep.join([self.source_path, self.source_filename])
def source_path_relative_from_env_root(self):
return self.source_path[len(self.env_root) + 1:]
class DevGuideItemInfo(DocumentationItemInfo):
def __init__(self, env_root, devguide_root, md_path, filename):
DocumentationItemInfo.__init__(self, env_root, md_path, filename)
self.devguide_root = devguide_root
def source_path_relative_from_devguide_root(self):
return self.source_path[len(self.devguide_root) + 1:]
def destination_path(self):
root_pieces = self.devguide_root.split(os.sep)
root_pieces[-1] = "dev-guide"
return os.sep.join([os.sep.join(root_pieces), self.source_path_relative_from_devguide_root()])
class ModuleInfo(DocumentationItemInfo):
def __init__(self, env_root, module_root, md_path, filename):
DocumentationItemInfo.__init__(self, env_root, md_path, filename)
self.module_root = module_root
self.metadata = self.get_metadata()
def remove_comments(self, text):
def replacer(match):
s = match.group(0)
if s.startswith('/'):
return ""
else:
return s
pattern = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE
)
return re.sub(pattern, replacer, text)
def get_metadata(self):
if self.level() == "third-party":
return simplejson.loads("{}")
try:
js = unicode(open(self.js_module_path(),"r").read(), 'utf8')
except IOError:
raise Exception, "JS module: '" + path_to_js + \
"', corresponding to documentation file: '"\
+ self.source_path_and_filename() + "' wasn't found"
js = self.remove_comments(js)
js_lines = js.splitlines(True)
metadata = ''
reading_metadata = False
for line in js_lines:
if reading_metadata:
if line.startswith("};"):
break
metadata += line
continue
if line.startswith("module.metadata"):
reading_metadata = True
metadata = metadata.replace("'", '"')
return simplejson.loads("{" + metadata + "}")
def source_path_relative_from_module_root(self):
return self.source_path[len(self.module_root) + 1:]
def destination_path(self):
if self.level() == "third-party":
return os.sep.join([self.env_root, "doc", "modules", "packages"])
root_pieces = self.module_root.split(os.sep)
root_pieces[-1] = "modules"
relative_pieces = self.source_path_relative_from_module_root().split(os.sep)
return os.sep.join(root_pieces + relative_pieces)
def js_module_path(self):
return os.path.join(self.env_root, "lib", \
self.source_path_relative_from_module_root(), \
self.source_filename[:-len(".md")] + ".js")
def relative_url(self):
if self.level() == "third-party":
relative_pieces = ["packages"]
else:
relative_pieces = self.source_path_relative_from_module_root().split(os.sep)
return "/".join(relative_pieces) + "/" + self.base_filename() + ".html"
def name(self):
if os.sep.join([self.module_root, "sdk"]) == self.source_path or self.level() == "third-party":
return self.source_filename[:-3]
else:
path_from_root_pieces = self.source_path_relative_from_module_root().split(os.sep)
return "/".join(["/".join(path_from_root_pieces[1:]), self.source_filename[:-len(".md")]])
def level(self):
if self.source_path_relative_from_env_root().startswith("packages"):
return "third-party"
else:
if os.sep.join([self.module_root, "sdk"]) == self.source_path:
return "high"
else:
return "low"
def get_modules_in_package(env_root, package_docs_dir, module_list, ignore_files_in_root):
for (dirpath, dirnames, filenames) in os.walk(package_docs_dir):
for filename in filenames:
# ignore files in the root
if ignore_files_in_root and package_docs_dir == dirpath:
continue
if filename.endswith(".md"):
module_list.append(ModuleInfo(env_root, package_docs_dir, dirpath, filename))
def get_module_list(env_root):
module_list = []
# get the built-in modules
module_root = os.sep.join([env_root, "doc", "module-source"])
get_modules_in_package(env_root, module_root, module_list, True)
# get the third-party modules
packages_root = os.sep.join([env_root, "packages"])
if os.path.exists(packages_root):
for entry in os.listdir(packages_root):
if os.path.isdir(os.sep.join([packages_root, entry])):
package_docs = os.sep.join([packages_root, entry, "docs"])
if os.path.exists(package_docs):
get_modules_in_package(env_root, package_docs, module_list, False)
module_list.sort(key=lambda x: x.name())
return module_list
def get_devguide_list(env_root):
devguide_list = []
devguide_root = os.sep.join([env_root, "doc", "dev-guide-source"])
for (dirpath, dirnames, filenames) in os.walk(devguide_root):
for filename in filenames:
if filename.endswith(".md"):
devguide_list.append(DevGuideItemInfo(env_root, devguide_root, dirpath, filename))
return devguide_list
if __name__ == "__main__":
module_list = get_module_list(sys.argv[1])
print [module_info.name for module_info in module_list]

View file

@@ -1,198 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
import shutil
import hashlib
import tarfile
import StringIO
from cuddlefish._version import get_versions
from cuddlefish.docs import apiparser
from cuddlefish.docs import apirenderer
from cuddlefish.docs import webdocs
from documentationitem import get_module_list
from documentationitem import get_devguide_list
from documentationitem import ModuleInfo
from documentationitem import DevGuideItemInfo
from linkrewriter import rewrite_links
import simplejson as json
DIGEST = "status.md5"
TGZ_FILENAME = "addon-sdk-docs.tgz"
def get_sdk_docs_path(env_root):
return os.path.join(env_root, "doc")
def get_base_url(env_root):
sdk_docs_path = get_sdk_docs_path(env_root).lstrip("/")
return "file://"+"/"+"/".join(sdk_docs_path.split(os.sep))+"/"
def clean_generated_docs(docs_dir):
status_file = os.path.join(docs_dir, "status.md5")
if os.path.exists(status_file):
os.remove(status_file)
index_file = os.path.join(docs_dir, "index.html")
if os.path.exists(index_file):
os.remove(index_file)
dev_guide_dir = os.path.join(docs_dir, "dev-guide")
if os.path.exists(dev_guide_dir):
shutil.rmtree(dev_guide_dir)
api_doc_dir = os.path.join(docs_dir, "modules")
if os.path.exists(api_doc_dir):
shutil.rmtree(api_doc_dir)
def generate_static_docs(env_root, override_version=get_versions()["version"]):
clean_generated_docs(get_sdk_docs_path(env_root))
generate_docs(env_root, override_version, stdout=StringIO.StringIO())
tgz = tarfile.open(TGZ_FILENAME, 'w:gz')
tgz.add(get_sdk_docs_path(env_root), "doc")
tgz.close()
return TGZ_FILENAME
def generate_local_docs(env_root):
return generate_docs(env_root, get_versions()["version"], get_base_url(env_root))
def generate_named_file(env_root, filename_and_path):
module_list = get_module_list(env_root)
web_docs = webdocs.WebDocs(env_root, module_list, get_versions()["version"], get_base_url(env_root))
abs_path = os.path.abspath(filename_and_path)
path, filename = os.path.split(abs_path)
if abs_path.startswith(os.path.join(env_root, 'doc', 'module-source')):
module_root = os.sep.join([env_root, "doc", "module-source"])
module_info = ModuleInfo(env_root, module_root, path, filename)
write_module_doc(env_root, web_docs, module_info, False)
elif abs_path.startswith(os.path.join(get_sdk_docs_path(env_root), 'dev-guide-source')):
devguide_root = os.sep.join([env_root, "doc", "dev-guide-source"])
devguideitem_info = DevGuideItemInfo(env_root, devguide_root, path, filename)
write_devguide_doc(env_root, web_docs, devguideitem_info, False)
else:
raise ValueError("Not a valid path to a documentation file")
def generate_docs(env_root, version=get_versions()["version"], base_url=None, stdout=sys.stdout):
docs_dir = get_sdk_docs_path(env_root)
# if the generated docs don't exist, generate everything
if not os.path.exists(os.path.join(docs_dir, "dev-guide")):
print >>stdout, "Generating documentation..."
generate_docs_from_scratch(env_root, version, base_url)
current_status = calculate_current_status(env_root)
open(os.path.join(docs_dir, DIGEST), "w").write(current_status)
else:
current_status = calculate_current_status(env_root)
previous_status_file = os.path.join(docs_dir, DIGEST)
docs_are_up_to_date = False
if os.path.exists(previous_status_file):
docs_are_up_to_date = current_status == open(previous_status_file, "r").read()
# if the docs are not up to date, generate everything
if not docs_are_up_to_date:
print >>stdout, "Regenerating documentation..."
generate_docs_from_scratch(env_root, version, base_url)
open(os.path.join(docs_dir, DIGEST), "w").write(current_status)
return get_base_url(env_root) + "index.html"
# this function builds a hash of the name and last modification date of:
# * every file in "doc/sdk" which ends in ".md"
# * every file in "doc/dev-guide-source" which ends in ".md"
# * every file in "doc/static-files" which does not start with "."
def calculate_current_status(env_root):
docs_dir = get_sdk_docs_path(env_root)
current_status = hashlib.md5()
module_src_dir = os.path.join(env_root, "doc", "module-source")
for (dirpath, dirnames, filenames) in os.walk(module_src_dir):
for filename in filenames:
if filename.endswith(".md"):
current_status.update(filename)
current_status.update(str(os.path.getmtime(os.path.join(dirpath, filename))))
guide_src_dir = os.path.join(docs_dir, "dev-guide-source")
for (dirpath, dirnames, filenames) in os.walk(guide_src_dir):
for filename in filenames:
if filename.endswith(".md"):
current_status.update(filename)
current_status.update(str(os.path.getmtime(os.path.join(dirpath, filename))))
package_dir = os.path.join(env_root, "packages")
for (dirpath, dirnames, filenames) in os.walk(package_dir):
for filename in filenames:
if filename.endswith(".md"):
current_status.update(filename)
current_status.update(str(os.path.getmtime(os.path.join(dirpath, filename))))
base_html_file = os.path.join(docs_dir, "static-files", "base.html")
current_status.update(base_html_file)
current_status.update(str(os.path.getmtime(os.path.join(dirpath, base_html_file))))
return current_status.digest()
def generate_docs_from_scratch(env_root, version, base_url):
docs_dir = get_sdk_docs_path(env_root)
module_list = get_module_list(env_root)
web_docs = webdocs.WebDocs(env_root, module_list, version, base_url)
must_rewrite_links = True
if base_url:
must_rewrite_links = False
clean_generated_docs(docs_dir)
# py2.5 doesn't have ignore=, so we delete tempfiles afterwards. If we
# required >=py2.6, we could use ignore=shutil.ignore_patterns("*~")
for (dirpath, dirnames, filenames) in os.walk(docs_dir):
for n in filenames:
if n.endswith("~"):
os.unlink(os.path.join(dirpath, n))
# generate api docs for all modules
if not os.path.exists(os.path.join(docs_dir, "modules")):
os.mkdir(os.path.join(docs_dir, "modules"))
[write_module_doc(env_root, web_docs, module_info, must_rewrite_links) for module_info in module_list]
# generate third-party module index
third_party_index_file = os.sep.join([env_root, "doc", "module-source", "third-party-modules.md"])
third_party_module_list = [module_info for module_info in module_list if module_info.level() == "third-party"]
write_module_index(env_root, web_docs, third_party_index_file, third_party_module_list, must_rewrite_links)
# generate high-level module index
high_level_index_file = os.sep.join([env_root, "doc", "module-source", "high-level-modules.md"])
high_level_module_list = [module_info for module_info in module_list if module_info.level() == "high"]
write_module_index(env_root, web_docs, high_level_index_file, high_level_module_list, must_rewrite_links)
# generate low-level module index
low_level_index_file = os.sep.join([env_root, "doc", "module-source", "low-level-modules.md"])
low_level_module_list = [module_info for module_info in module_list if module_info.level() == "low"]
write_module_index(env_root, web_docs, low_level_index_file, low_level_module_list, must_rewrite_links)
# generate dev-guide docs
devguide_list = get_devguide_list(env_root)
[write_devguide_doc(env_root, web_docs, devguide_info, must_rewrite_links) for devguide_info in devguide_list]
# make /md/dev-guide/welcome.html the top level index file
doc_html = web_docs.create_guide_page(os.path.join(docs_dir, 'dev-guide-source', 'index.md'))
write_file(env_root, doc_html, docs_dir, 'index', False)
def write_module_index(env_root, web_docs, source_file, module_list, must_rewrite_links):
doc_html = web_docs.create_module_index(source_file, module_list)
base_filename, extension = os.path.splitext(os.path.basename(source_file))
destination_path = os.sep.join([env_root, "doc", "modules"])
write_file(env_root, doc_html, destination_path, base_filename, must_rewrite_links)
def write_module_doc(env_root, web_docs, module_info, must_rewrite_links):
doc_html = web_docs.create_module_page(module_info)
write_file(env_root, doc_html, module_info.destination_path(), module_info.base_filename(), must_rewrite_links)
def write_devguide_doc(env_root, web_docs, devguide_info, must_rewrite_links):
doc_html = web_docs.create_guide_page(devguide_info.source_path_and_filename())
write_file(env_root, doc_html, devguide_info.destination_path(), devguide_info.base_filename(), must_rewrite_links)
def write_file(env_root, doc_html, dest_dir, filename, must_rewrite_links):
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
dest_path_html = os.path.join(dest_dir, filename) + ".html"
replace_file(env_root, dest_path_html, doc_html, must_rewrite_links)
return dest_path_html
def replace_file(env_root, dest_path, file_contents, must_rewrite_links):
if os.path.exists(dest_path):
os.remove(dest_path)
# before we copy the final version, we'll rewrite the links
# I'll do this last, just because we know definitely what the dest_path is at this point
if must_rewrite_links and dest_path.endswith(".html"):
file_contents = rewrite_links(env_root, get_sdk_docs_path(env_root), file_contents, dest_path)
open(dest_path, "w").write(file_contents)

View file

@@ -1,78 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
import HTMLParser
import urlparse
def rewrite_links(env_root, sdk_docs_path, page, dest_path):
dest_path_depth = len(dest_path.split(os.sep)) -1 # because dest_path includes filename
docs_root_depth = len(sdk_docs_path.split(os.sep))
relative_depth = dest_path_depth - docs_root_depth
linkRewriter = LinkRewriter("../" * relative_depth)
return linkRewriter.rewrite_links(page)
class LinkRewriter(HTMLParser.HTMLParser):
def __init__(self, link_prefix):
HTMLParser.HTMLParser.__init__(self)
self.stack = []
self.link_prefix = link_prefix
def rewrite_links(self, page):
self.feed(page)
self.close()
page = ''.join(self.stack)
self.stack = []
return page
def handle_decl(self, decl):
self.stack.append("<!" + decl + ">")
def handle_comment(self, decl):
self.stack.append("<!--" + decl + "-->")
def handle_starttag(self, tag, attrs):
self.stack.append(self.__html_start_tag(tag, self._rewrite_link(attrs)))
def handle_entityref(self, name):
self.stack.append("&" + name + ";")
def handle_endtag(self, tag):
self.stack.append(self.__html_end_tag(tag))
def handle_startendtag(self, tag, attrs):
self.stack.append(self.__html_startend_tag(tag, self._rewrite_link(attrs)))
def _update_attribute(self, attr_name, attrs):
attr_value = attrs.get(attr_name, '')
if attr_value:
parsed = urlparse.urlparse(attr_value)
if not parsed.scheme:
attrs[attr_name] = self.link_prefix + attr_value
def _rewrite_link(self, attrs):
attrs = dict(attrs)
self._update_attribute('href', attrs)
self._update_attribute('src', attrs)
self._update_attribute('action', attrs)
return attrs
def handle_data(self, data):
self.stack.append(data)
def __html_start_tag(self, tag, attrs):
return '<%s%s>' % (tag, self.__html_attrs(attrs))
def __html_startend_tag(self, tag, attrs):
return '<%s%s/>' % (tag, self.__html_attrs(attrs))
def __html_end_tag(self, tag):
return '</%s>' % (tag)
def __html_attrs(self, attrs):
_attrs = ''
if attrs:
_attrs = ' %s' % (' '.join([('%s="%s"' % (k,v)) for k,v in dict(attrs).iteritems()]))
return _attrs

View file

@@ -1,209 +0,0 @@
<!-- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/. -->
This document describes the structure of the HTML generated by the renderapi.py
tool. The particular HTML id and class attributes embedded in the files,
as well as their organization, represent the interface between the tool and any
front-end code wanting to style the docs in some particular way.
renderapi generates two sorts of files:
- a file called "<module-name>.div": this is the contents of the parsed
Markdown file rendered inside a well-defined DIV tag
- a file called "<module-name>.html": this is the DIV from above inserted into
a simple HTML template that references a sample CSS file which styles the
contents of the DIV. This CSS file is the same as the one used by the SDK
itself.
DIV tags
--------
The following class and id attributes are used in the DIV:
renderapi uses a number of class attributes and a single id attribute in the DIV:
id attribute <module_name>"_module_api_docs"
class attribute "api_reference"
class attribute "module_api_docs"
class attribute "api_header"
class attribute "api_name"
class attribute "api_component_group"
class attribute "api_component"
class attribute "datatype"
class attribute "returns"
class attribute "parameter_set"
class attribute "module_description"
DIV structure
-------------
The top level DIV is marked with the id attribute and the "module_api_docs" class
attribute:
<div id='tabs_module_api_docs' class='module_api_docs'>
//module doc contents
</div>
Inside this:
- the first item is an <h1> heading containing the name of the module:
- all "markdown" hunks (that is, all descriptive text not occurring
inside <api></api> tags) are rendered inside a DIV marked with the
"module-description" class attribute
- all <api></api> content is rendered, enclosed in a single tag marked
with the "api_reference" class attribute:
<div id='tabs_module_api_docs' class='module_api_docs'>
<div class='module_description'>
//descriptions
</div>
<div class='api_reference'>
//api reference
</div>
</div>
If there is no <api></api> content, then the "api-reference" section is absent.
### API Reference structure ###
The first item in API reference is an <h2> heading title marked with the
"api_header" attribute. This might have the text content "API Reference"
(but you should not rely on that):
<div class='api_reference'>
<h2 class='api_header'>API Reference</h2>
//api contents
</div>
After the title come one or more component groups.
#### Component Group ####
A component group is marked with the "api_component_group" attribute. The
component group is a collection of some sort of component: for example, a group
of classes, a group of functions, or a group of events.
Each component group starts off with a header marked with the
"api_header" attribute and is followed by one or more sections marked with the
"api_component" attribute.
At the top level (that is, when they are directly under the "API Reference"
heading), the "api_header" items are <h3> headings, otherwise they are divs.
<div class='api_reference'>
<h2 class='api_header'>API Reference</h2>
<div class='api_component_group'>
<h3 class='api_header'>Classes</h3>
<div class='api_component'>
// the first class
</div>
<div class='api_component'>
// another class
</div>
</div>
<div class='api_component_group'>
//some different components
<h3 class='api_header'>Functions</h3>
<div class='api_component'>
the first function
</div>
<div class='api_component'>
another function
</div>
</div>
</div>
#### Component ####
API components represent actual objects in the API like classes, functions,
properties and events.
Each component starts with a section marked with the
"api_name" tag, which includes the name of the component in the API: for
example "postMessage(message)".
Components at the top level (i.e., directly under h3 headings) are <h4>
headings, otherwise they are divs.
After the name, the component's contents are listed. Different sorts of
components may have different sorts of contents: for example, a function might
have parameters. If the component is composite then it may contain its own
component group. For example, a class may contain methods and properties,
which might be grouped together.
<div class='api_component'>
<h4 class='api_name'>Panel</h4>
<div class='api_component_group'>
<div class='api_header'>
Methods
</div>
<div class='api_component'>
show()
</div>
</div>
</div>
Other attributes
-----------------------------
### Datatype ###
All primitive data types, like "string" and "number", are marked with the
"datatype" class attribute:
<div class="api_component">
<div class="api_name">
label : <span class="datatype">string</span>
</div>
<p>A required string description of the widget used for accessibility,
title bars, and error reporting.</p>
</div>
### Returns ###
Functions mark return values with the "returns" class attribute.
<div class="api_component">
<div class="api_name">
get()
</div>
Make a `GET` request.
<div class="returns">
Returns: <span class="datatype">Request</span>
</div>
</div>
### Parameter_set ###
Functions that take parameters mark them with the parameter_set class
attribute.

View file

@@ -1,111 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os, re, errno
import markdown
import cgi
from cuddlefish import packaging
from cuddlefish.docs import apirenderer
from cuddlefish._version import get_versions
INDEX_PAGE = '/doc/static-files/base.html'
BASE_URL_INSERTION_POINT = '<base '
VERSION_INSERTION_POINT = '<div id="version">'
MODULE_INDEX_INSERTION_POINT = '<ul id="module-index">'
THIRD_PARTY_MODULE_SUMMARIES = '<ul id="third-party-module-summaries">'
HIGH_LEVEL_MODULE_SUMMARIES = '<ul id="high-level-module-summaries">'
LOW_LEVEL_MODULE_SUMMARIES = '<ul id="low-level-module-summaries">'
CONTENT_ID = '<div id="main-content">'
TITLE_ID = '<title>'
DEFAULT_TITLE = 'Add-on SDK Documentation'
def tag_wrap(text, tag, attributes={}):
result = '\n<' + tag
for name in attributes.keys():
result += ' ' + name + '=' + '"' + attributes[name] + '"'
result +='>' + text + '</'+ tag + '>\n'
return result
def insert_after(target, insertion_point_id, text_to_insert):
insertion_point = target.find(insertion_point_id) + len(insertion_point_id)
return target[:insertion_point] + text_to_insert + target[insertion_point:]
class WebDocs(object):
def __init__(self, root, module_list, version=get_versions()["version"], base_url = None):
self.root = root
self.module_list = module_list
self.version = version
self.pkg_cfg = packaging.build_pkg_cfg(root)
self.packages_json = packaging.build_pkg_index(self.pkg_cfg)
self.base_page = self._create_base_page(root, base_url)
def create_guide_page(self, path):
md_content = unicode(open(path, 'r').read(), 'utf8')
guide_content = markdown.markdown(md_content)
return self._create_page(guide_content)
def create_module_page(self, module_info):
path, ext = os.path.splitext(module_info.source_path_and_filename())
md_path = path + '.md'
module_content = apirenderer.md_to_div(md_path, module_info.name())
stability = module_info.metadata.get("stability", "undefined")
stability_note = tag_wrap(stability, "a", {"class":"stability-note stability-" + stability, \
"href":"dev-guide/guides/stability.html"})
module_content = stability_note + module_content
return self._create_page(module_content)
def create_module_index(self, path, module_list):
md_content = unicode(open(path, 'r').read(), 'utf8')
index_content = markdown.markdown(md_content)
module_list_content = self._make_module_text(module_list)
index_content = insert_after(index_content, MODULE_INDEX_INSERTION_POINT, module_list_content)
return self._create_page(index_content)
def _create_page(self, page_content):
page = self._insert_title(self.base_page, page_content)
page = insert_after(page, CONTENT_ID, page_content)
return page.encode('utf8')
def _make_module_text(self, module_list):
module_text = ''
for module in module_list:
module_link = tag_wrap(module.name(), 'a', \
{'href': "/".join(["modules", module.relative_url()])})
module_list_item = tag_wrap(module_link, "li")
module_text += module_list_item
return module_text
def _create_base_page(self, root, base_url):
base_page = unicode(open(root + INDEX_PAGE, 'r').read(), 'utf8')
if base_url:
base_tag = 'href="' + base_url + '"'
base_page = insert_after(base_page, BASE_URL_INSERTION_POINT, base_tag)
base_page = insert_after(base_page, VERSION_INSERTION_POINT, "Version " + self.version)
third_party_module_list = [module_info for module_info in self.module_list if module_info.level() == "third-party"]
third_party_module_text = self._make_module_text(third_party_module_list)
base_page = insert_after(base_page, \
THIRD_PARTY_MODULE_SUMMARIES, third_party_module_text)
high_level_module_list = [module_info for module_info in self.module_list if module_info.level() == "high"]
high_level_module_text = self._make_module_text(high_level_module_list)
base_page = insert_after(base_page, \
HIGH_LEVEL_MODULE_SUMMARIES, high_level_module_text)
low_level_module_list = [module_info for module_info in self.module_list if module_info.level() == "low"]
low_level_module_text = self._make_module_text(low_level_module_list)
base_page = insert_after(base_page, \
LOW_LEVEL_MODULE_SUMMARIES, low_level_module_text)
return base_page
def _insert_title(self, target, content):
match = re.search('<h1>.*</h1>', content)
if match:
title = match.group(0)[len('<h1>'):-len('</h1>')] + ' - ' + \
DEFAULT_TITLE
else:
title = DEFAULT_TITLE
target = insert_after(target, TITLE_ID, title)
return target

View file

@@ -2,7 +2,7 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
def parse_options_defaults(options, jetpack_id):
def parse_options_defaults(options, preferencesBranch):
# this returns a unicode string
pref_list = []
@@ -21,6 +21,6 @@ def parse_options_defaults(options, jetpack_id):
else:
value = str(pref["value"])
pref_list.append("pref(\"extensions." + jetpack_id + "." + pref["name"] + "\", " + value + ");")
pref_list.append("pref(\"extensions." + preferencesBranch + "." + pref["name"] + "\", " + value + ");")
return "\n".join(pref_list) + "\n"
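For reference, a minimal standalone sketch of the prefs.js line this change produces under a custom preferences branch (an illustrative helper, not the cuddlefish module; only the bool and fallback cases visible in the hunk are modelled, other types are assumptions):

def pref_line(preferences_branch, pref):
    # Bools become JS-style true/false; anything else falls back to str(),
    # matching the else branch shown above.
    if pref["type"] == "bool":
        value = "true" if pref["value"] else "false"
    else:
        value = str(pref["value"])
    return 'pref("extensions.%s.%s", %s);' % (
        preferences_branch, pref["name"], value)

# Matches the expectation in the new preferences-branch test below:
# pref("extensions.human-readable.test42", true);
print(pref_line("human-readable",
                {"name": "test42", "type": "bool", "value": True}))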

View file

@@ -45,7 +45,7 @@ def validate_prefs(options):
# TODO: Check that pref["type"] matches default value type
def parse_options(options, jetpack_id):
def parse_options(options, jetpack_id, preferencesBranch):
doc = Document()
root = doc.createElement("vbox")
root.setAttribute("xmlns", "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul")
@@ -58,7 +58,7 @@ def parse_options(options, jetpack_id):
setting = doc.createElement("setting")
setting.setAttribute("pref-name", pref["name"])
setting.setAttribute("data-jetpack-id", jetpack_id)
setting.setAttribute("pref", "extensions." + jetpack_id + "." + pref["name"])
setting.setAttribute("pref", "extensions." + preferencesBranch + "." + pref["name"])
setting.setAttribute("type", pref["type"])
setting.setAttribute("title", pref["title"])

View file

@@ -396,6 +396,21 @@ def generate_build_for_target(pkg_cfg, target, deps,
if ('preferences' in target_cfg):
build['preferences'] = target_cfg.preferences
if 'id' in target_cfg:
# NOTE: logic duplicated from buildJID()
jid = target_cfg['id']
if not ('@' in jid or jid.startswith('{')):
jid += '@jetpack'
build['preferencesBranch'] = jid
if 'preferences-branch' in target_cfg:
# check it's a non-empty, valid branch name
preferencesBranch = target_cfg['preferences-branch']
if re.match('^[\w{@}-]+$', preferencesBranch):
build['preferencesBranch'] = preferencesBranch
elif not is_running_tests:
print >>sys.stderr, "IGNORING preferences-branch (not a valid branch name)"
return build
def _get_files_in_dir(path):
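Taken together with the id fallback above, the effective preferences branch can be summarized in a standalone helper (an illustrative sketch, not the packaging API; the sample ids come from the new curly-id and preferences-branch test packages):

import re

def effective_preferences_branch(target_cfg):
    # Default: the add-on id, normalized the way the buildJID() note describes.
    jid = target_cfg["id"]
    if not ("@" in jid or jid.startswith("{")):
        jid += "@jetpack"
    branch = jid
    # A non-empty, valid preferences-branch overrides the default;
    # anything else is ignored (the real code also prints IGNORING to stderr).
    requested = target_cfg.get("preferences-branch")
    if requested and re.match(r"^[\w{@}-]+$", requested):
        branch = requested
    return branch

print(effective_preferences_branch(
    {"id": "test-preferences-branch",
     "preferences-branch": "human-readable"}))        # -> human-readable
print(effective_preferences_branch(
    {"id": "{34a1eae1-c20a-464f-9b0e-000000000000}",
     "preferences-branch": "invalid^branch*name"}))   # -> the curly id itself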

View file

@@ -35,19 +35,6 @@ def get_tests():
if len(test.examples) > 0:
tests.append(doctest.DocTestCase(test))
md_dir = os.path.join(env_root, 'dev-guide')
doctest_opts = (doctest.NORMALIZE_WHITESPACE |
doctest.REPORT_UDIFF)
for dirpath, dirnames, filenames in os.walk(md_dir):
for filename in filenames:
if filename.endswith('.md'):
absname = os.path.join(dirpath, filename)
tests.append(doctest.DocFileTest(
absname,
module_relative=False,
optionflags=doctest_opts
))
return tests
def run(verbose=False):

Просмотреть файл

@@ -0,0 +1,4 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */

Просмотреть файл

@@ -0,0 +1,14 @@
{
"id": "{34a1eae1-c20a-464f-9b0e-000000000000}",
"fullName": "curly ID test",
"author": "Tomislav Jovanovic",
"preferences": [{
"name": "test13",
"type": "integer",
"title": "test13",
"value": 26
}],
"preferences-branch": "invalid^branch*name"
}
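
The branch name above is deliberately rejected by the ^[\w{@}-]+$ check in packaging, so the build is expected to fall back to the curly ID; a quick sanity check of that regex:

    import re
    BRANCH_RE = r'^[\w{@}-]+$'   # same pattern used by generate_build_for_target
    assert re.match(BRANCH_RE, 'invalid^branch*name') is None                          # '^' and '*' are rejected
    assert re.match(BRANCH_RE, '{34a1eae1-c20a-464f-9b0e-000000000000}') is not None   # curly IDs are allowed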

Просмотреть файл

@@ -0,0 +1,4 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */

Просмотреть файл

@@ -0,0 +1,14 @@
{
"id": "test-preferences-branch",
"fullName": "preferences-branch test",
"author": "Tomislav Jovanovic",
"preferences": [{
"name": "test42",
"type": "bool",
"title": "test42",
"value": true
}],
"preferences-branch": "human-readable"
}

Просмотреть файл

@@ -22,7 +22,8 @@ def get_configs(pkg_name, dirname='static-files'):
build = packaging.generate_build_for_target(
pkg_cfg=pkg_cfg,
target=pkg_name,
deps=deps
deps=deps,
is_running_tests=True,
)
return Bunch(target_cfg=target_cfg, pkg_cfg=pkg_cfg, build=build)

Просмотреть файл

@@ -40,10 +40,11 @@ class PrefsTests(unittest.TestCase):
def testPackageWithSimplePrefs(self):
self.makexpi('simple-prefs')
packageName = 'jid1-fZHqN9JfrDBa8A@jetpack'
self.failUnless('options.xul' in self.xpi.namelist())
optsxul = self.xpi.read('options.xul').decode("utf-8")
self.failUnlessEqual(self.xpi_harness_options["jetpackID"],
"jid1-fZHqN9JfrDBa8A@jetpack")
self.failUnlessEqual(self.xpi_harness_options["jetpackID"], packageName)
self.failUnlessEqual(self.xpi_harness_options["preferencesBranch"], packageName)
root = ElementTree.XML(optsxul.encode('utf-8'))
@@ -53,7 +54,6 @@ class PrefsTests(unittest.TestCase):
settings = root.findall(xulNamespacePrefix + 'setting')
def assertPref(setting, name, prefType, title):
packageName = 'jid1-fZHqN9JfrDBa8A@jetpack'
self.failUnlessEqual(setting.get('data-jetpack-id'), packageName)
self.failUnlessEqual(setting.get('pref'),
'extensions.' + packageName + '.' + name)
@@ -88,6 +88,25 @@ class PrefsTests(unittest.TestCase):
]
self.failUnlessEqual(prefsjs, "\n".join(exp)+"\n")
def testPackageWithPreferencesBranch(self):
self.makexpi('preferences-branch')
self.failUnless('options.xul' in self.xpi.namelist())
optsxul = self.xpi.read('options.xul').decode("utf-8")
self.failUnlessEqual(self.xpi_harness_options["preferencesBranch"],
"human-readable")
root = ElementTree.XML(optsxul.encode('utf-8'))
xulNamespacePrefix = \
"{http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul}"
setting = root.find(xulNamespacePrefix + 'setting')
self.failUnlessEqual(setting.get('pref'),
'extensions.human-readable.test42')
prefsjs = self.xpi.read('defaults/preferences/prefs.js').decode("utf-8")
self.failUnlessEqual(prefsjs,
'pref("extensions.human-readable.test42", true);\n')
def testPackageWithNoPrefs(self):
self.makexpi('no-prefs')
self.failIf('options.xul' in self.xpi.namelist())
@@ -96,6 +115,33 @@ class PrefsTests(unittest.TestCase):
prefsjs = self.xpi.read('defaults/preferences/prefs.js').decode("utf-8")
self.failUnlessEqual(prefsjs, "")
def testPackageWithInvalidPreferencesBranch(self):
self.makexpi('curly-id')
self.failIfEqual(self.xpi_harness_options["preferencesBranch"],
"invalid^branch*name")
self.failUnlessEqual(self.xpi_harness_options["preferencesBranch"],
"{34a1eae1-c20a-464f-9b0e-000000000000}")
def testPackageWithCurlyID(self):
self.makexpi('curly-id')
self.failUnlessEqual(self.xpi_harness_options["jetpackID"],
"{34a1eae1-c20a-464f-9b0e-000000000000}")
self.failUnless('options.xul' in self.xpi.namelist())
optsxul = self.xpi.read('options.xul').decode("utf-8")
root = ElementTree.XML(optsxul.encode('utf-8'))
xulNamespacePrefix = \
"{http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul}"
setting = root.find(xulNamespacePrefix + 'setting')
self.failUnlessEqual(setting.get('pref'),
'extensions.{34a1eae1-c20a-464f-9b0e-000000000000}.test13')
prefsjs = self.xpi.read('defaults/preferences/prefs.js').decode("utf-8")
self.failUnlessEqual(prefsjs,
'pref("extensions.{34a1eae1-c20a-464f-9b0e-000000000000}.test13", 26);\n')
class Bug588119Tests(unittest.TestCase):
def makexpi(self, pkg_name):

Просмотреть файл

@@ -77,14 +77,15 @@ def build_xpi(template_root_dir, manifest, xpi_path,
validate_prefs(harness_options["preferences"])
opts_xul = parse_options(harness_options["preferences"],
harness_options["jetpackID"])
harness_options["jetpackID"],
harness_options["preferencesBranch"])
open('.options.xul', 'wb').write(opts_xul.encode("utf-8"))
zf.write('.options.xul', 'options.xul')
os.remove('.options.xul')
from options_defaults import parse_options_defaults
prefs_js = parse_options_defaults(harness_options["preferences"],
harness_options["jetpackID"])
harness_options["preferencesBranch"])
open('.prefs.js', 'wb').write(prefs_js.encode("utf-8"))
else:
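
For context, a sketch of the harness_options keys that build_xpi now consumes when writing options.xul and prefs.js; the values here are hypothetical and mirror the preferences-branch fixture:

    harness_options = {
        "jetpackID": "test-preferences-branch@jetpack",   # still used for data-jetpack-id
        "preferencesBranch": "human-readable",            # now used to build pref names
        "preferences": [{"name": "test42", "type": "bool",
                         "title": "test42", "value": True}],
    }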

Просмотреть файл

@@ -1,43 +0,0 @@
Primary Authors
===============
Yuri Takhteyev <http://freewisdom.org/>, who has written much of the current code
while procrastinating on his Ph.D.
Waylan Limberg <http://achinghead.com/>, who has written most of the available
extensions and later was asked to join Yuri, fixing numerous bugs, adding
documentation and making general improvements to the existing codebase,
including a complete refactor of the core.
Artem Yunusov, who as part of a 2008 GSoC project, has refactored inline
patterns, replaced the NanoDOM with ElementTree support and made various other
improvements.
Manfred Stienstra <http://www.dwerg.net/>, who wrote the original version of
the script and is responsible for various parts of the existing codebase.
David Wolever, who refactored the extension API and made other improvements
as he helped to integrate Markdown into Dr.Project.
Other Contributors
==================
The incomplete list of individuals below have provided patches or otherwise
contributed to the project in various ways. We would like to thank everyone
who has contributed to the project in any way.
Eric Abrahamsen
Jeff Balogh
Sergej Chodarev
Chris Clark
Tiago Cogumbreiro
Kjell Magne Fauske
G. Clark Haynes
Daniel Krech
Steward Midwinter
Jack Miller
Neale Pickett
John Szakmeister
Malcolm Tredinnick
Ben Wilson
and many others who helped by reporting bugs

Просмотреть файл

@@ -1,30 +0,0 @@
Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

Просмотреть файл

@@ -1,603 +0,0 @@
"""
Python Markdown
===============
Python Markdown converts Markdown to HTML and can be used as a library or
called from the command line.
## Basic usage as a module:
import markdown
md = Markdown()
html = md.convert(your_text_string)
## Basic use from the command line:
python markdown.py source.txt > destination.html
Run "python markdown.py --help" to see more options.
## Extensions
See <http://www.freewisdom.org/projects/python-markdown/> for more
information and instructions on how to extend the functionality of
Python Markdown. Read that before you try modifying this file.
## Authors and License
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
Contact: markdown@freewisdom.org
Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
Copyright 200? Django Software Foundation (OrderedDict implementation)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see docs/LICENSE for details).
"""
version = "2.0"
version_info = (2,0,0, "Final")
import re
import codecs
import sys
import warnings
import logging
from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
"""
CONSTANTS
=============================================================================
"""
"""
Constants you might want to modify
-----------------------------------------------------------------------------
"""
# default logging level for command-line use
COMMAND_LINE_LOGGING_LEVEL = CRITICAL
TAB_LENGTH = 4 # expand tabs to this many spaces
ENABLE_ATTRIBUTES = True # @id = xyz -> <... id="xyz">
SMART_EMPHASIS = True # this_or_that does not become this<i>or</i>that
DEFAULT_OUTPUT_FORMAT = 'xhtml1' # xhtml or html4 output
HTML_REMOVED_TEXT = "[HTML_REMOVED]" # text used instead of HTML in safe mode
BLOCK_LEVEL_ELEMENTS = re.compile("p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
"|script|noscript|form|fieldset|iframe|math"
"|ins|del|hr|hr/|style|li|dt|dd|thead|tbody"
"|tr|th|td")
DOC_TAG = "div" # Element used to wrap document - later removed
# Placeholders
STX = u'\u0002' # Use STX ("Start of text") for start-of-placeholder
ETX = u'\u0003' # Use ETX ("End of text") for end-of-placeholder
INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
AMP_SUBSTITUTE = STX+"amp"+ETX
"""
Constants you probably do not need to change
-----------------------------------------------------------------------------
"""
RTL_BIDI_RANGES = ( (u'\u0590', u'\u07FF'),
# Hebrew (0590-05FF), Arabic (0600-06FF),
# Syriac (0700-074F), Arabic supplement (0750-077F),
# Thaana (0780-07BF), Nko (07C0-07FF).
(u'\u2D30', u'\u2D7F'), # Tifinagh
)
"""
AUXILIARY GLOBAL FUNCTIONS
=============================================================================
"""
def message(level, text):
""" A wrapper method for logging debug messages. """
logger = logging.getLogger('MARKDOWN')
if logger.handlers:
# The logger is configured
logger.log(level, text)
if level > WARN:
sys.exit(0)
elif level > WARN:
raise MarkdownException, text
else:
warnings.warn(text, MarkdownWarning)
def isBlockLevel(tag):
"""Check if the tag is a block level HTML tag."""
return BLOCK_LEVEL_ELEMENTS.match(tag)
"""
MISC AUXILIARY CLASSES
=============================================================================
"""
class AtomicString(unicode):
"""A string which should not be further processed."""
pass
class MarkdownException(Exception):
""" A Markdown Exception. """
pass
class MarkdownWarning(Warning):
""" A Markdown Warning. """
pass
"""
OVERALL DESIGN
=============================================================================
Markdown processing takes place in four steps:
1. A bunch of "preprocessors" munge the input text.
2. BlockParser() parses the high-level structural elements of the
pre-processed text into an ElementTree.
3. A bunch of "treeprocessors" are run against the ElementTree. One such
treeprocessor runs InlinePatterns against the ElementTree, detecting inline
markup.
4. Some post-processors are run against the text after the ElementTree has
been serialized into text.
5. The output is written to a string.
Those steps are put together by the Markdown() class.
"""
import preprocessors
import blockprocessors
import treeprocessors
import inlinepatterns
import postprocessors
import blockparser
import etree_loader
import odict
# Extensions should use "markdown.etree" instead of "etree" (or do `from
# markdown import etree`). Do not import it by yourself.
etree = etree_loader.importETree()
# Adds the ability to output html4
import html4
class Markdown:
"""Convert Markdown to HTML."""
def __init__(self,
extensions=[],
extension_configs={},
safe_mode = False,
output_format=DEFAULT_OUTPUT_FORMAT):
"""
Creates a new Markdown instance.
Keyword arguments:
* extensions: A list of extensions.
If they are of type string, the module mdx_name.py will be loaded.
If they are a subclass of markdown.Extension, they will be used
as-is.
* extension-configs: Configuration setting for extensions.
* safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
* output_format: Format of output. Supported formats are:
* "xhtml1": Outputs XHTML 1.x. Default.
* "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
* "html4": Outputs HTML 4
* "html": Outputs latest supported version of HTML (currently HTML 4).
Note that it is suggested that the more specific formats ("xhtml1"
and "html4") be used as "xhtml" or "html" may change in the future
if it makes sense at that time.
"""
self.safeMode = safe_mode
self.registeredExtensions = []
self.docType = ""
self.stripTopLevelTags = True
# Preprocessors
self.preprocessors = odict.OrderedDict()
self.preprocessors["html_block"] = \
preprocessors.HtmlBlockPreprocessor(self)
self.preprocessors["reference"] = \
preprocessors.ReferencePreprocessor(self)
# footnote preprocessor will be inserted with "<reference"
# Block processors - run by the parser
self.parser = blockparser.BlockParser()
self.parser.blockprocessors['empty'] = \
blockprocessors.EmptyBlockProcessor(self.parser)
self.parser.blockprocessors['indent'] = \
blockprocessors.ListIndentProcessor(self.parser)
self.parser.blockprocessors['code'] = \
blockprocessors.CodeBlockProcessor(self.parser)
self.parser.blockprocessors['hashheader'] = \
blockprocessors.HashHeaderProcessor(self.parser)
self.parser.blockprocessors['setextheader'] = \
blockprocessors.SetextHeaderProcessor(self.parser)
self.parser.blockprocessors['hr'] = \
blockprocessors.HRProcessor(self.parser)
self.parser.blockprocessors['olist'] = \
blockprocessors.OListProcessor(self.parser)
self.parser.blockprocessors['ulist'] = \
blockprocessors.UListProcessor(self.parser)
self.parser.blockprocessors['quote'] = \
blockprocessors.BlockQuoteProcessor(self.parser)
self.parser.blockprocessors['paragraph'] = \
blockprocessors.ParagraphProcessor(self.parser)
#self.prePatterns = []
# Inline patterns - Run on the tree
self.inlinePatterns = odict.OrderedDict()
self.inlinePatterns["backtick"] = \
inlinepatterns.BacktickPattern(inlinepatterns.BACKTICK_RE)
self.inlinePatterns["escape"] = \
inlinepatterns.SimpleTextPattern(inlinepatterns.ESCAPE_RE)
self.inlinePatterns["reference"] = \
inlinepatterns.ReferencePattern(inlinepatterns.REFERENCE_RE, self)
self.inlinePatterns["link"] = \
inlinepatterns.LinkPattern(inlinepatterns.LINK_RE, self)
self.inlinePatterns["image_link"] = \
inlinepatterns.ImagePattern(inlinepatterns.IMAGE_LINK_RE, self)
self.inlinePatterns["image_reference"] = \
inlinepatterns.ImageReferencePattern(inlinepatterns.IMAGE_REFERENCE_RE, self)
self.inlinePatterns["autolink"] = \
inlinepatterns.AutolinkPattern(inlinepatterns.AUTOLINK_RE, self)
self.inlinePatterns["automail"] = \
inlinepatterns.AutomailPattern(inlinepatterns.AUTOMAIL_RE, self)
self.inlinePatterns["linebreak2"] = \
inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_2_RE, 'br')
self.inlinePatterns["linebreak"] = \
inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_RE, 'br')
self.inlinePatterns["html"] = \
inlinepatterns.HtmlPattern(inlinepatterns.HTML_RE, self)
self.inlinePatterns["entity"] = \
inlinepatterns.HtmlPattern(inlinepatterns.ENTITY_RE, self)
self.inlinePatterns["not_strong"] = \
inlinepatterns.SimpleTextPattern(inlinepatterns.NOT_STRONG_RE)
self.inlinePatterns["strong_em"] = \
inlinepatterns.DoubleTagPattern(inlinepatterns.STRONG_EM_RE, 'strong,em')
self.inlinePatterns["strong"] = \
inlinepatterns.SimpleTagPattern(inlinepatterns.STRONG_RE, 'strong')
self.inlinePatterns["emphasis"] = \
inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_RE, 'em')
self.inlinePatterns["emphasis2"] = \
inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_2_RE, 'em')
# The order of the handlers matters!!!
# Tree processors - run once we have a basic parse.
self.treeprocessors = odict.OrderedDict()
self.treeprocessors["inline"] = treeprocessors.InlineProcessor(self)
self.treeprocessors["prettify"] = \
treeprocessors.PrettifyTreeprocessor(self)
# Postprocessors - finishing touches.
self.postprocessors = odict.OrderedDict()
self.postprocessors["raw_html"] = \
postprocessors.RawHtmlPostprocessor(self)
self.postprocessors["amp_substitute"] = \
postprocessors.AndSubstitutePostprocessor()
# footnote postprocessor will be inserted with ">amp_substitute"
# Map format keys to serializers
self.output_formats = {
'html' : html4.to_html_string,
'html4' : html4.to_html_string,
'xhtml' : etree.tostring,
'xhtml1': etree.tostring,
}
self.references = {}
self.htmlStash = preprocessors.HtmlStash()
self.registerExtensions(extensions = extensions,
configs = extension_configs)
self.set_output_format(output_format)
self.reset()
def registerExtensions(self, extensions, configs):
"""
Register extensions with this instance of Markdown.
Keyword arguments:
* extensions: A list of extensions, which can either
be strings or objects. See the docstring on Markdown.
* configs: A dictionary mapping module names to config options.
"""
for ext in extensions:
if isinstance(ext, basestring):
ext = load_extension(ext, configs.get(ext, []))
try:
ext.extendMarkdown(self, globals())
except AttributeError:
message(ERROR, "Incorrect type! Extension '%s' is "
"neither a string or an Extension." %(repr(ext)))
def registerExtension(self, extension):
""" This gets called by the extension """
self.registeredExtensions.append(extension)
def reset(self):
"""
Resets all state variables so that we can start with a new text.
"""
self.htmlStash.reset()
self.references.clear()
for extension in self.registeredExtensions:
extension.reset()
def set_output_format(self, format):
""" Set the output format for the class instance. """
try:
self.serializer = self.output_formats[format.lower()]
except KeyError:
message(CRITICAL, 'Invalid Output Format: "%s". Use one of %s.' \
% (format, self.output_formats.keys()))
def convert(self, source):
"""
Convert markdown to serialized XHTML or HTML.
Keyword arguments:
* source: Source text as a Unicode string.
"""
# Fixup the source text
if not source.strip():
return u"" # a blank unicode string
try:
source = unicode(source)
except UnicodeDecodeError:
message(CRITICAL, 'UnicodeDecodeError: Markdown only accepts unicode or ascii input.')
return u""
source = source.replace(STX, "").replace(ETX, "")
source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
source = re.sub(r'\n\s+\n', '\n\n', source)
source = source.expandtabs(TAB_LENGTH)
# Split into lines and run the line preprocessors.
self.lines = source.split("\n")
for prep in self.preprocessors.values():
self.lines = prep.run(self.lines)
# Parse the high-level elements.
root = self.parser.parseDocument(self.lines).getroot()
# Run the tree-processors
for treeprocessor in self.treeprocessors.values():
newRoot = treeprocessor.run(root)
if newRoot:
root = newRoot
# Serialize _properly_. Strip top-level tags.
output, length = codecs.utf_8_decode(self.serializer(root, encoding="utf8"))
if self.stripTopLevelTags:
start = output.index('<%s>'%DOC_TAG)+len(DOC_TAG)+2
end = output.rindex('</%s>'%DOC_TAG)
output = output[start:end].strip()
# Run the text post-processors
for pp in self.postprocessors.values():
output = pp.run(output)
return output.strip()
def convertFile(self, input=None, output=None, encoding=None):
"""Converts a markdown file and returns the HTML as a unicode string.
Decodes the file using the provided encoding (defaults to utf-8),
passes the file content to markdown, and outputs the html to either
the provided stream or the file with provided name, using the same
encoding as the source file.
**Note:** This is the only place that decoding and encoding of unicode
takes place in Python-Markdown. (All other code is unicode-in /
unicode-out.)
Keyword arguments:
* input: Name of source text file.
* output: Name of output file. Writes to stdout if `None`.
* encoding: Encoding of input and output files. Defaults to utf-8.
"""
encoding = encoding or "utf-8"
# Read the source
input_file = codecs.open(input, mode="r", encoding=encoding)
text = input_file.read()
input_file.close()
text = text.lstrip(u'\ufeff') # remove the byte-order mark
# Convert
html = self.convert(text)
# Write to file or stdout
if isinstance(output, (str, unicode)):
output_file = codecs.open(output, "w", encoding=encoding)
output_file.write(html)
output_file.close()
else:
output.write(html.encode(encoding))
"""
Extensions
-----------------------------------------------------------------------------
"""
class Extension:
""" Base class for extensions to subclass. """
def __init__(self, configs = {}):
"""Create an instance of an Extention.
Keyword arguments:
* configs: A dict of configuration setting used by an Extension.
"""
self.config = configs
def getConfig(self, key):
""" Return a setting for the given key or an empty string. """
if key in self.config:
return self.config[key][0]
else:
return ""
def getConfigInfo(self):
""" Return all config settings as a list of tuples. """
return [(key, self.config[key][1]) for key in self.config.keys()]
def setConfig(self, key, value):
""" Set a config setting for `key` with the given `value`. """
self.config[key][0] = value
def extendMarkdown(self, md, md_globals):
"""
Add the various processors and patterns to the Markdown instance.
This method must be overridden by every extension.
Keyword arguments:
* md: The Markdown instance.
* md_globals: Global variables in the markdown module namespace.
"""
pass
def load_extension(ext_name, configs = []):
"""Load extension by name, then return the module.
The extension name may contain arguments as part of the string in the
following format: "extname(key1=value1,key2=value2)"
"""
# Parse extensions config params (ignore the order)
configs = dict(configs)
pos = ext_name.find("(") # find the first "("
if pos > 0:
ext_args = ext_name[pos+1:-1]
ext_name = ext_name[:pos]
pairs = [x.split("=") for x in ext_args.split(",")]
configs.update([(x.strip(), y.strip()) for (x, y) in pairs])
# Setup the module names
ext_module = 'markdown.extensions'
module_name_new_style = '.'.join([ext_module, ext_name])
module_name_old_style = '_'.join(['mdx', ext_name])
# Try loading the extension first from one place, then another
try: # New style (markdown.extensions.<extension>)
module = __import__(module_name_new_style, {}, {}, [ext_module])
except ImportError:
try: # Old style (mdx.<extension>)
module = __import__(module_name_old_style)
except ImportError:
message(WARN, "Failed loading extension '%s' from '%s' or '%s'"
% (ext_name, module_name_new_style, module_name_old_style))
# Return None so we don't try to initiate a non-existent extension
return None
# If the module is loaded successfully, we expect it to define a
# function called makeExtension()
try:
return module.makeExtension(configs.items())
except AttributeError:
message(CRITICAL, "Failed to initiate extension '%s'" % ext_name)
def load_extensions(ext_names):
"""Loads multiple extensions"""
extensions = []
for ext_name in ext_names:
extension = load_extension(ext_name)
if extension:
extensions.append(extension)
return extensions
"""
EXPORTED FUNCTIONS
=============================================================================
Those are the two functions we really mean to export: markdown() and
markdownFromFile().
"""
def markdown(text,
extensions = [],
safe_mode = False,
output_format = DEFAULT_OUTPUT_FORMAT):
"""Convert a markdown string to HTML and return HTML as a unicode string.
This is a shortcut function for `Markdown` class to cover the most
basic use case. It initializes an instance of Markdown, loads the
necessary extensions and runs the parser on the given text.
Keyword arguments:
* text: Markdown formatted text as Unicode or ASCII string.
* extensions: A list of extensions or extension names (may contain config args).
* safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
* output_format: Format of output. Supported formats are:
* "xhtml1": Outputs XHTML 1.x. Default.
* "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
* "html4": Outputs HTML 4
* "html": Outputs latest supported version of HTML (currently HTML 4).
Note that it is suggested that the more specific formats ("xhtml1"
and "html4") be used as "xhtml" or "html" may change in the future
if it makes sense at that time.
Returns: An HTML document as a string.
"""
md = Markdown(extensions=load_extensions(extensions),
safe_mode=safe_mode,
output_format=output_format)
return md.convert(text)
def markdownFromFile(input = None,
output = None,
extensions = [],
encoding = None,
safe_mode = False,
output_format = DEFAULT_OUTPUT_FORMAT):
"""Read markdown code from a file and write it to a file or a stream."""
md = Markdown(extensions=load_extensions(extensions),
safe_mode=safe_mode,
output_format=output_format)
md.convertFile(input, output, encoding)

Просмотреть файл

@@ -1,95 +0,0 @@
import markdown
class State(list):
""" Track the current and nested state of the parser.
This utility class is used to track the state of the BlockParser and
support multiple levels of nesting. It's just a simple API wrapped around
a list. Each time a state is set, that state is appended to the end of the
list. Each time a state is reset, that state is removed from the end of
the list.
Therefore, each time a state is set for a nested block, that state must be
reset when we back out of that level of nesting or the state could be
corrupted.
While all the methods of a list object are available, only the three
defined below need be used.
"""
def set(self, state):
""" Set a new state. """
self.append(state)
def reset(self):
""" Step back one step in nested state. """
self.pop()
def isstate(self, state):
""" Test that top (current) level is of given state. """
if len(self):
return self[-1] == state
else:
return False
class BlockParser:
""" Parse Markdown blocks into an ElementTree object.
A wrapper class that stitches the various BlockProcessors together,
looping through them and creating an ElementTree object.
"""
def __init__(self):
self.blockprocessors = markdown.odict.OrderedDict()
self.state = State()
def parseDocument(self, lines):
""" Parse a markdown document into an ElementTree.
Given a list of lines, an ElementTree object (not just a parent Element)
is created and the root element is passed to the parser as the parent.
The ElementTree object is returned.
This should only be called on an entire document, not pieces.
"""
# Create a ElementTree from the lines
self.root = markdown.etree.Element(markdown.DOC_TAG)
self.parseChunk(self.root, '\n'.join(lines))
return markdown.etree.ElementTree(self.root)
def parseChunk(self, parent, text):
""" Parse a chunk of markdown text and attach to given etree node.
While the ``text`` argument is generally assumed to contain multiple
blocks which will be split on blank lines, it could contain only one
block. Generally, this method would be called by extensions when
block parsing is required.
The ``parent`` etree Element passed in is altered in place.
Nothing is returned.
"""
self.parseBlocks(parent, text.split('\n\n'))
def parseBlocks(self, parent, blocks):
""" Process blocks of markdown text and attach to given etree node.
Given a list of ``blocks``, each blockprocessor is stepped through
until there are no blocks left. While an extension could potentially
call this method directly, it's generally expected to be used internally.
This is a public method as an extension may need to add/alter additional
BlockProcessors which call this method to recursively parse a nested
block.
"""
while blocks:
for processor in self.blockprocessors.values():
if processor.test(parent, blocks[0]):
processor.run(parent, blocks)
break

Просмотреть файл

@@ -1,460 +0,0 @@
"""
CORE MARKDOWN BLOCKPARSER
=============================================================================
This parser handles basic parsing of Markdown blocks. It doesn't concern itself
with inline elements such as **bold** or *italics*, but rather just catches
blocks, lists, quotes, etc.
The BlockParser is made up of a bunch of BlockProcessors, each handling a
different type of block. Extensions may add/replace/remove BlockProcessors
as they need to alter how markdown blocks are parsed.
"""
import re
import markdown
class BlockProcessor:
""" Base class for block processors.
Each subclass will provide the methods below to work with the source and
tree. Each processor will need to define its own ``test`` and ``run``
methods. The ``test`` method should return True or False, to indicate
whether the current block should be processed by this processor. If the
test passes, the parser will call the processor's ``run`` method.
"""
def __init__(self, parser=None):
self.parser = parser
def lastChild(self, parent):
""" Return the last child of an etree element. """
if len(parent):
return parent[-1]
else:
return None
def detab(self, text):
""" Remove a tab from the front of each line of the given text. """
newtext = []
lines = text.split('\n')
for line in lines:
if line.startswith(' '*markdown.TAB_LENGTH):
newtext.append(line[markdown.TAB_LENGTH:])
elif not line.strip():
newtext.append('')
else:
break
return '\n'.join(newtext), '\n'.join(lines[len(newtext):])
def looseDetab(self, text, level=1):
""" Remove a tab from front of lines but allowing dedented lines. """
lines = text.split('\n')
for i in range(len(lines)):
if lines[i].startswith(' '*markdown.TAB_LENGTH*level):
lines[i] = lines[i][markdown.TAB_LENGTH*level:]
return '\n'.join(lines)
def test(self, parent, block):
""" Test for block type. Must be overridden by subclasses.
As the parser loops through processors, it will call the ``test`` method
on each to determine if the given block of text is of that type. This
method must return a boolean ``True`` or ``False``. The actual method of
testing is left to the needs of that particular block type. It could
be as simple as ``block.startswith(some_string)`` or a complex regular
expression. As the block type may be different depending on the parent
of the block (i.e. inside a list), the parent etree element is also
provided and may be used as part of the test.
Keywords:
* ``parent``: A etree element which will be the parent of the block.
* ``block``: A block of text from the source which has been split at
blank lines.
"""
pass
def run(self, parent, blocks):
""" Run processor. Must be overridden by subclasses.
When the parser determines the appropriate type of a block, the parser
will call the corresponding processor's ``run`` method. This method
should parse the individual lines of the block and append them to
the etree.
Note that both the ``parent`` and ``etree`` keywords are pointers
to instances of the objects which should be edited in place. Each
processor must make changes to the existing objects as there is no
mechanism to return new/different objects to replace them.
This means that this method should be adding SubElements or adding text
to the parent, and should remove (``pop``) or add (``insert``) items to
the list of blocks.
Keywords:
* ``parent``: A etree element which is the parent of the current block.
* ``blocks``: A list of all remaining blocks of the document.
"""
pass
class ListIndentProcessor(BlockProcessor):
""" Process children of list items.
Example:
* a list item
process this part
or this part
"""
INDENT_RE = re.compile(r'^(([ ]{%s})+)'% markdown.TAB_LENGTH)
ITEM_TYPES = ['li']
LIST_TYPES = ['ul', 'ol']
def test(self, parent, block):
return block.startswith(' '*markdown.TAB_LENGTH) and \
not self.parser.state.isstate('detabbed') and \
(parent.tag in self.ITEM_TYPES or \
(len(parent) and parent[-1] and \
(parent[-1].tag in self.LIST_TYPES)
)
)
def run(self, parent, blocks):
block = blocks.pop(0)
level, sibling = self.get_level(parent, block)
block = self.looseDetab(block, level)
self.parser.state.set('detabbed')
if parent.tag in self.ITEM_TYPES:
# The parent is already a li. Just parse the child block.
self.parser.parseBlocks(parent, [block])
elif sibling.tag in self.ITEM_TYPES:
# The sibling is a li. Use it as parent.
self.parser.parseBlocks(sibling, [block])
elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
# The parent is a list (``ol`` or ``ul``) which has children.
# Assume the last child li is the parent of this block.
if sibling[-1].text:
# If the parent li has text, that text needs to be moved to a p
block = '%s\n\n%s' % (sibling[-1].text, block)
sibling[-1].text = ''
self.parser.parseChunk(sibling[-1], block)
else:
self.create_item(sibling, block)
self.parser.state.reset()
def create_item(self, parent, block):
""" Create a new li and parse the block with it as the parent. """
li = markdown.etree.SubElement(parent, 'li')
self.parser.parseBlocks(li, [block])
def get_level(self, parent, block):
""" Get level of indent based on list level. """
# Get indent level
m = self.INDENT_RE.match(block)
if m:
indent_level = len(m.group(1))/markdown.TAB_LENGTH
else:
indent_level = 0
if self.parser.state.isstate('list'):
# We're in a tightlist - so we already are at correct parent.
level = 1
else:
# We're in a looselist - so we need to find parent.
level = 0
# Step through children of tree to find matching indent level.
while indent_level > level:
child = self.lastChild(parent)
if child and (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES):
if child.tag in self.LIST_TYPES:
level += 1
parent = child
else:
# No more child levels. If we're short of indent_level,
# we have a code block. So we stop here.
break
return level, parent
class CodeBlockProcessor(BlockProcessor):
""" Process code blocks. """
def test(self, parent, block):
return block.startswith(' '*markdown.TAB_LENGTH)
def run(self, parent, blocks):
sibling = self.lastChild(parent)
block = blocks.pop(0)
theRest = ''
if sibling and sibling.tag == "pre" and len(sibling) \
and sibling[0].tag == "code":
# The previous block was a code block. As blank lines do not start
# new code blocks, append this block to the previous, adding back
# linebreaks removed from the split into a list.
code = sibling[0]
block, theRest = self.detab(block)
code.text = markdown.AtomicString('%s\n%s\n' % (code.text, block.rstrip()))
else:
# This is a new codeblock. Create the elements and insert text.
pre = markdown.etree.SubElement(parent, 'pre')
code = markdown.etree.SubElement(pre, 'code')
block, theRest = self.detab(block)
code.text = markdown.AtomicString('%s\n' % block.rstrip())
if theRest:
# This block contained unindented line(s) after the first indented
# line. Insert these lines as the first block of the master blocks
# list for future processing.
blocks.insert(0, theRest)
class BlockQuoteProcessor(BlockProcessor):
RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()] # Lines before blockquote
# Pass lines before blockquote in recursively for parsing first.
self.parser.parseBlocks(parent, [before])
# Remove ``> `` from beginning of each line.
block = '\n'.join([self.clean(line) for line in
block[m.start():].split('\n')])
sibling = self.lastChild(parent)
if sibling and sibling.tag == "blockquote":
# Previous block was a blockquote so set that as this block's parent
quote = sibling
else:
# This is a new blockquote. Create a new parent element.
quote = markdown.etree.SubElement(parent, 'blockquote')
# Recursively parse block with blockquote as parent.
self.parser.parseChunk(quote, block)
def clean(self, line):
""" Remove ``>`` from beginning of a line. """
m = self.RE.match(line)
if line.strip() == ">":
return ""
elif m:
return m.group(2)
else:
return line
class OListProcessor(BlockProcessor):
""" Process ordered list blocks. """
TAG = 'ol'
# Detect an item (``1. item``). ``group(1)`` contains contents of item.
RE = re.compile(r'^[ ]{0,3}\d+\.[ ](.*)')
# Detect items on secondary lines. They can be of either list type.
CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.)|[*+-])[ ](.*)')
# Detect indented (nested) items of either type
INDENT_RE = re.compile(r'^[ ]{4,7}((\d+\.)|[*+-])[ ].*')
def test(self, parent, block):
return bool(self.RE.match(block))
def run(self, parent, blocks):
# Check for multiple items in one block.
items = self.get_items(blocks.pop(0))
sibling = self.lastChild(parent)
if sibling and sibling.tag in ['ol', 'ul']:
# Previous block was a list item, so set that as parent
lst = sibling
# make sure previous item is in a p.
if len(lst) and lst[-1].text and not len(lst[-1]):
p = markdown.etree.SubElement(lst[-1], 'p')
p.text = lst[-1].text
lst[-1].text = ''
# parse first block differently as it gets wrapped in a p.
li = markdown.etree.SubElement(lst, 'li')
self.parser.state.set('looselist')
firstitem = items.pop(0)
self.parser.parseBlocks(li, [firstitem])
self.parser.state.reset()
else:
# This is a new list so create parent with appropriate tag.
lst = markdown.etree.SubElement(parent, self.TAG)
self.parser.state.set('list')
# Loop through items in block, recursively parsing each with the
# appropriate parent.
for item in items:
if item.startswith(' '*markdown.TAB_LENGTH):
# Item is indented. Parse with last item as parent
self.parser.parseBlocks(lst[-1], [item])
else:
# New item. Create li and parse with it as parent
li = markdown.etree.SubElement(lst, 'li')
self.parser.parseBlocks(li, [item])
self.parser.state.reset()
def get_items(self, block):
""" Break a block into list items. """
items = []
for line in block.split('\n'):
m = self.CHILD_RE.match(line)
if m:
# This is a new item. Append
items.append(m.group(3))
elif self.INDENT_RE.match(line):
# This is an indented (possibly nested) item.
if items[-1].startswith(' '*markdown.TAB_LENGTH):
# Previous item was indented. Append to that item.
items[-1] = '%s\n%s' % (items[-1], line)
else:
items.append(line)
else:
# This is another line of previous item. Append to that item.
items[-1] = '%s\n%s' % (items[-1], line)
return items
class UListProcessor(OListProcessor):
""" Process unordered list blocks. """
TAG = 'ul'
RE = re.compile(r'^[ ]{0,3}[*+-][ ](.*)')
class HashHeaderProcessor(BlockProcessor):
""" Process Hash Headers. """
# Detect a header at start of any line in block
RE = re.compile(r'(^|\n)(?P<level>#{1,6})(?P<header>.*?)#*(\n|$)')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()] # All lines before header
after = block[m.end():] # All lines after header
if before:
# As the header was not the first line of the block and the
# lines before the header must be parsed first,
# recursively parse these lines as a block.
self.parser.parseBlocks(parent, [before])
# Create header using named groups from RE
h = markdown.etree.SubElement(parent, 'h%d' % len(m.group('level')))
h.text = m.group('header').strip()
if after:
# Insert remaining lines as first block for future parsing.
blocks.insert(0, after)
else:
# This should never happen, but just in case...
message(CRITICAL, "We've got a problem header!")
class SetextHeaderProcessor(BlockProcessor):
""" Process Setext-style Headers. """
# Detect Setext-style header. Must be first 2 lines of block.
RE = re.compile(r'^.*?\n[=-]{3,}', re.MULTILINE)
def test(self, parent, block):
return bool(self.RE.match(block))
def run(self, parent, blocks):
lines = blocks.pop(0).split('\n')
# Determine level. ``=`` is 1 and ``-`` is 2.
if lines[1].startswith('='):
level = 1
else:
level = 2
h = markdown.etree.SubElement(parent, 'h%d' % level)
h.text = lines[0].strip()
if len(lines) > 2:
# Block contains additional lines. Add to master blocks for later.
blocks.insert(0, '\n'.join(lines[2:]))
class HRProcessor(BlockProcessor):
""" Process Horizontal Rules. """
RE = r'[ ]{0,3}(?P<ch>[*_-])[ ]?((?P=ch)[ ]?){2,}[ ]*'
# Detect hr on any line of a block.
SEARCH_RE = re.compile(r'(^|\n)%s(\n|$)' % RE)
# Match a hr on a single line of text.
MATCH_RE = re.compile(r'^%s$' % RE)
def test(self, parent, block):
return bool(self.SEARCH_RE.search(block))
def run(self, parent, blocks):
lines = blocks.pop(0).split('\n')
prelines = []
# Check for lines in block before hr.
for line in lines:
m = self.MATCH_RE.match(line)
if m:
break
else:
prelines.append(line)
if len(prelines):
# Recursively parse lines before hr so they get parsed first.
self.parser.parseBlocks(parent, ['\n'.join(prelines)])
# create hr
hr = markdown.etree.SubElement(parent, 'hr')
# check for lines in block after hr.
lines = lines[len(prelines)+1:]
if len(lines):
# Add lines after hr to master blocks for later parsing.
blocks.insert(0, '\n'.join(lines))
class EmptyBlockProcessor(BlockProcessor):
""" Process blocks and start with an empty line. """
# Detect a block that only contains whitespace
# or only whitespace on the first line.
RE = re.compile(r'^\s*\n')
def test(self, parent, block):
return bool(self.RE.match(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.match(block)
if m:
# Add remaining line to master blocks for later.
blocks.insert(0, block[m.end():])
sibling = self.lastChild(parent)
if sibling and sibling.tag == 'pre' and sibling[0] and \
sibling[0].tag == 'code':
# Last block is a codeblock. Append to preserve whitespace.
sibling[0].text = markdown.AtomicString('%s/n/n/n' % sibling[0].text )
class ParagraphProcessor(BlockProcessor):
""" Process Paragraph blocks. """
def test(self, parent, block):
return True
def run(self, parent, blocks):
block = blocks.pop(0)
if block.strip():
# Not a blank block. Add to parent, otherwise throw it away.
if self.parser.state.isstate('list'):
# The parent is a tight-list. Append to parent.text
if parent.text:
parent.text = '%s\n%s' % (parent.text, block)
else:
parent.text = block.lstrip()
else:
# Create a regular paragraph
p = markdown.etree.SubElement(parent, 'p')
p.text = block.lstrip()

Просмотреть файл

@@ -1,96 +0,0 @@
"""
COMMAND-LINE SPECIFIC STUFF
=============================================================================
The rest of the code is specifically for handling the case where Python
Markdown is called from the command line.
"""
import markdown
import sys
import logging
from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
EXECUTABLE_NAME_FOR_USAGE = "python markdown.py"
""" The name used in the usage statement displayed for python versions < 2.3.
(With python 2.3 and higher the usage statement is generated by optparse
and uses the actual name of the executable called.) """
OPTPARSE_WARNING = """
Python 2.3 or higher required for advanced command line options.
For lower versions of Python use:
%s INPUT_FILE > OUTPUT_FILE
""" % EXECUTABLE_NAME_FOR_USAGE
def parse_options():
"""
Define and parse `optparse` options for command-line usage.
"""
try:
optparse = __import__("optparse")
except:
if len(sys.argv) == 2:
return {'input': sys.argv[1],
'output': None,
'safe': False,
'extensions': [],
'encoding': None }, CRITICAL
else:
print OPTPARSE_WARNING
return None, None
parser = optparse.OptionParser(usage="%prog INPUTFILE [options]")
parser.add_option("-f", "--file", dest="filename", default=sys.stdout,
help="write output to OUTPUT_FILE",
metavar="OUTPUT_FILE")
parser.add_option("-e", "--encoding", dest="encoding",
help="encoding for input and output files",)
parser.add_option("-q", "--quiet", default = CRITICAL,
action="store_const", const=CRITICAL+10, dest="verbose",
help="suppress all messages")
parser.add_option("-v", "--verbose",
action="store_const", const=INFO, dest="verbose",
help="print info messages")
parser.add_option("-s", "--safe", dest="safe", default=False,
metavar="SAFE_MODE",
help="safe mode ('replace', 'remove' or 'escape' user's HTML tag)")
parser.add_option("-o", "--output_format", dest="output_format",
default='xhtml1', metavar="OUTPUT_FORMAT",
help="Format of output. One of 'xhtml1' (default) or 'html4'.")
parser.add_option("--noisy",
action="store_const", const=DEBUG, dest="verbose",
help="print debug messages")
parser.add_option("-x", "--extension", action="append", dest="extensions",
help = "load extension EXTENSION", metavar="EXTENSION")
(options, args) = parser.parse_args()
if not len(args) == 1:
parser.print_help()
return None, None
else:
input_file = args[0]
if not options.extensions:
options.extensions = []
return {'input': input_file,
'output': options.filename,
'safe_mode': options.safe,
'extensions': options.extensions,
'encoding': options.encoding,
'output_format': options.output_format}, options.verbose
def run():
"""Run Markdown from the command line."""
# Parse options and adjust logging level if necessary
options, logging_level = parse_options()
if not options: sys.exit(0)
if logging_level: logging.getLogger('MARKDOWN').setLevel(logging_level)
# Run
markdown.markdownFromFile(**options)

Просмотреть файл

@@ -1,33 +0,0 @@
from markdown import message, CRITICAL
import sys
## Import
def importETree():
"""Import the best implementation of ElementTree, return a module object."""
etree_in_c = None
try: # Is it Python 2.5+ with C implementation of ElementTree installed?
import xml.etree.cElementTree as etree_in_c
except ImportError:
try: # Is it Python 2.5+ with Python implementation of ElementTree?
import xml.etree.ElementTree as etree
except ImportError:
try: # An earlier version of Python with cElementTree installed?
import cElementTree as etree_in_c
except ImportError:
try: # An earlier version of Python with Python ElementTree?
import elementtree.ElementTree as etree
except ImportError:
message(CRITICAL, "Failed to import ElementTree")
sys.exit(1)
if etree_in_c and etree_in_c.VERSION < "1.0":
message(CRITICAL, "For cElementTree version 1.0 or higher is required.")
sys.exit(1)
elif etree_in_c :
return etree_in_c
elif etree.VERSION < "1.1":
message(CRITICAL, "For ElementTree version 1.1 or higher is required")
sys.exit(1)
else :
return etree

Просмотреть файл

Просмотреть файл

@@ -1,95 +0,0 @@
'''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
Simple Usage:
>>> import markdown
>>> text = """
... Some text with an ABBR and a REF. Ignore REFERENCE and ref.
...
... *[ABBR]: Abbreviation
... *[REF]: Abbreviation Reference
... """
>>> markdown.markdown(text, ['abbr'])
u'<p>Some text with an <abbr title="Abbreviation">ABBR</abbr> and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore REFERENCE and ref.</p>'
Copyright 2007-2008
* [Waylan Limberg](http://achinghead.com/)
* [Seemant Kulleen](http://www.kulleen.org/)
'''
import markdown, re
from markdown import etree
# Global Vars
ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
class AbbrExtension(markdown.Extension):
""" Abbreviation Extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """
md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
class AbbrPreprocessor(markdown.preprocessors.Preprocessor):
""" Abbreviation Preprocessor - parse text for abbr references. """
def run(self, lines):
'''
Find and remove all Abbreviation references from the text.
Each reference is set as a new AbbrPattern in the markdown instance.
'''
new_text = []
for line in lines:
m = ABBR_REF_RE.match(line)
if m:
abbr = m.group('abbr').strip()
title = m.group('title').strip()
self.markdown.inlinePatterns['abbr-%s'%abbr] = \
AbbrPattern(self._generate_pattern(abbr), title)
else:
new_text.append(line)
return new_text
def _generate_pattern(self, text):
'''
Given a string, returns a regex pattern to match that string.
'HTML' -> r'(?P<abbr>[H][T][M][L])'
Note: we force each char as a literal match (in brackets) as we don't
know what they will be beforehand.
'''
chars = list(text)
for i in range(len(chars)):
chars[i] = r'[%s]' % chars[i]
return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
class AbbrPattern(markdown.inlinepatterns.Pattern):
""" Abbreviation inline pattern. """
def __init__(self, pattern, title):
markdown.inlinepatterns.Pattern.__init__(self, pattern)
self.title = title
def handleMatch(self, m):
abbr = etree.Element('abbr')
abbr.text = m.group('abbr')
abbr.set('title', self.title)
return abbr
def makeExtension(configs=None):
return AbbrExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()

Просмотреть файл

@@ -1,224 +0,0 @@
#!/usr/bin/python
"""
CodeHilite Extension for Python-Markdown
========================================
Adds code/syntax highlighting to standard Python-Markdown code blocks.
Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://www.freewisdom.org/project/python-markdown/CodeHilite>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org/)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
* [Pygments](http://pygments.org/)
"""
import markdown
# --------------- CONSTANTS YOU MIGHT WANT TO MODIFY -----------------
try:
TAB_LENGTH = markdown.TAB_LENGTH
except AttributeError:
TAB_LENGTH = 4
# ------------------ The Main CodeHilite Class ----------------------
class CodeHilite:
"""
Determine language of source code, and pass it into the pygments highlighter.
Basic Usage:
>>> code = CodeHilite(src = 'some text')
>>> html = code.hilite()
* src: Source string or any object with a .readline attribute.
* linenos: (Boolean) Turn line numbering 'on' or 'off' (off by default).
* css_class: Set class name of wrapper div ('codehilite' by default).
Low Level Usage:
>>> code = CodeHilite()
>>> code.src = 'some text' # String or anything with a .readline attr.
>>> code.linenos = True # True or False; Turns line numbering on or off.
>>> html = code.hilite()
"""
def __init__(self, src=None, linenos=False, css_class="codehilite"):
self.src = src
self.lang = None
self.linenos = linenos
self.css_class = css_class
def hilite(self):
"""
Pass code to the [Pygments](http://pygments.pocoo.org/) highlighter with
optional line numbers. The output should then be styled with css to
your liking. No styles are applied by default - only styling hooks
(i.e.: <span class="k">).
returns : A string of html.
"""
self.src = self.src.strip('\n')
self._getLang()
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer, \
TextLexer
from pygments.formatters import HtmlFormatter
except ImportError:
# just escape and pass through
txt = self._escape(self.src)
if self.linenos:
txt = self._number(txt)
else :
txt = '<div class="%s"><pre>%s</pre></div>\n'% \
(self.css_class, txt)
return txt
else:
try:
lexer = get_lexer_by_name(self.lang)
except ValueError:
try:
lexer = guess_lexer(self.src)
except ValueError:
lexer = TextLexer()
formatter = HtmlFormatter(linenos=self.linenos,
cssclass=self.css_class)
return highlight(self.src, lexer, formatter)
def _escape(self, txt):
""" basic html escaping """
txt = txt.replace('&', '&amp;')
txt = txt.replace('<', '&lt;')
txt = txt.replace('>', '&gt;')
txt = txt.replace('"', '&quot;')
return txt
def _number(self, txt):
""" Use <ol> for line numbering """
# Fix Whitespace
txt = txt.replace('\t', ' '*TAB_LENGTH)
txt = txt.replace(" "*4, "&nbsp; &nbsp; ")
txt = txt.replace(" "*3, "&nbsp; &nbsp;")
txt = txt.replace(" "*2, "&nbsp; ")
# Add line numbers
lines = txt.splitlines()
txt = '<div class="codehilite"><pre><ol>\n'
for line in lines:
txt += '\t<li>%s</li>\n'% line
txt += '</ol></pre></div>\n'
return txt
def _getLang(self):
"""
Determines language of a code block from shebang lines and whether said
line should be removed or left in place. If the shebang line contains a
path (even a single /) then it is assumed to be a real shebang line and
left alone. However, if no path is given (i.e.: #!python or :::python)
then it is assumed to be a mock shebang for language identification of a
code fragment and removed from the code block prior to processing for
code highlighting. When a mock shebang (i.e.: #!python) is found, line
numbering is turned on. When colons are found in place of a shebang
(i.e.: :::python), line numbering is left in the current state - off
by default.
"""
import re
#split text into lines
lines = self.src.split("\n")
#pull first line to examine
fl = lines.pop(0)
c = re.compile(r'''
(?:(?:::+)|(?P<shebang>[#]!)) # Shebang or 2 or more colons.
(?P<path>(?:/\w+)*[/ ])? # Zero or 1 path
(?P<lang>[\w+-]*) # The language
''', re.VERBOSE)
# search first line for shebang
m = c.search(fl)
if m:
# we have a match
try:
self.lang = m.group('lang').lower()
except IndexError:
self.lang = None
if m.group('path'):
# path exists - restore first line
lines.insert(0, fl)
if m.group('shebang'):
# shebang exists - use line numbers
self.linenos = True
else:
# No match
lines.insert(0, fl)
self.src = "\n".join(lines).strip("\n")
# ------------------ The Markdown Extension -------------------------------
class HiliteTreeprocessor(markdown.treeprocessors.Treeprocessor):
""" Hilight source code in code blocks. """
def run(self, root):
""" Find code blocks and store in htmlStash. """
blocks = root.getiterator('pre')
for block in blocks:
children = block.getchildren()
if len(children) == 1 and children[0].tag == 'code':
code = CodeHilite(children[0].text,
linenos=self.config['force_linenos'][0],
css_class=self.config['css_class'][0])
placeholder = self.markdown.htmlStash.store(code.hilite(),
safe=True)
# Clear codeblock in etree instance
block.clear()
# Change to p element which will later
# be removed when inserting raw html
block.tag = 'p'
block.text = placeholder
class CodeHiliteExtension(markdown.Extension):
""" Add source code hilighting to markdown codeblocks. """
def __init__(self, configs):
# define default configs
self.config = {
'force_linenos' : [False, "Force line numbers - Default: False"],
'css_class' : ["codehilite",
"Set class name for wrapper <div> - Default: codehilite"],
}
# Override defaults with user settings
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
""" Add HilitePostprocessor to Markdown instance. """
hiliter = HiliteTreeprocessor(md)
hiliter.config = self.config
md.treeprocessors.add("hilite", hiliter, "_begin")
def makeExtension(configs={}):
return CodeHiliteExtension(configs=configs)

Просмотреть файл

@@ -1,104 +0,0 @@
#!/usr/bin/env Python
"""
Definition List Extension for Python-Markdown
=============================================
Added parsing of Definition Lists to Python-Markdown.
A simple example:
Apple
: Pomaceous fruit of plants of the genus Malus in
the family Rosaceae.
: An american computer company.
Orange
: The fruit of an evergreen tree of the genus Citrus.
Copyright 2008 - [Waylan Limberg](http://achinghead.com)
"""
import markdown, re
from markdown import etree
class DefListProcessor(markdown.blockprocessors.BlockProcessor):
""" Process Definition Lists. """
RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
terms = [l.strip() for l in block[:m.start()].split('\n') if l.strip()]
d, theRest = self.detab(block[m.end():])
if d:
d = '%s\n%s' % (m.group(2), d)
else:
d = m.group(2)
#import ipdb; ipdb.set_trace()
sibling = self.lastChild(parent)
if not terms and sibling.tag == 'p':
# The previous paragraph contains the terms
state = 'looselist'
terms = sibling.text.split('\n')
parent.remove(sibling)
# Acquire new sibling
sibling = self.lastChild(parent)
else:
state = 'list'
if sibling and sibling.tag == 'dl':
# This is another item on an existing list
dl = sibling
if len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
state = 'looselist'
else:
# This is a new list
dl = etree.SubElement(parent, 'dl')
# Add terms
for term in terms:
dt = etree.SubElement(dl, 'dt')
dt.text = term
# Add definition
self.parser.state.set(state)
dd = etree.SubElement(dl, 'dd')
self.parser.parseBlocks(dd, [d])
self.parser.state.reset()
if theRest:
blocks.insert(0, theRest)
class DefListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
""" Process indented children of definition list items. """
ITEM_TYPES = ['dd']
LIST_TYPES = ['dl']
def create_item(self, parent, block):
""" Create a new dd and parse the block with it as the parent. """
dd = markdown.etree.SubElement(parent, 'dd')
self.parser.parseBlocks(dd, [block])
class DefListExtension(markdown.Extension):
""" Add definition lists to Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add an instance of DefListProcessor to BlockParser. """
md.parser.blockprocessors.add('defindent',
DefListIndentProcessor(md.parser),
'>indent')
md.parser.blockprocessors.add('deflist',
DefListProcessor(md.parser),
'>ulist')
def makeExtension(configs={}):
return DefListExtension(configs=configs)
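For reference, a small hedged example of how this extension is typically enabled (assumes Python-Markdown 2.x with def_list importable as an extension):

import markdown

text = """Apple
:   Pomaceous fruit of plants of the genus Malus.

Orange
:   The fruit of an evergreen tree of the genus Citrus.
"""
# Each term becomes a <dt> and each ':' line a <dd> inside a single <dl>.
html = markdown.markdown(text, ['def_list'])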

Просмотреть файл

@@ -1,49 +0,0 @@
#!/usr/bin/env python
"""
Python-Markdown Extra Extension
===============================
A compilation of various Python-Markdown extensions that imitates
[PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/).
Note that each of the individual extensions still needs to be available
on your PYTHONPATH. This extension simply wraps them all up as a
convenience so that only one extension needs to be listed when
initiating Markdown. See the documentation for each individual
extension for specifics about that extension.
In the event that one or more of the supported extensions are not
available for import, Markdown will issue a warning and simply continue
without that extension.
There may be additional extensions that are distributed with
Python-Markdown that are not included here in Extra. Those extensions
are not part of PHP Markdown Extra, and therefore, not part of
Python-Markdown Extra. If you really would like Extra to include
additional extensions, we suggest creating your own clone of Extra
under a different name. You could also edit the `extensions` global
variable defined below, but be aware that such changes may be lost
when you upgrade to any future version of Python-Markdown.
"""
import markdown
extensions = ['fenced_code',
'footnotes',
'headerid',
'def_list',
'tables',
'abbr',
]
class ExtraExtension(markdown.Extension):
""" Add various extensions to Markdown class."""
def extendMarkdown(self, md, md_globals):
""" Register extension instances. """
md.registerExtensions(extensions, self.config)
def makeExtension(configs={}):
return ExtraExtension(configs=dict(configs))
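A one-line usage sketch, assuming the wrapped extensions are importable as described above:

import markdown

text = "A paragraph with an *emphasised* word."
# 'extra' simply registers fenced_code, footnotes, headerid, def_list, tables and abbr.
html = markdown.markdown(text, ['extra'])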

Просмотреть файл

@@ -1,117 +0,0 @@
#!/usr/bin/env python
"""
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> html
u'<p>A paragraph before a fenced code block:</p>\\n<pre><code>Fenced code block\\n</code></pre>'
Works with safe_mode also (we check this because we are using the HtmlStash):
>>> markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')
u'<p>A paragraph before a fenced code block:</p>\\n<pre><code>Fenced code block\\n</code></pre>'
Include tildes in a code block and wrap with blank lines:
>>> text = '''
... ~~~~~~~~
...
... ~~~~
...
... ~~~~~~~~'''
>>> markdown.markdown(text, extensions=['fenced_code'])
u'<pre><code>\\n~~~~\\n\\n</code></pre>'
Multiple blocks and language tags:
>>> text = '''
... ~~~~{.python}
... block one
... ~~~~
...
... ~~~~.html
... <p>block two</p>
... ~~~~'''
>>> markdown.markdown(text, extensions=['fenced_code'])
u'<pre><code class="python">block one\\n</code></pre>\\n\\n<pre><code class="html">&lt;p&gt;block two&lt;/p&gt;\\n</code></pre>'
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://www.freewisdom.org/project/python-markdown/Fenced__Code__Blocks>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
"""
import markdown, re
# Global vars
FENCED_BLOCK_RE = re.compile( \
r'(?P<fence>^~{3,})[ ]*(\{?\.(?P<lang>[a-zA-Z0-9_-]*)\}?)?[ ]*\n(?P<code>.*?)(?P=fence)[ ]*$',
re.MULTILINE|re.DOTALL
)
CODE_WRAP = '<pre><code%s>%s</code></pre>'
LANG_TAG = ' class="%s"'
class FencedCodeExtension(markdown.Extension):
def extendMarkdown(self, md, md_globals):
""" Add FencedBlockPreprocessor to the Markdown instance. """
md.preprocessors.add('fenced_code_block',
FencedBlockPreprocessor(md),
"_begin")
class FencedBlockPreprocessor(markdown.preprocessors.Preprocessor):
def run(self, lines):
""" Match and store Fenced Code Blocks in the HtmlStash. """
text = "\n".join(lines)
while 1:
m = FENCED_BLOCK_RE.search(text)
if m:
lang = ''
if m.group('lang'):
lang = LANG_TAG % m.group('lang')
code = CODE_WRAP % (lang, self._escape(m.group('code')))
placeholder = self.markdown.htmlStash.store(code, safe=True)
text = '%s\n%s\n%s'% (text[:m.start()], placeholder, text[m.end():])
else:
break
return text.split("\n")
def _escape(self, txt):
""" basic html escaping """
txt = txt.replace('&', '&amp;')
txt = txt.replace('<', '&lt;')
txt = txt.replace('>', '&gt;')
txt = txt.replace('"', '&quot;')
return txt
def makeExtension(configs=None):
return FencedCodeExtension()
if __name__ == "__main__":
import doctest
doctest.testmod()

Просмотреть файл

@@ -1,293 +0,0 @@
"""
========================= FOOTNOTES =================================
This section adds footnote handling to markdown. It can be used as
an example for extending python-markdown with relatively complex
functionality. While in this case the extension is included inside
the module itself, it could just as easily be added from outside the
module. Note that all markdown classes above are ignorant about
footnotes. All footnote functionality is provided separately and
then added to the markdown instance at run time.
Footnote functionality is attached by calling the extendMarkdown()
method of FootnoteExtension. The method also registers the
extension to allow its state to be reset by a call to the reset()
method.
Example:
Footnotes[^1] have a label[^label] and a definition[^!DEF].
[^1]: This is a footnote
[^label]: A footnote on "label"
[^!DEF]: The footnote for definition
"""
import re, markdown
from markdown import etree
FN_BACKLINK_TEXT = "zz1337820767766393qq"
NBSP_PLACEHOLDER = "qq3936677670287331zz"
DEF_RE = re.compile(r'(\ ?\ ?\ ?)\[\^([^\]]*)\]:\s*(.*)')
TABBED_RE = re.compile(r'((\t)|( ))(.*)')
class FootnoteExtension(markdown.Extension):
""" Footnote Extension. """
def __init__ (self, configs):
""" Setup configs. """
self.config = {'PLACE_MARKER':
["///Footnotes Go Here///",
"The text string that marks where the footnotes go"]}
for key, value in configs:
self.config[key][0] = value
self.reset()
def extendMarkdown(self, md, md_globals):
""" Add pieces to Markdown. """
md.registerExtension(self)
self.parser = md.parser
# Insert a preprocessor before ReferencePreprocessor
md.preprocessors.add("footnote", FootnotePreprocessor(self),
"<reference")
# Insert an inline pattern before ImageReferencePattern
FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah
md.inlinePatterns.add("footnote", FootnotePattern(FOOTNOTE_RE, self),
"<reference")
# Insert a tree-processor that would actually add the footnote div
# This must be before the inline treeprocessor so inline patterns
# run on the contents of the div.
md.treeprocessors.add("footnote", FootnoteTreeprocessor(self),
"<inline")
# Insert a postprocessor after the amp_substitute processor
md.postprocessors.add("footnote", FootnotePostprocessor(self),
">amp_substitute")
def reset(self):
""" Clear the footnotes on reset. """
self.footnotes = markdown.odict.OrderedDict()
def findFootnotesPlaceholder(self, root):
""" Return ElementTree Element that contains Footnote placeholder. """
def finder(element):
for child in element:
if child.text:
if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
return child, True
if child.tail:
if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
return (child, element), False
finder(child)
return None
res = finder(root)
return res
def setFootnote(self, id, text):
""" Store a footnote for later retrieval. """
self.footnotes[id] = text
def makeFootnoteId(self, id):
""" Return footnote link id. """
return 'fn:%s' % id
def makeFootnoteRefId(self, id):
""" Return footnote back-link id. """
return 'fnref:%s' % id
def makeFootnotesDiv(self, root):
""" Return div of footnotes as et Element. """
if not self.footnotes.keys():
return None
div = etree.Element("div")
div.set('class', 'footnote')
hr = etree.SubElement(div, "hr")
ol = etree.SubElement(div, "ol")
for id in self.footnotes.keys():
li = etree.SubElement(ol, "li")
li.set("id", self.makeFootnoteId(id))
self.parser.parseChunk(li, self.footnotes[id])
backlink = etree.Element("a")
backlink.set("href", "#" + self.makeFootnoteRefId(id))
backlink.set("rev", "footnote")
backlink.set("title", "Jump back to footnote %d in the text" % \
(self.footnotes.index(id)+1))
backlink.text = FN_BACKLINK_TEXT
if li.getchildren():
node = li[-1]
if node.tag == "p":
node.text = node.text + NBSP_PLACEHOLDER
node.append(backlink)
else:
p = etree.SubElement(li, "p")
p.append(backlink)
return div
class FootnotePreprocessor(markdown.preprocessors.Preprocessor):
""" Find all footnote references and store for later use. """
def __init__ (self, footnotes):
self.footnotes = footnotes
def run(self, lines):
lines = self._handleFootnoteDefinitions(lines)
text = "\n".join(lines)
return text.split("\n")
def _handleFootnoteDefinitions(self, lines):
"""
Recursively find all footnote definitions in lines.
Keywords:
* lines: A list of lines of text
Return: A list of lines with footnote definitions removed.
"""
i, id, footnote = self._findFootnoteDefinition(lines)
if id :
plain = lines[:i]
detabbed, theRest = self.detectTabbed(lines[i+1:])
self.footnotes.setFootnote(id,
footnote + "\n"
+ "\n".join(detabbed))
more_plain = self._handleFootnoteDefinitions(theRest)
return plain + [""] + more_plain
else :
return lines
def _findFootnoteDefinition(self, lines):
"""
Find the parts of a footnote definition.
Keywords:
* lines: A list of lines of text.
Return: A three item tuple containing the index of the first line of a
footnote definition, the id of the definition and the body of the
definition.
"""
counter = 0
for line in lines:
m = DEF_RE.match(line)
if m:
return counter, m.group(2), m.group(3)
counter += 1
return counter, None, None
def detectTabbed(self, lines):
""" Find indented text and remove indent before further proccesing.
Keyword arguments:
* lines: an array of strings
Returns: a list of post processed items and the unused
remainder of the original list
"""
items = []
item = -1
i = 0 # to keep track of where we are
def detab(line):
match = TABBED_RE.match(line)
if match:
return match.group(4)
for line in lines:
if line.strip(): # Non-blank line
line = detab(line)
if line:
items.append(line)
i += 1
continue
else:
return items, lines[i:]
else: # Blank line: _maybe_ we are done.
i += 1 # advance
# Find the next non-blank line
for j in range(i, len(lines)):
if lines[j].strip():
next_line = lines[j]; break
else:
break # There is no more text; we are done.
# Check if the next non-blank line is tabbed
if detab(next_line): # Yes, more work to do.
items.append("")
continue
else:
break # No, we are done.
else:
i += 1
return items, lines[i:]
class FootnotePattern(markdown.inlinepatterns.Pattern):
""" InlinePattern for footnote markers in a document's body text. """
def __init__(self, pattern, footnotes):
markdown.inlinepatterns.Pattern.__init__(self, pattern)
self.footnotes = footnotes
def handleMatch(self, m):
sup = etree.Element("sup")
a = etree.SubElement(sup, "a")
id = m.group(2)
sup.set('id', self.footnotes.makeFootnoteRefId(id))
a.set('href', '#' + self.footnotes.makeFootnoteId(id))
a.set('rel', 'footnote')
a.text = str(self.footnotes.footnotes.index(id) + 1)
return sup
class FootnoteTreeprocessor(markdown.treeprocessors.Treeprocessor):
""" Build and append footnote div to end of document. """
def __init__ (self, footnotes):
self.footnotes = footnotes
def run(self, root):
footnotesDiv = self.footnotes.makeFootnotesDiv(root)
if footnotesDiv:
result = self.footnotes.findFootnotesPlaceholder(root)
if result:
node, isText = result
if isText:
node.text = None
node.insert(0, footnotesDiv)
else:
child, element = node
ind = list(element).index(child)
element.insert(ind + 1, footnotesDiv)
child.tail = None
else:
root.append(footnotesDiv)
class FootnotePostprocessor(markdown.postprocessors.Postprocessor):
""" Replace placeholders with html entities. """
def run(self, text):
text = text.replace(FN_BACKLINK_TEXT, "&#8617;")
return text.replace(NBSP_PLACEHOLDER, "&#160;")
def makeExtension(configs=[]):
""" Return an instance of the FootnoteExtension """
return FootnoteExtension(configs=configs)
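A short usage sketch based on the example in the module docstring (assumes Python-Markdown 2.x):

import markdown

text = """Footnotes[^1] have a label[^label].

[^1]: This is a footnote
[^label]: A footnote on "label"
"""
# Footnote bodies are collected into a <div class="footnote"> appended to the end
# of the document, or wherever the PLACE_MARKER text appears.
html = markdown.markdown(text, ['footnotes'])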

Просмотреть файл

@@ -1,195 +0,0 @@
#!/usr/bin/python
"""
HeaderID Extension for Python-Markdown
======================================
Adds ability to set HTML IDs for headers.
Basic usage:
>>> import markdown
>>> text = "# Some Header # {#some_id}"
>>> md = markdown.markdown(text, ['headerid'])
>>> md
u'<h1 id="some_id">Some Header</h1>'
All header IDs are unique:
>>> text = '''
... #Header
... #Another Header {#header}
... #Third Header {#header}'''
>>> md = markdown.markdown(text, ['headerid'])
>>> md
u'<h1 id="header">Header</h1>\\n<h1 id="header_1">Another Header</h1>\\n<h1 id="header_2">Third Header</h1>'
To fit within a html template's hierarchy, set the header base level:
>>> text = '''
... #Some Header
... ## Next Level'''
>>> md = markdown.markdown(text, ['headerid(level=3)'])
>>> md
u'<h3 id="some_header">Some Header</h3>\\n<h4 id="next_level">Next Level</h4>'
Turn off auto generated IDs:
>>> text = '''
... # Some Header
... # Header with ID # { #foo }'''
>>> md = markdown.markdown(text, ['headerid(forceid=False)'])
>>> md
u'<h1>Some Header</h1>\\n<h1 id="foo">Header with ID</h1>'
Use with MetaData extension:
>>> text = '''header_level: 2
... header_forceid: Off
...
... # A Header'''
>>> md = markdown.markdown(text, ['headerid', 'meta'])
>>> md
u'<h2>A Header</h2>'
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://www.freewisdom.org/project/python-markdown/HeaderId>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
"""
import markdown
from markdown import etree
import re
from string import ascii_lowercase, digits, punctuation
ID_CHARS = ascii_lowercase + digits + '-_'
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
class HeaderIdProcessor(markdown.blockprocessors.BlockProcessor):
""" Replacement BlockProcessor for Header IDs. """
# Detect a header at start of any line in block
RE = re.compile(r"""(^|\n)
(?P<level>\#{1,6}) # group('level') = string of hashes
(?P<header>.*?) # group('header') = Header text
\#* # optional closing hashes
(?:[ \t]*\{[ \t]*\#(?P<id>[-_:a-zA-Z0-9]+)[ \t]*\})?
(\n|$) # ^^ group('id') = id attribute
""",
re.VERBOSE)
IDs = []
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()] # All lines before header
after = block[m.end():] # All lines after header
if before:
# As the header was not the first line of the block and the
# lines before the header must be parsed first,
# recursively parse these lines as a block.
self.parser.parseBlocks(parent, [before])
# Create header using named groups from RE
start_level, force_id = self._get_meta()
level = len(m.group('level')) + start_level
if level > 6:
level = 6
h = markdown.etree.SubElement(parent, 'h%d' % level)
h.text = m.group('header').strip()
if m.group('id'):
h.set('id', self._unique_id(m.group('id')))
elif force_id:
h.set('id', self._create_id(m.group('header').strip()))
if after:
# Insert remaining lines as first block for future parsing.
blocks.insert(0, after)
else:
# This should never happen, but just in case...
message(CRITICAL, "We've got a problem header!")
def _get_meta(self):
""" Return meta data suported by this ext as a tuple """
level = int(self.config['level'][0]) - 1
force = self._str2bool(self.config['forceid'][0])
if hasattr(self.md, 'Meta'):
if self.md.Meta.has_key('header_level'):
level = int(self.md.Meta['header_level'][0]) - 1
if self.md.Meta.has_key('header_forceid'):
force = self._str2bool(self.md.Meta['header_forceid'][0])
return level, force
def _str2bool(self, s, default=False):
""" Convert a string to a booleen value. """
s = str(s)
if s.lower() in ['0', 'f', 'false', 'off', 'no', 'n']:
return False
elif s.lower() in ['1', 't', 'true', 'on', 'yes', 'y']:
return True
return default
def _unique_id(self, id):
""" Ensure ID is unique. Append '_1', '_2'... if not """
while id in self.IDs:
m = IDCOUNT_RE.match(id)
if m:
id = '%s_%d'% (m.group(1), int(m.group(2))+1)
else:
id = '%s_%d'% (id, 1)
self.IDs.append(id)
return id
def _create_id(self, header):
""" Return ID from Header text. """
h = ''
for c in header.lower().replace(' ', '_'):
if c in ID_CHARS:
h += c
elif c not in punctuation:
h += '+'
return self._unique_id(h)
class HeaderIdExtension (markdown.Extension):
def __init__(self, configs):
# set defaults
self.config = {
'level' : ['1', 'Base level for headers.'],
'forceid' : ['True', 'Force all headers to have an id.']
}
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
self.processor = HeaderIdProcessor(md.parser)
self.processor.md = md
self.processor.config = self.config
# Replace existing hasheader in place.
md.parser.blockprocessors['hashheader'] = self.processor
def reset(self):
self.processor.IDs = []
def makeExtension(configs=None):
return HeaderIdExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()

Просмотреть файл

@@ -1,62 +0,0 @@
#!/usr/bin/env python
"""
HTML Tidy Extension for Python-Markdown
=======================================
Runs [HTML Tidy][] on the output of Python-Markdown using the [uTidylib][]
Python wrapper. Both libtidy and uTidylib must be installed on your system.
Note that any Tidy [options][] can be passed in as extension configs. So,
for example, to output HTML rather than XHTML, set ``output_xhtml=0``. To
indent the output, set ``indent=auto`` and to have Tidy wrap the output in
``<html>`` and ``<body>`` tags, set ``show_body_only=0``.
[HTML Tidy]: http://tidy.sourceforge.net/
[uTidylib]: http://utidylib.berlios.de/
[options]: http://tidy.sourceforge.net/docs/quickref.html
Copyright (c)2008 [Waylan Limberg](http://achinghead.com)
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Dependencies:
* [Python2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
* [HTML Tidy](http://utidylib.berlios.de/)
* [uTidylib](http://utidylib.berlios.de/)
"""
import markdown
import tidy
class TidyExtension(markdown.Extension):
def __init__(self, configs):
# Set defaults to match typical markdown behavior.
self.config = dict(output_xhtml=1,
show_body_only=1,
)
# Merge in user defined configs, overriding any present if necessary.
for c in configs:
self.config[c[0]] = c[1]
def extendMarkdown(self, md, md_globals):
# Save options to markdown instance
md.tidy_options = self.config
# Add TidyProcessor to postprocessors
md.postprocessors['tidy'] = TidyProcessor(md)
class TidyProcessor(markdown.postprocessors.Postprocessor):
def run(self, text):
# Pass text to Tidy. As Tidy does not accept unicode we need to encode
# it and decode its return value.
return unicode(tidy.parseString(text.encode('utf-8'),
**self.markdown.tidy_options))
def makeExtension(configs=None):
return TidyExtension(configs=configs)
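A hedged usage sketch; it assumes libtidy and uTidylib are installed and that this module is importable as the html_tidy extension. Any Tidy option can be passed through extension_configs as key/value pairs:

import markdown

md = markdown.Markdown(extensions=['html_tidy'],
                       extension_configs={'html_tidy': [('output_xhtml', 0),
                                                        ('indent', 'auto')]})
html = md.convert('Some *emphasised* text')   # the converted output is run through Tidy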

Просмотреть файл

@@ -1,119 +0,0 @@
"""
========================= IMAGE LINKS =================================
Turns paragraphs like
<~~~~~~~~~~~~~~~~~~~~~~~~
dir/subdir
dir/subdir
dir/subdir
~~~~~~~~~~~~~~
dir/subdir
dir/subdir
dir/subdir
~~~~~~~~~~~~~~~~~~~>
Into mini-photo galleries.
"""
import re, markdown
import url_manager
IMAGE_LINK = """<a href="%s"><img src="%s" title="%s"/></a>"""
SLIDESHOW_LINK = """<a href="%s" target="_blank">[slideshow]</a>"""
ALBUM_LINK = """&nbsp;<a href="%s">[%s]</a>"""
class ImageLinksExtension(markdown.Extension):
def extendMarkdown(self, md, md_globals):
md.preprocessors.add("imagelink", ImageLinkPreprocessor(md), "_begin")
class ImageLinkPreprocessor(markdown.preprocessors.Preprocessor):
def run(self, lines):
url = url_manager.BlogEntryUrl(url_manager.BlogUrl("all"),
"2006/08/29/the_rest_of_our")
all_images = []
blocks = []
in_image_block = False
new_lines = []
for line in lines:
if line.startswith("<~~~~~~~"):
albums = []
rows = []
in_image_block = True
if not in_image_block:
new_lines.append(line)
else:
line = line.strip()
if line.endswith("~~~~~~>") or not line:
in_image_block = False
new_block = "<div><br/><center><span class='image-links'>\n"
album_url_hash = {}
for row in rows:
for photo_url, title in row:
new_block += "&nbsp;"
new_block += IMAGE_LINK % (photo_url,
photo_url.get_thumbnail(),
title)
album_url_hash[str(photo_url.get_album())] = 1
new_block += "<br/>"
new_block += "</span>"
new_block += SLIDESHOW_LINK % url.get_slideshow()
album_urls = album_url_hash.keys()
album_urls.sort()
if len(album_urls) == 1:
new_block += ALBUM_LINK % (album_urls[0], "complete album")
else :
for i in range(len(album_urls)) :
new_block += ALBUM_LINK % (album_urls[i],
"album %d" % (i + 1) )
new_lines.append(new_block + "</center><br/></div>")
elif line[1:6] == "~~~~~" :
rows.append([]) # start a new row
else :
parts = line.split()
line = parts[0]
title = " ".join(parts[1:])
album, photo = line.split("/")
photo_url = url.get_photo(album, photo,
len(all_images)+1)
all_images.append(photo_url)
rows[-1].append((photo_url, title))
if not album in albums :
albums.append(album)
return new_lines
def makeExtension(configs):
return ImageLinksExtension(configs)

Просмотреть файл

@@ -1,90 +0,0 @@
#!/usr/bin/python
"""
Meta Data Extension for Python-Markdown
=======================================
This extension adds Meta Data handling to markdown.
Basic Usage:
>>> import markdown
>>> text = '''Title: A Test Doc.
... Author: Waylan Limberg
... John Doe
... Blank_Data:
...
... The body. This is paragraph one.
... '''
>>> md = markdown.Markdown(['meta'])
>>> md.convert(text)
u'<p>The body. This is paragraph one.</p>'
>>> md.Meta
{u'blank_data': [u''], u'author': [u'Waylan Limberg', u'John Doe'], u'title': [u'A Test Doc.']}
Make sure text without Meta Data still works (markdown < 1.6b returns a <p>).
>>> text = ' Some Code - not extra lines of meta data.'
>>> md = markdown.Markdown(['meta'])
>>> md.convert(text)
u'<pre><code>Some Code - not extra lines of meta data.\\n</code></pre>'
>>> md.Meta
{}
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
Project website: <http://www.freewisdom.org/project/python-markdown/Meta-Data>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
"""
import markdown, re
# Global Vars
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
class MetaExtension (markdown.Extension):
""" Meta-Data extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add MetaPreprocessor to Markdown instance. """
md.preprocessors.add("meta", MetaPreprocessor(md), "_begin")
class MetaPreprocessor(markdown.preprocessors.Preprocessor):
""" Get Meta-Data. """
def run(self, lines):
""" Parse Meta-Data and store in Markdown.Meta. """
meta = {}
key = None
while 1:
line = lines.pop(0)
if line.strip() == '':
break # blank line - done
m1 = META_RE.match(line)
if m1:
key = m1.group('key').lower().strip()
meta[key] = [m1.group('value').strip()]
else:
m2 = META_MORE_RE.match(line)
if m2 and key:
# Add another line to existing key
meta[key].append(m2.group('value').strip())
else:
lines.insert(0, line)
break # no meta data - done
self.markdown.Meta = meta
return lines
def makeExtension(configs={}):
return MetaExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()

Просмотреть файл

@@ -1,114 +0,0 @@
import markdown
from markdown import etree
DEFAULT_URL = "http://www.freewisdom.org/projects/python-markdown/"
DEFAULT_CREATOR = "Yuri Takhteyev"
DEFAULT_TITLE = "Markdown in Python"
GENERATOR = "http://www.freewisdom.org/projects/python-markdown/markdown2rss"
month_map = { "Jan" : "01",
"Feb" : "02",
"March" : "03",
"April" : "04",
"May" : "05",
"June" : "06",
"July" : "07",
"August" : "08",
"September" : "09",
"October" : "10",
"November" : "11",
"December" : "12" }
def get_time(heading):
heading = heading.split("-")[0]
heading = heading.strip().replace(",", " ").replace(".", " ")
month, date, year = heading.split()
month = month_map[month]
return rdftime(" ".join((month, date, year, "12:00:00 AM")))
def rdftime(time):
time = time.replace(":", " ")
time = time.replace("/", " ")
time = time.split()
return "%s-%s-%sT%s:%s:%s-08:00" % (time[0], time[1], time[2],
time[3], time[4], time[5])
def get_date(text):
return "date"
class RssExtension (markdown.Extension):
def extendMarkdown(self, md, md_globals):
self.config = { 'URL' : [DEFAULT_URL, "Main URL"],
'CREATOR' : [DEFAULT_CREATOR, "Feed creator's name"],
'TITLE' : [DEFAULT_TITLE, "Feed title"] }
md.xml_mode = True
# Insert a tree-processor that would actually add the title tag
treeprocessor = RssTreeProcessor(md)
treeprocessor.ext = self
md.treeprocessors['rss'] = treeprocessor
md.stripTopLevelTags = 0
md.docType = '<?xml version="1.0" encoding="utf-8"?>\n'
class RssTreeProcessor(markdown.treeprocessors.Treeprocessor):
def run (self, root):
rss = etree.Element("rss")
rss.set("version", "2.0")
channel = etree.SubElement(rss, "channel")
for tag, text in (("title", self.ext.getConfig("TITLE")),
("link", self.ext.getConfig("URL")),
("description", None)):
element = etree.SubElement(channel, tag)
element.text = text
for child in root:
if child.tag in ["h1", "h2", "h3", "h4", "h5"]:
heading = child.text.strip()
item = etree.SubElement(channel, "item")
link = etree.SubElement(item, "link")
link.text = self.ext.getConfig("URL")
title = etree.SubElement(item, "title")
title.text = heading
guid = ''.join([x for x in heading if x.isalnum()])
guidElem = etree.SubElement(item, "guid")
guidElem.text = guid
guidElem.set("isPermaLink", "false")
elif child.tag in ["p"]:
try:
description = etree.SubElement(item, "description")
except UnboundLocalError:
# Item not defined - moving on
pass
else:
if len(child):
content = "\n".join([etree.tostring(node)
for node in child])
else:
content = child.text
pholder = self.markdown.htmlStash.store(
"<![CDATA[ %s]]>" % content)
description.text = pholder
return rss
def makeExtension(configs):
return RssExtension(configs)
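A rough sketch of how this extension is invoked (assuming it is importable as the rss extension); h1-h5 headings become <item> titles and the following paragraphs become descriptions:

import markdown

text = """# First Post

Body of the first post.

# Second Post

Body of the second post.
"""
xml = markdown.markdown(text, ['rss'])   # an RSS 2.0 document instead of plain HTML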

Просмотреть файл

@@ -1,97 +0,0 @@
#!/usr/bin/env python
"""
Tables Extension for Python-Markdown
====================================
Added parsing of tables to Python-Markdown.
A simple example:
First Header | Second Header
------------- | -------------
Content Cell | Content Cell
Content Cell | Content Cell
Copyright 2009 - [Waylan Limberg](http://achinghead.com)
"""
import markdown
from markdown import etree
class TableProcessor(markdown.blockprocessors.BlockProcessor):
""" Process Tables. """
def test(self, parent, block):
rows = block.split('\n')
return (len(rows) > 2 and '|' in rows[0] and
'|' in rows[1] and '-' in rows[1] and
rows[1][0] in ['|', ':', '-'])
def run(self, parent, blocks):
""" Parse a table block and build table. """
block = blocks.pop(0).split('\n')
header = block[:2]
rows = block[2:]
# Get format type (bordered by pipes or not)
border = False
if header[0].startswith('|'):
border = True
# Get alignment of columns
align = []
for c in self._split_row(header[1], border):
if c.startswith(':') and c.endswith(':'):
align.append('center')
elif c.startswith(':'):
align.append('left')
elif c.endswith(':'):
align.append('right')
else:
align.append(None)
# Build table
table = etree.SubElement(parent, 'table')
thead = etree.SubElement(table, 'thead')
self._build_row(header[0], thead, align, border)
tbody = etree.SubElement(table, 'tbody')
for row in rows:
self._build_row(row, tbody, align, border)
def _build_row(self, row, parent, align, border):
""" Given a row of text, build table cells. """
tr = etree.SubElement(parent, 'tr')
tag = 'td'
if parent.tag == 'thead':
tag = 'th'
cells = self._split_row(row, border)
# We use align here rather than cells to ensure every row
# contains the same number of columns.
for i, a in enumerate(align):
c = etree.SubElement(tr, tag)
try:
c.text = cells[i].strip()
except IndexError:
c.text = ""
if a:
c.set('align', a)
def _split_row(self, row, border):
""" split a row of text into list of cells. """
if border:
if row.startswith('|'):
row = row[1:]
if row.endswith('|'):
row = row[:-1]
return row.split('|')
class TableExtension(markdown.Extension):
""" Add tables to Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add an instance of TableProcessor to BlockParser. """
md.parser.blockprocessors.add('table',
TableProcessor(md.parser),
'<hashheader')
def makeExtension(configs={}):
return TableExtension(configs=configs)
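A usage sketch mirroring the docstring example (assumes Python-Markdown 2.x):

import markdown

text = """First Header  | Second Header
------------- | -------------
Content Cell  | Content Cell
Content Cell  | Content Cell
"""
# Produces a <table> with a <thead> row of <th> cells and a <tbody> of <td> cells.
html = markdown.markdown(text, ['tables'])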

Просмотреть файл

@@ -1,140 +0,0 @@
"""
Table of Contents Extension for Python-Markdown
* * *
(c) 2008 [Jack Miller](http://codezen.org)
Dependencies:
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
"""
import markdown
from markdown import etree
import re
class TocTreeprocessor(markdown.treeprocessors.Treeprocessor):
# Iterator wrapper to get parent and child all at once
def iterparent(self, root):
for parent in root.getiterator():
for child in parent:
yield parent, child
def run(self, doc):
div = etree.Element("div")
div.attrib["class"] = "toc"
last_li = None
# Add title to the div
if self.config["title"][0]:
header = etree.SubElement(div, "span")
header.attrib["class"] = "toctitle"
header.text = self.config["title"][0]
level = 0
list_stack=[div]
header_rgx = re.compile("[Hh][123456]")
# Get a list of id attributes
used_ids = []
for c in doc.getiterator():
if "id" in c.attrib:
used_ids.append(c.attrib["id"])
for (p, c) in self.iterparent(doc):
if not c.text:
continue
# To keep the output from screwing up the
# validation by putting a <div> inside of a <p>
# we actually replace the <p> in its entirety.
# We do not allow the marker inside a header as that
# would cause an endless loop of placing a new TOC
# inside a previously generated TOC.
if c.text.find(self.config["marker"][0]) > -1 and not header_rgx.match(c.tag):
for i in range(len(p)):
if p[i] == c:
p[i] = div
break
if header_rgx.match(c.tag):
tag_level = int(c.tag[-1])
# Regardless of how many levels we jumped
# only one list should be created, since
# empty lists containing lists are illegal.
if tag_level < level:
list_stack.pop()
level = tag_level
if tag_level > level:
newlist = etree.Element("ul")
if last_li:
last_li.append(newlist)
else:
list_stack[-1].append(newlist)
list_stack.append(newlist)
level = tag_level
# Do not override pre-existing ids
if not "id" in c.attrib:
id = self.config["slugify"][0](c.text)
if id in used_ids:
ctr = 1
while "%s_%d" % (id, ctr) in used_ids:
ctr += 1
id = "%s_%d" % (id, ctr)
used_ids.append(id)
c.attrib["id"] = id
else:
id = c.attrib["id"]
# List item link, to be inserted into the toc div
last_li = etree.Element("li")
link = etree.SubElement(last_li, "a")
link.text = c.text
link.attrib["href"] = '#' + id
if int(self.config["anchorlink"][0]):
anchor = etree.SubElement(c, "a")
anchor.text = c.text
anchor.attrib["href"] = "#" + id
anchor.attrib["class"] = "toclink"
c.text = ""
list_stack[-1].append(last_li)
class TocExtension(markdown.Extension):
def __init__(self, configs):
self.config = { "marker" : ["[TOC]",
"Text to find and replace with Table of Contents -"
"Defaults to \"[TOC]\""],
"slugify" : [self.slugify,
"Function to generate anchors based on header text-"
"Defaults to a built in slugify function."],
"title" : [None,
"Title to insert into TOC <div> - "
"Defaults to None"],
"anchorlink" : [0,
"1 if header should be a self link"
"Defaults to 0"]}
for key, value in configs:
self.setConfig(key, value)
# This is exactly the same as Django's slugify
def slugify(self, value):
""" Slugify a string, to make it URL friendly. """
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
return re.sub('[-\s]+','-',value)
def extendMarkdown(self, md, md_globals):
tocext = TocTreeprocessor(md)
tocext.config = self.config
md.treeprocessors.add("toc", tocext, "_begin")
def makeExtension(configs={}):
return TocExtension(configs=configs)
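A minimal sketch, assuming Python-Markdown 2.x; the [TOC] paragraph is replaced by the generated <div class="toc"> and each heading receives a slugified id:

import markdown

text = """[TOC]

# Section One

## A Subsection

# Section Two
"""
html = markdown.markdown(text, ['toc'])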

Просмотреть файл

@@ -1,155 +0,0 @@
#!/usr/bin/env python
'''
WikiLinks Extension for Python-Markdown
======================================
Converts [[WikiLinks]] to relative links. Requires Python-Markdown 2.0+
Basic usage:
>>> import markdown
>>> text = "Some text with a [[WikiLink]]."
>>> html = markdown.markdown(text, ['wikilinks'])
>>> html
u'<p>Some text with a <a class="wikilink" href="/WikiLink/">WikiLink</a>.</p>'
Whitespace behavior:
>>> markdown.markdown('[[ foo bar_baz ]]', ['wikilinks'])
u'<p><a class="wikilink" href="/foo_bar_baz/">foo bar_baz</a></p>'
>>> markdown.markdown('foo [[ ]] bar', ['wikilinks'])
u'<p>foo bar</p>'
To define custom settings the simple way:
>>> markdown.markdown(text,
... ['wikilinks(base_url=/wiki/,end_url=.html,html_class=foo)']
... )
u'<p>Some text with a <a class="foo" href="/wiki/WikiLink.html">WikiLink</a>.</p>'
Custom settings the complex way:
>>> md = markdown.Markdown(
... extensions = ['wikilinks'],
... extension_configs = {'wikilinks': [
... ('base_url', 'http://example.com/'),
... ('end_url', '.html'),
... ('html_class', '') ]},
... safe_mode = True)
>>> md.convert(text)
u'<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>'
Use MetaData with mdx_meta.py (Note the blank html_class in MetaData):
>>> text = """wiki_base_url: http://example.com/
... wiki_end_url: .html
... wiki_html_class:
...
... Some text with a [[WikiLink]]."""
>>> md = markdown.Markdown(extensions=['meta', 'wikilinks'])
>>> md.convert(text)
u'<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>'
MetaData should not carry over to next document:
>>> md.convert("No [[MetaData]] here.")
u'<p>No <a class="wikilink" href="/MetaData/">MetaData</a> here.</p>'
Define a custom URL builder:
>>> def my_url_builder(label, base, end):
... return '/bar/'
>>> md = markdown.Markdown(extensions=['wikilinks'],
... extension_configs={'wikilinks' : [('build_url', my_url_builder)]})
>>> md.convert('[[foo]]')
u'<p><a class="wikilink" href="/bar/">foo</a></p>'
From the command line:
python markdown.py -x wikilinks(base_url=http://example.com/,end_url=.html,html_class=foo) src.txt
By [Waylan Limberg](http://achinghead.com/).
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
'''
import markdown
import re
def build_url(label, base, end):
""" Build a url from the label, a base, and an end. """
clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
return '%s%s%s'% (base, clean_label, end)
class WikiLinkExtension(markdown.Extension):
def __init__(self, configs):
# set extension defaults
self.config = {
'base_url' : ['/', 'String to append to beginning of URL.'],
'end_url' : ['/', 'String to append to end of URL.'],
'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'],
'build_url' : [build_url, 'Callable formats URL from label.'],
}
# Override defaults with user settings
for key, value in configs :
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
self.md = md
# append to end of inline patterns
WIKILINK_RE = r'\[\[([A-Za-z0-9_ -]+)\]\]'
wikilinkPattern = WikiLinks(WIKILINK_RE, self.config)
wikilinkPattern.md = md
md.inlinePatterns.add('wikilink', wikilinkPattern, "<not_strong")
class WikiLinks(markdown.inlinepatterns.Pattern):
def __init__(self, pattern, config):
markdown.inlinepatterns.Pattern.__init__(self, pattern)
self.config = config
def handleMatch(self, m):
if m.group(2).strip():
base_url, end_url, html_class = self._getMeta()
label = m.group(2).strip()
url = self.config['build_url'][0](label, base_url, end_url)
a = markdown.etree.Element('a')
a.text = label
a.set('href', url)
if html_class:
a.set('class', html_class)
else:
a = ''
return a
def _getMeta(self):
""" Return meta data or config data. """
base_url = self.config['base_url'][0]
end_url = self.config['end_url'][0]
html_class = self.config['html_class'][0]
if hasattr(self.md, 'Meta'):
if self.md.Meta.has_key('wiki_base_url'):
base_url = self.md.Meta['wiki_base_url'][0]
if self.md.Meta.has_key('wiki_end_url'):
end_url = self.md.Meta['wiki_end_url'][0]
if self.md.Meta.has_key('wiki_html_class'):
html_class = self.md.Meta['wiki_html_class'][0]
return base_url, end_url, html_class
def makeExtension(configs=None) :
return WikiLinkExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()

Просмотреть файл

@@ -1,274 +0,0 @@
# markdown/html4.py
#
# Add html4 serialization to older versions of ElementTree
# Taken from ElementTree 1.3 preview with slight modifications
#
# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
import markdown
ElementTree = markdown.etree.ElementTree
QName = markdown.etree.QName
Comment = markdown.etree.Comment
PI = markdown.etree.PI
ProcessingInstruction = markdown.etree.ProcessingInstruction
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta" "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
# dublin core
"http://purl.org/dc/elements/1.1/": "dc",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode(text, encoding):
try:
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_cdata(text, encoding):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 characters, or so. Assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&amp;")
if "<" in text:
text = text.replace("<", "&lt;")
if ">" in text:
text = text.replace(">", "&gt;")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&amp;")
if "<" in text:
text = text.replace("<", "&lt;")
if ">" in text:
text = text.replace(">", "&gt;")
if "\"" in text:
text = text.replace("\"", "&quot;")
if "\n" in text:
text = text.replace("\n", "&#10;")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&amp;")
if ">" in text:
text = text.replace(">", "&gt;")
if "\"" in text:
text = text.replace("\"", "&quot;")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _serialize_html(write, elem, encoding, qnames, namespaces):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text, encoding))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text, encoding))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_html(write, e, encoding, qnames, None)
else:
write("<" + tag)
items = elem.items()
if items or namespaces:
items.sort() # lexical order
for k, v in items:
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib_html(v, encoding)
# FIXME: handle boolean attributes
write(" %s=\"%s\"" % (qnames[k], v))
if namespaces:
items = namespaces.items()
items.sort(key=lambda x: x[1]) # sort on prefix
for v, k in items:
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k.encode(encoding),
_escape_attrib(v, encoding)
))
write(">")
tag = tag.lower()
if text:
if tag == "script" or tag == "style":
write(_encode(text, encoding))
else:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_html(write, e, encoding, qnames, None)
if tag not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail, encoding))
def write_html(root, f,
# keyword arguments
encoding="us-ascii",
default_namespace=None):
assert root is not None
if not hasattr(f, "write"):
f = open(f, "wb")
write = f.write
if not encoding:
encoding = "us-ascii"
qnames, namespaces = _namespaces(
root, encoding, default_namespace
)
_serialize_html(
write, root, encoding, qnames, namespaces
)
# --------------------------------------------------------------------
# serialization support
def _namespaces(elem, encoding, default_namespace=None):
# identify namespaces used in this tree
# maps qnames to *encoded* prefix:local names
qnames = {None: None}
# maps uri:s to prefixes
namespaces = {}
if default_namespace:
namespaces[default_namespace] = ""
def encode(text):
return text.encode(encoding)
def add_qname(qname):
# calculate serialized qname representation
try:
if qname[:1] == "{":
uri, tag = qname[1:].split("}", 1)
prefix = namespaces.get(uri)
if prefix is None:
prefix = _namespace_map.get(uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
if prefix != "xml":
namespaces[uri] = prefix
if prefix:
qnames[qname] = encode("%s:%s" % (prefix, tag))
else:
qnames[qname] = encode(tag) # default element
else:
if default_namespace:
# FIXME: can this be handled in XML 1.0?
raise ValueError(
"cannot use non-qualified names with "
"default_namespace option"
)
qnames[qname] = encode(qname)
except TypeError:
_raise_serialization_error(qname)
# populate qname and namespaces table
try:
iterate = elem.iter
except AttributeError:
iterate = elem.getiterator # cET compatibility
for elem in iterate():
tag = elem.tag
if isinstance(tag, QName) and tag.text not in qnames:
add_qname(tag.text)
elif isinstance(tag, basestring):
if tag not in qnames:
add_qname(tag)
elif tag is not None and tag is not Comment and tag is not PI:
_raise_serialization_error(tag)
for key, value in elem.items():
if isinstance(key, QName):
key = key.text
if key not in qnames:
add_qname(key)
if isinstance(value, QName) and value.text not in qnames:
add_qname(value.text)
text = elem.text
if isinstance(text, QName) and text.text not in qnames:
add_qname(text.text)
return qnames, namespaces
def to_html_string(element, encoding=None):
class dummy:
pass
data = []
file = dummy()
file.write = data.append
write_html(ElementTree(element).getroot(),file,encoding)
return "".join(data)

Просмотреть файл

@@ -1,371 +0,0 @@
"""
INLINE PATTERNS
=============================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and needs to support the following methods:
pattern.getCompiledRegExp() # returns a regular expression
pattern.handleMatch(m) # takes a match object and returns
# an ElementTree element or just plain text
All of python markdown's built-in patterns subclass from Pattern,
but you can add additional patterns that don't.
Also note that all the regular expressions used by inline patterns must
capture the whole block. For this reason, they all start with
'^(.*?)' and end with '(.*?)$'. For the built-in expressions,
Pattern takes care of adding the '^(.*?)' and '(.*?)$'.
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
import markdown
import re
from urlparse import urlparse, urlunparse
import sys
if sys.version >= "3.0":
from html import entities as htmlentitydefs
else:
import htmlentitydefs
"""
The actual regular expressions for patterns
-----------------------------------------------------------------------------
"""
NOBRACKET = r'[^\]\[]*'
BRK = ( r'\[('
+ (NOBRACKET + r'(\[')*6
+ (NOBRACKET+ r'\])*')*6
+ NOBRACKET + r')\]' )
NOIMG = r'(?<!\!)'
BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)' # `e=f()` or ``e=f("`")``
ESCAPE_RE = r'\\(.)' # \<
EMPHASIS_RE = r'(\*)([^\*]*)\2' # *emphasis*
STRONG_RE = r'(\*{2}|_{2})(.*?)\2' # **strong**
STRONG_EM_RE = r'(\*{3}|_{3})(.*?)\2' # ***strong***
if markdown.SMART_EMPHASIS:
EMPHASIS_2_RE = r'(?<!\S)(_)(\S.*?)\2' # _emphasis_
else:
EMPHASIS_2_RE = r'(_)(.*?)\2' # _emphasis_
LINK_RE = NOIMG + BRK + \
r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*)\12)?\)'''
# [text](url) or [text](<url>)
IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)'
# ![alttxt](http://x.com/) or ![alttxt](<http://x.com/>)
REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]' # [Google][3]
IMAGE_REFERENCE_RE = r'\!' + BRK + '\s*\[([^\]]*)\]' # ![alt text][2]
NOT_STRONG_RE = r'( \* )' # stand-alone * or _
AUTOLINK_RE = r'<((?:f|ht)tps?://[^>]*)>' # <http://www.123.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <me@example.com>
HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...>
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &amp;
LINE_BREAK_RE = r' \n' # two spaces at end of line
LINE_BREAK_2_RE = r' $' # two spaces at end of text
def dequote(string):
"""Remove quotes from around a string."""
if ( ( string.startswith('"') and string.endswith('"'))
or (string.startswith("'") and string.endswith("'")) ):
return string[1:-1]
else:
return string
ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
def handleAttributes(text, parent):
"""Set values of an element based on attribute definitions ({@id=123})."""
def attributeCallback(match):
parent.set(match.group(1), match.group(2).replace('\n', ' '))
return ATTR_RE.sub(attributeCallback, text)
"""
The pattern classes
-----------------------------------------------------------------------------
"""
class Pattern:
"""Base class that inline patterns subclass. """
def __init__ (self, pattern, markdown_instance=None):
"""
Create an instance of an inline pattern.
Keyword arguments:
* pattern: A regular expression that matches a pattern
"""
self.pattern = pattern
self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern, re.DOTALL)
# Api for Markdown to pass safe_mode into instance
self.safe_mode = False
if markdown_instance:
self.markdown = markdown_instance
def getCompiledRegExp (self):
""" Return a compiled regular expression. """
return self.compiled_re
def handleMatch(self, m):
"""Return a ElementTree element from the given match.
Subclasses should override this method.
Keyword arguments:
* m: A re match object containing a match of the pattern.
"""
pass
def type(self):
""" Return class name, to define pattern type """
return self.__class__.__name__
BasePattern = Pattern # for backward compatibility
class SimpleTextPattern (Pattern):
""" Return a simple text of group(2) of a Pattern. """
def handleMatch(self, m):
text = m.group(2)
if text == markdown.INLINE_PLACEHOLDER_PREFIX:
return None
return text
class SimpleTagPattern (Pattern):
"""
Return element of type `tag` with a text attribute of group(3)
of a Pattern.
"""
def __init__ (self, pattern, tag):
Pattern.__init__(self, pattern)
self.tag = tag
def handleMatch(self, m):
el = markdown.etree.Element(self.tag)
el.text = m.group(3)
return el
class SubstituteTagPattern (SimpleTagPattern):
""" Return a eLement of type `tag` with no children. """
def handleMatch (self, m):
return markdown.etree.Element(self.tag)
class BacktickPattern (Pattern):
""" Return a `<code>` element containing the matching text. """
def __init__ (self, pattern):
Pattern.__init__(self, pattern)
self.tag = "code"
def handleMatch(self, m):
el = markdown.etree.Element(self.tag)
el.text = markdown.AtomicString(m.group(3).strip())
return el
class DoubleTagPattern (SimpleTagPattern):
"""Return a ElementTree element nested in tag2 nested in tag1.
Useful for strong emphasis etc.
"""
def handleMatch(self, m):
tag1, tag2 = self.tag.split(",")
el1 = markdown.etree.Element(tag1)
el2 = markdown.etree.SubElement(el1, tag2)
el2.text = m.group(3)
return el1
class HtmlPattern (Pattern):
""" Store raw inline html and return a placeholder. """
def handleMatch (self, m):
rawhtml = m.group(2)
inline = True
place_holder = self.markdown.htmlStash.store(rawhtml)
return place_holder
class LinkPattern (Pattern):
""" Return a link element from the given match. """
def handleMatch(self, m):
el = markdown.etree.Element("a")
el.text = m.group(2)
title = m.group(11)
href = m.group(9)
if href:
if href[0] == "<":
href = href[1:-1]
el.set("href", self.sanitize_url(href.strip()))
else:
el.set("href", "")
if title:
title = dequote(title) #.replace('"', "&quot;")
el.set("title", title)
return el
def sanitize_url(self, url):
"""
Sanitize a url against xss attacks in "safe_mode".
Rather than specifically blacklisting `javascript:alert("XSS")` and all
its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known
safe url formats. Most urls contain a network location, however some
are known not to (i.e.: mailto links). Script urls do not contain a
location. Additionally, for `javascript:...`, the scheme would be
"javascript" but some aliases will appear to `urlparse()` to have no
scheme. On top of that relative links (i.e.: "foo/bar.html") have no
scheme. Therefore we must check "path", "parameters", "query" and
"fragment" for any literal colons. We don't check "scheme" for colons
because it *should* never have any and "netloc" must allow the form:
`username:password@host:port`.
"""
locless_schemes = ['', 'mailto', 'news']
scheme, netloc, path, params, query, fragment = url = urlparse(url)
safe_url = False
if netloc != '' or scheme in locless_schemes:
safe_url = True
for part in url[2:]:
if ":" in part:
safe_url = False
if self.markdown.safeMode and not safe_url:
return ''
else:
return urlunparse(url)
class ImagePattern(LinkPattern):
""" Return a img element from the given match. """
def handleMatch(self, m):
el = markdown.etree.Element("img")
src_parts = m.group(9).split()
if src_parts:
src = src_parts[0]
if src[0] == "<" and src[-1] == ">":
src = src[1:-1]
el.set('src', self.sanitize_url(src))
else:
el.set('src', "")
if len(src_parts) > 1:
el.set('title', dequote(" ".join(src_parts[1:])))
if markdown.ENABLE_ATTRIBUTES:
truealt = handleAttributes(m.group(2), el)
else:
truealt = m.group(2)
el.set('alt', truealt)
return el
class ReferencePattern(LinkPattern):
""" Match to a stored reference and return link element. """
def handleMatch(self, m):
if m.group(9):
id = m.group(9).lower()
else:
# if we got something like "[Google][]"
# we'll use "google" as the id
id = m.group(2).lower()
if not id in self.markdown.references: # ignore undefined refs
return None
href, title = self.markdown.references[id]
text = m.group(2)
return self.makeTag(href, title, text)
def makeTag(self, href, title, text):
el = markdown.etree.Element('a')
el.set('href', self.sanitize_url(href))
if title:
el.set('title', title)
el.text = text
return el
class ImageReferencePattern (ReferencePattern):
""" Match to a stored reference and return img element. """
def makeTag(self, href, title, text):
el = markdown.etree.Element("img")
el.set("src", self.sanitize_url(href))
if title:
el.set("title", title)
el.set("alt", text)
return el
class AutolinkPattern (Pattern):
""" Return a link Element given an autolink (`<http://example/com>`). """
def handleMatch(self, m):
el = markdown.etree.Element("a")
el.set('href', m.group(2))
el.text = markdown.AtomicString(m.group(2))
return el
class AutomailPattern (Pattern):
"""
Return a mailto link Element given an automail link (`<foo@example.com>`).
"""
def handleMatch(self, m):
el = markdown.etree.Element('a')
email = m.group(2)
if email.startswith("mailto:"):
email = email[len("mailto:"):]
def codepoint2name(code):
"""Return entity definition by code, or the code if not defined."""
entity = htmlentitydefs.codepoint2name.get(code)
if entity:
return "%s%s;" % (markdown.AMP_SUBSTITUTE, entity)
else:
return "%s#%d;" % (markdown.AMP_SUBSTITUTE, code)
letters = [codepoint2name(ord(letter)) for letter in email]
el.text = markdown.AtomicString(''.join(letters))
mailto = "mailto:" + email
mailto = "".join([markdown.AMP_SUBSTITUTE + '#%d;' %
ord(letter) for letter in mailto])
el.set('href', mailto)
return el
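As a sketch of the Pattern API described in the module docstring, the snippet below registers a hypothetical ~~strikethrough~~ pattern using SimpleTagPattern; the name 'del' and the regular expression are illustrative, not part of the library:

import markdown

# Within the full compiled expression '^(.*?)(~~)(.+?)~~(.*?)$', group(3) is the
# text between the delimiters, which is what SimpleTagPattern wraps in the tag.
DEL_RE = r'(~~)(.+?)~~'

md = markdown.Markdown()
md.inlinePatterns.add('del',
                      markdown.inlinepatterns.SimpleTagPattern(DEL_RE, 'del'),
                      '<not_strong')
html = md.convert('this is ~~struck~~ text')   # -> <p>this is <del>struck</del> text</p>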

Просмотреть файл

@@ -1,162 +0,0 @@
class OrderedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
Copied from Django's SortedDict with some modifications.
"""
def __new__(cls, *args, **kwargs):
instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
if data is None:
data = {}
super(OrderedDict, self).__init__(data)
if isinstance(data, dict):
self.keyOrder = data.keys()
else:
self.keyOrder = []
for key, value in data:
if key not in self.keyOrder:
self.keyOrder.append(key)
def __deepcopy__(self, memo):
from copy import deepcopy
return self.__class__([(key, deepcopy(value, memo))
for key, value in self.iteritems()])
def __setitem__(self, key, value):
super(OrderedDict, self).__setitem__(key, value)
if key not in self.keyOrder:
self.keyOrder.append(key)
def __delitem__(self, key):
super(OrderedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
for k in self.keyOrder:
yield k
def pop(self, k, *args):
result = super(OrderedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(OrderedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def items(self):
return zip(self.keyOrder, self.values())
def iteritems(self):
for key in self.keyOrder:
yield key, super(OrderedDict, self).__getitem__(key)
def keys(self):
return self.keyOrder[:]
def iterkeys(self):
return iter(self.keyOrder)
def values(self):
return [super(OrderedDict, self).__getitem__(k) for k in self.keyOrder]
def itervalues(self):
for key in self.keyOrder:
yield super(OrderedDict, self).__getitem__(key)
def update(self, dict_):
for k, v in dict_.items():
self.__setitem__(k, v)
def setdefault(self, key, default):
if key not in self.keyOrder:
self.keyOrder.append(key)
return super(OrderedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Return the value of the item at the given zero-based index."""
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Insert the key, value pair before the item with the given index."""
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(OrderedDict, self).__setitem__(key, value)
def copy(self):
"""Return a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
obj = self.__class__(self)
obj.keyOrder = self.keyOrder[:]
return obj
def __repr__(self):
"""
Replace the normal dict.__repr__ with a version that returns the keys
in the order in which they were inserted.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
def clear(self):
super(OrderedDict, self).clear()
self.keyOrder = []
def index(self, key):
""" Return the index of a given key. """
return self.keyOrder.index(key)
def index_for_location(self, location):
""" Return index or None for a given location. """
if location == '_begin':
i = 0
elif location == '_end':
i = None
elif location.startswith('<') or location.startswith('>'):
i = self.index(location[1:])
if location.startswith('>'):
if i >= len(self):
# last item
i = None
else:
i += 1
else:
raise ValueError('Not a valid location: "%s". Location key '
'must start with a ">" or "<".' % location)
return i
def add(self, key, value, location):
""" Insert by key location. """
i = self.index_for_location(location)
if i is not None:
self.insert(i, key, value)
else:
self.__setitem__(key, value)
def link(self, key, location):
""" Change location of an existing item. """
n = self.keyOrder.index(key)
del self.keyOrder[n]
i = self.index_for_location(location)
try:
if i is not None:
self.keyOrder.insert(i, key)
else:
self.keyOrder.append(key)
except Exception:
# restore to prevent data loss and reraise
self.keyOrder.insert(n, key)
raise
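As an illustrative sketch (using only the OrderedDict defined above), the location strings accepted by `add` and `link` are '_begin', '_end', or an existing key prefixed with '<' (insert before it) or '>' (insert after it):

d = OrderedDict()
d['first'] = 1
d['last'] = 3
d.add('middle', 2, '>first')   # insert right after 'first'
d.add('start', 0, '_begin')    # insert at the very beginning
d.link('last', '<middle')      # move 'last' to just before 'middle'
print(d.keys())                # ['start', 'first', 'last', 'middle']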

Просмотреть файл

@ -1,77 +0,0 @@
"""
POST-PROCESSORS
=============================================================================
Markdown also allows post-processors, which are similar to preprocessors in
that they need to implement a "run" method. However, they are run after core
processing.
"""
import markdown
class Processor:
def __init__(self, markdown_instance=None):
if markdown_instance:
self.markdown = markdown_instance
class Postprocessor(Processor):
"""
Postprocessors are run after the ElementTree is converted back into text.
Each Postprocessor implements a "run" method that takes a pointer to a
text string, modifies it as necessary and returns a text string.
Postprocessors must extend markdown.Postprocessor.
"""
def run(self, text):
"""
Subclasses of Postprocessor should implement a `run` method, which
takes the html document as a single text string and returns a
(possibly modified) string.
"""
pass
class RawHtmlPostprocessor(Postprocessor):
""" Restore raw html to the document. """
def run(self, text):
""" Iterate over html stash and restore "safe" html. """
for i in range(self.markdown.htmlStash.html_counter):
html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
if self.markdown.safeMode and not safe:
if str(self.markdown.safeMode).lower() == 'escape':
html = self.escape(html)
elif str(self.markdown.safeMode).lower() == 'remove':
html = ''
else:
html = markdown.HTML_REMOVED_TEXT
if safe or not self.markdown.safeMode:
text = text.replace("<p>%s</p>" %
(markdown.preprocessors.HTML_PLACEHOLDER % i),
html + "\n")
text = text.replace(markdown.preprocessors.HTML_PLACEHOLDER % i,
html)
return text
def escape(self, html):
""" Basic html escaping """
html = html.replace('&', '&amp;')
html = html.replace('<', '&lt;')
html = html.replace('>', '&gt;')
return html.replace('"', '&quot;')
class AndSubstitutePostprocessor(Postprocessor):
""" Restore valid entities """
def __init__(self):
pass
def run(self, text):
text = text.replace(markdown.AMP_SUBSTITUTE, "&")
return text
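As a sketch of the contract described above (illustrative only; the class name and the appended marker are hypothetical, and it assumes the Markdown instance exposes its postprocessors OrderedDict as md.postprocessors, as in upstream Python-Markdown 2.x):

import markdown

class FooterPostprocessor(markdown.postprocessors.Postprocessor):
    """Hypothetical example: append a marker after core processing."""
    def run(self, text):
        return text + "\n<!-- rendered by markdown -->"

md = markdown.Markdown()
md.postprocessors.add('footer', FooterPostprocessor(md), '_end')
html = md.convert("Some *text*")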

Просмотреть файл

@ -1,214 +0,0 @@
"""
PRE-PROCESSORS
=============================================================================
Preprocessors work on source text before we start doing anything too
complicated.
"""
import re
import markdown
HTML_PLACEHOLDER_PREFIX = markdown.STX+"wzxhzdk:"
HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%d" + markdown.ETX
class Processor:
def __init__(self, markdown_instance=None):
if markdown_instance:
self.markdown = markdown_instance
class Preprocessor (Processor):
"""
Preprocessors are run after the text is broken into lines.
Each preprocessor implements a "run" method that takes a pointer to a
list of lines of the document, modifies it as necessary and returns
either the same pointer or a pointer to a new list.
Preprocessors must extend markdown.Preprocessor.
"""
def run(self, lines):
"""
Each subclass of Preprocessor should override the `run` method, which
takes the document as a list of strings split by newlines and returns
the (possibly modified) list of lines.
"""
pass
class HtmlStash:
"""
This class is used for stashing HTML objects that we extract
in the beginning and replace with place-holders.
"""
def __init__ (self):
""" Create a HtmlStash. """
self.html_counter = 0 # for counting inline html segments
self.rawHtmlBlocks=[]
def store(self, html, safe=False):
"""
Saves an HTML segment for later reinsertion. Returns a
placeholder string that needs to be inserted into the
document.
Keyword arguments:
* html: an html segment
* safe: label an html segment as safe for safemode
Returns : a placeholder string
"""
self.rawHtmlBlocks.append((html, safe))
placeholder = HTML_PLACEHOLDER % self.html_counter
self.html_counter += 1
return placeholder
def reset(self):
self.html_counter = 0
self.rawHtmlBlocks = []
class HtmlBlockPreprocessor(Preprocessor):
"""Remove html blocks from the text and store them for later retrieval."""
right_tag_patterns = ["</%s>", "%s>"]
def _get_left_tag(self, block):
return block[1:].replace(">", " ", 1).split()[0].lower()
def _get_right_tag(self, left_tag, block):
for p in self.right_tag_patterns:
tag = p % left_tag
i = block.rfind(tag)
if i > 2:
return tag.lstrip("<").rstrip(">"), i + len(p)-2 + len(left_tag)
return block.rstrip()[-len(left_tag)-2:-1].lower(), len(block)
def _equal_tags(self, left_tag, right_tag):
if left_tag == 'div' or left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
return True
if ("/" + left_tag) == right_tag:
return True
if (right_tag == "--" and left_tag == "--"):
return True
elif left_tag == right_tag[1:] \
and right_tag[0] != "<":
return True
else:
return False
def _is_oneliner(self, tag):
return (tag in ['hr', 'hr/'])
def run(self, lines):
text = "\n".join(lines)
new_blocks = []
text = text.split("\n\n")
items = []
left_tag = ''
right_tag = ''
in_tag = False # flag
while text:
block = text[0]
if block.startswith("\n"):
block = block[1:]
text = text[1:]
if block.startswith("\n"):
block = block[1:]
if not in_tag:
if block.startswith("<"):
left_tag = self._get_left_tag(block)
right_tag, data_index = self._get_right_tag(left_tag, block)
if data_index < len(block):
text.insert(0, block[data_index:])
block = block[:data_index]
if not (markdown.isBlockLevel(left_tag) \
or block[1] in ["!", "?", "@", "%"]):
new_blocks.append(block)
continue
if self._is_oneliner(left_tag):
new_blocks.append(block.strip())
continue
if block[1] == "!":
# is a comment block
left_tag = "--"
right_tag, data_index = self._get_right_tag(left_tag, block)
# keep checking conditions below and maybe just append
if block.rstrip().endswith(">") \
and self._equal_tags(left_tag, right_tag):
new_blocks.append(
self.markdown.htmlStash.store(block.strip()))
continue
else: #if not block[1] == "!":
# if is block level tag and is not complete
if markdown.isBlockLevel(left_tag) or left_tag == "--" \
and not block.rstrip().endswith(">"):
items.append(block.strip())
in_tag = True
else:
new_blocks.append(
self.markdown.htmlStash.store(block.strip()))
continue
new_blocks.append(block)
else:
items.append(block.strip())
right_tag, data_index = self._get_right_tag(left_tag, block)
if self._equal_tags(left_tag, right_tag):
# if find closing tag
in_tag = False
new_blocks.append(
self.markdown.htmlStash.store('\n\n'.join(items)))
items = []
if items:
new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items)))
new_blocks.append('\n')
new_text = "\n\n".join(new_blocks)
return new_text.split("\n")
class ReferencePreprocessor(Preprocessor):
""" Remove reference definitions from text and store for later use. """
RE = re.compile(r'^(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)$', re.DOTALL)
def run (self, lines):
new_text = [];
for line in lines:
m = self.RE.match(line)
if m:
id = m.group(2).strip().lower()
t = m.group(4).strip() # potential title
if not t:
self.markdown.references[id] = (m.group(3), t)
elif (len(t) >= 2
and (t[0] == t[-1] == "\""
or t[0] == t[-1] == "\'"
or (t[0] == "(" and t[-1] == ")") ) ):
self.markdown.references[id] = (m.group(3), t[1:-1])
else:
new_text.append(line)
else:
new_text.append(line)
return new_text #+ "\n"
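A minimal sketch of the preprocessor contract described above (illustrative; the class name and behaviour are hypothetical, and it assumes md.preprocessors is the instance's OrderedDict of preprocessors as in upstream Python-Markdown 2.x): run() receives the document as a list of lines and returns a possibly modified list, before any block parsing happens:

import markdown

class StripTodoPreprocessor(markdown.preprocessors.Preprocessor):
    """Hypothetical example: drop source lines that start with 'TODO:'."""
    def run(self, lines):
        return [line for line in lines if not line.startswith("TODO:")]

md = markdown.Markdown()
md.preprocessors.add('strip_todo', StripTodoPreprocessor(md), '_begin')
html = md.convert("TODO: remove me\n\nKeep this paragraph.")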

Просмотреть файл

@ -1,329 +0,0 @@
import markdown
import re
def isString(s):
""" Check if it's string """
return isinstance(s, unicode) or isinstance(s, str)
class Processor:
def __init__(self, markdown_instance=None):
if markdown_instance:
self.markdown = markdown_instance
class Treeprocessor(Processor):
"""
Treeprocessors are run on the ElementTree object before serialization.
Each Treeprocessor implements a "run" method that takes a pointer to an
ElementTree, modifies it as necessary and returns an ElementTree
object.
Treeprocessors must extend markdown.Treeprocessor.
"""
def run(self, root):
"""
Subclasses of Treeprocessor should implement a `run` method, which
takes a root ElementTree object. The method can return a new ElementTree
(which will replace the existing root), or it can modify the current
tree in place and return None.
"""
pass
class InlineProcessor(Treeprocessor):
"""
A Treeprocessor that traverses a tree, applying inline patterns.
"""
def __init__ (self, md):
self.__placeholder_prefix = markdown.INLINE_PLACEHOLDER_PREFIX
self.__placeholder_suffix = markdown.ETX
self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
+ len(self.__placeholder_suffix)
self.__placeholder_re = re.compile(markdown.INLINE_PLACEHOLDER % r'([0-9]{4})')
self.markdown = md
def __makePlaceholder(self, type):
""" Generate a placeholder """
id = "%04d" % len(self.stashed_nodes)
hash = markdown.INLINE_PLACEHOLDER % id
return hash, id
def __findPlaceholder(self, data, index):
"""
Extract id from data string, start from index
Keyword arguments:
* data: string
* index: index, from which we start search
Returns: placeholder id and string index, after the found placeholder.
"""
m = self.__placeholder_re.search(data, index)
if m:
return m.group(1), m.end()
else:
return None, index + 1
def __stashNode(self, node, type):
""" Add node to stash """
placeholder, id = self.__makePlaceholder(type)
self.stashed_nodes[id] = node
return placeholder
def __handleInline(self, data, patternIndex=0):
"""
Process string with inline patterns and replace it
with placeholders
Keyword arguments:
* data: A line of Markdown text
* patternIndex: The index of the inlinePattern to start with
Returns: String with placeholders.
"""
if not isinstance(data, markdown.AtomicString):
startIndex = 0
while patternIndex < len(self.markdown.inlinePatterns):
data, matched, startIndex = self.__applyPattern(
self.markdown.inlinePatterns.value_for_index(patternIndex),
data, patternIndex, startIndex)
if not matched:
patternIndex += 1
return data
def __processElementText(self, node, subnode, isText=True):
"""
Process placeholders in Element.text or Element.tail
of Elements popped from self.stashed_nodes.
Keyword arguments:
* node: parent node
* subnode: processing node
* isText: bool variable, True - it's text, False - it's tail
Returns: None
"""
if isText:
text = subnode.text
subnode.text = None
else:
text = subnode.tail
subnode.tail = None
childResult = self.__processPlaceholders(text, subnode)
if not isText and node is not subnode:
pos = node.getchildren().index(subnode)
node.remove(subnode)
else:
pos = 0
childResult.reverse()
for newChild in childResult:
node.insert(pos, newChild)
def __processPlaceholders(self, data, parent):
"""
Process string with placeholders and generate ElementTree tree.
Keyword arguments:
* data: string with placeholders instead of ElementTree elements.
* parent: Element, which contains processing inline data
Returns: list with ElementTree elements with applied inline patterns.
"""
def linkText(text):
if text:
if result:
if result[-1].tail:
result[-1].tail += text
else:
result[-1].tail = text
else:
if parent.text:
parent.text += text
else:
parent.text = text
result = []
strartIndex = 0
while data:
index = data.find(self.__placeholder_prefix, strartIndex)
if index != -1:
id, phEndIndex = self.__findPlaceholder(data, index)
if id in self.stashed_nodes:
node = self.stashed_nodes.get(id)
if index > 0:
text = data[strartIndex:index]
linkText(text)
if not isString(node): # it's Element
for child in [node] + node.getchildren():
if child.tail:
if child.tail.strip():
self.__processElementText(node, child, False)
if child.text:
if child.text.strip():
self.__processElementText(child, child)
else: # it's just a string
linkText(node)
strartIndex = phEndIndex
continue
strartIndex = phEndIndex
result.append(node)
else: # wrong placeholder
end = index + len(self.__placeholder_prefix)
linkText(data[strartIndex:end])
strartIndex = end
else:
text = data[strartIndex:]
linkText(text)
data = ""
return result
def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
"""
Check if the line fits the pattern, create the necessary
elements, add it to stashed_nodes.
Keyword arguments:
* data: the text to be processed
* pattern: the pattern to be checked
* patternIndex: index of current pattern
* startIndex: string index, from which we start the search
Returns: String with placeholders instead of ElementTree elements.
"""
match = pattern.getCompiledRegExp().match(data[startIndex:])
leftData = data[:startIndex]
if not match:
return data, False, 0
node = pattern.handleMatch(match)
if node is None:
return data, True, len(leftData) + match.span(len(match.groups()))[0]
if not isString(node):
if not isinstance(node.text, markdown.AtomicString):
# We need to process current node too
for child in [node] + node.getchildren():
if not isString(node):
if child.text:
child.text = self.__handleInline(child.text,
patternIndex + 1)
if child.tail:
child.tail = self.__handleInline(child.tail,
patternIndex)
placeholder = self.__stashNode(node, pattern.type())
return "%s%s%s%s" % (leftData,
match.group(1),
placeholder, match.groups()[-1]), True, 0
def run(self, tree):
"""Apply inline patterns to a parsed Markdown tree.
Iterate over ElementTree, find elements with inline tag, apply inline
patterns and append newly created Elements to tree. If you don't
want process your data with inline paterns, instead of normal string,
use subclass AtomicString:
node.text = markdown.AtomicString("data won't be processed with inline patterns")
Arguments:
* markdownTree: ElementTree object, representing Markdown tree.
Returns: ElementTree object with applied inline patterns.
"""
self.stashed_nodes = {}
stack = [tree]
while stack:
currElement = stack.pop()
insertQueue = []
for child in currElement.getchildren():
if child.text and not isinstance(child.text, markdown.AtomicString):
text = child.text
child.text = None
lst = self.__processPlaceholders(self.__handleInline(
text), child)
stack += lst
insertQueue.append((child, lst))
if child.getchildren():
stack.append(child)
for element, lst in insertQueue:
if element.text:
element.text = \
markdown.inlinepatterns.handleAttributes(element.text,
element)
i = 0
for newChild in lst:
# Processing attributes
if newChild.tail:
newChild.tail = \
markdown.inlinepatterns.handleAttributes(newChild.tail,
element)
if newChild.text:
newChild.text = \
markdown.inlinepatterns.handleAttributes(newChild.text,
newChild)
element.insert(i, newChild)
i += 1
return tree
class PrettifyTreeprocessor(Treeprocessor):
""" Add linebreaks to the html document. """
def _prettifyETree(self, elem):
""" Recursively add linebreaks to ElementTree children. """
i = "\n"
if markdown.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
if (not elem.text or not elem.text.strip()) \
and len(elem) and markdown.isBlockLevel(elem[0].tag):
elem.text = i
for e in elem:
if markdown.isBlockLevel(e.tag):
self._prettifyETree(e)
if not elem.tail or not elem.tail.strip():
elem.tail = i
if not elem.tail or not elem.tail.strip():
elem.tail = i
def run(self, root):
""" Add linebreaks to ElementTree root object. """
self._prettifyETree(root)
# Do <br />'s separately as they are often in the middle of
# inline content and missed by _prettifyETree.
brs = root.getiterator('br')
for br in brs:
if not br.tail or not br.tail.strip():
br.tail = '\n'
else:
br.tail = '\n%s' % br.tail
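A sketch of the treeprocessor contract (illustrative; the class name is hypothetical, and it assumes md.treeprocessors is the instance's OrderedDict of treeprocessors as in upstream Python-Markdown 2.x): run() receives the root ElementTree element after block parsing and may either modify it in place and return None, or return a replacement tree:

import markdown

class NofollowTreeprocessor(markdown.treeprocessors.Treeprocessor):
    """Hypothetical example: add rel="nofollow" to every generated link."""
    def run(self, root):
        for link in root.getiterator('a'):
            link.set('rel', 'nofollow')

md = markdown.Markdown()
md.treeprocessors.add('nofollow', NofollowTreeprocessor(md), '_end')
html = md.convert("[example](http://example.com/)")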

Просмотреть файл

@ -0,0 +1,43 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
'use strict';
const simple = require('sdk/simple-prefs');
const service = require('sdk/preferences/service');
const { id, preferencesBranch } = require('sdk/self');
const { AddonManager } = require('chrome').Cu.import('resource://gre/modules/AddonManager.jsm');
exports.testCurlyID = function(assert) {
assert.equal(id, '{34a1eae1-c20a-464f-9b0e-000000000000}', 'curly ID is curly');
assert.equal(simple.prefs.test13, 26, 'test13 is 26');
simple.prefs.test14 = '15';
assert.equal(service.get('extensions.{34a1eae1-c20a-464f-9b0e-000000000000}.test14'), '15', 'test14 is 15');
assert.equal(service.get('extensions.{34a1eae1-c20a-464f-9b0e-000000000000}.test14'), simple.prefs.test14, 'simple test14 also 15');
}
exports.testInvalidPreferencesBranch = function(assert) {
assert.notEqual(preferencesBranch, 'invalid^branch*name', 'invalid preferences-branch value ignored');
assert.equal(preferencesBranch, '{34a1eae1-c20a-464f-9b0e-000000000000}', 'preferences-branch is {34a1eae1-c20a-464f-9b0e-000000000000}');
}
// from `/test/test-self.js`, adapted to `sdk/test/assert` API
exports.testSelfID = function(assert, done) {
assert.equal(typeof(id), 'string', 'self.id is a string');
assert.ok(id.length > 0, 'self.id not empty');
AddonManager.getAddonByID(id, function(addon) {
assert.ok(addon, 'found addon with self.id');
done();
});
}
require('sdk/test/runner').runTestsFromModule(module);

Просмотреть файл

@ -0,0 +1,14 @@
{
"id": "{34a1eae1-c20a-464f-9b0e-000000000000}",
"fullName": "curly ID test",
"author": "Tomislav Jovanovic",
"preferences": [{
"name": "test13",
"type": "integer",
"title": "test13",
"value": 26
}],
"preferences-branch": "invalid^branch*name"
}

Просмотреть файл

@ -0,0 +1,34 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
'use strict';
const { id, preferencesBranch } = require('sdk/self');
const simple = require('sdk/simple-prefs');
const service = require('sdk/preferences/service');
const { AddonManager } = require('chrome').Cu.import('resource://gre/modules/AddonManager.jsm', {});
const expected_id = 'predefined-id@test';
exports.testExpectedID = function(assert) {
assert.equal(id, expected_id, 'ID is as expected');
assert.equal(preferencesBranch, expected_id, 'preferences-branch is ' + expected_id);
assert.equal(simple.prefs.test, 5, 'test pref is 5');
simple.prefs.test2 = '25';
assert.equal(service.get('extensions.'+expected_id+'.test2'), '25', 'test pref is 25');
assert.equal(service.get('extensions.'+expected_id+'.test2'), simple.prefs.test2, 'test pref is 25');
}
exports.testSelfID = function(assert, done) {
assert.equal(typeof(id), 'string', 'self.id is a string');
assert.ok(id.length > 0, 'self.id not empty');
AddonManager.getAddonByID(id, function(addon) {
assert.equal(addon.id, id, 'found addon with self.id');
done();
});
}
require('sdk/test/runner').runTestsFromModule(module);

Просмотреть файл

@ -0,0 +1,11 @@
{
"id": "predefined-id@test",
"fullName": "predefined ID test",
"author": "Erik Vold",
"preferences": [{
"name": "test",
"type": "integer",
"title": "test",
"value": 5
}]
}

Просмотреть файл

@ -0,0 +1,35 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
'use strict';
const { id, preferencesBranch } = require('sdk/self');
const simple = require('sdk/simple-prefs');
const service = require('sdk/preferences/service');
const { AddonManager } = require('chrome').Cu.import('resource://gre/modules/AddonManager.jsm');
exports.testPreferencesBranch = function(assert) {
assert.equal(preferencesBranch, 'human-readable', 'preferencesBranch is human-readable');
assert.equal(simple.prefs.test42, true, 'test42 is true');
simple.prefs.test43 = 'movie';
assert.equal(service.get('extensions.human-readable.test43'), 'movie', 'test43 is a movie');
}
// from `/test/test-self.js`, adapted to `sdk/test/assert` API
exports.testSelfID = function(assert, done) {
assert.equal(typeof(id), 'string', 'self.id is a string');
assert.ok(id.length > 0, 'self.id not empty');
AddonManager.getAddonByID(id, function(addon) {
assert.ok(addon, 'found addon with self.id');
done();
});
}
require('sdk/test/runner').runTestsFromModule(module);

Просмотреть файл

@ -0,0 +1,14 @@
{
"id": "test-preferences-branch",
"fullName": "preferences-branch test",
"author": "Tomislav Jovanovic",
"preferences": [{
"name": "test42",
"type": "bool",
"title": "test42",
"value": true
}],
"preferences-branch": "human-readable"
}

Просмотреть файл

@ -0,0 +1,11 @@
[App]
Vendor=Varma
Name=Test App
Version=1.0
BuildID=20060101
Copyright=Copyright (c) 2009 Atul Varma
ID=xulapp@toolness.com
[Gecko]
MinVersion=1.9.2.0
MaxVersion=2.0.*

Просмотреть файл

@ -0,0 +1,337 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// @see http://mxr.mozilla.org/mozilla-central/source/js/src/xpconnect/loader/mozJSComponentLoader.cpp
'use strict';
// IMPORTANT: Avoid adding any initialization tasks here; if you need to do
// something before the add-on is loaded, consider the addon/runner module instead!
const { classes: Cc, Constructor: CC, interfaces: Ci, utils: Cu,
results: Cr, manager: Cm } = Components;
const ioService = Cc['@mozilla.org/network/io-service;1'].
getService(Ci.nsIIOService);
const resourceHandler = ioService.getProtocolHandler('resource').
QueryInterface(Ci.nsIResProtocolHandler);
const systemPrincipal = CC('@mozilla.org/systemprincipal;1', 'nsIPrincipal')();
const scriptLoader = Cc['@mozilla.org/moz/jssubscript-loader;1'].
getService(Ci.mozIJSSubScriptLoader);
const prefService = Cc['@mozilla.org/preferences-service;1'].
getService(Ci.nsIPrefService).
QueryInterface(Ci.nsIPrefBranch);
const appInfo = Cc["@mozilla.org/xre/app-info;1"].
getService(Ci.nsIXULAppInfo);
const vc = Cc["@mozilla.org/xpcom/version-comparator;1"].
getService(Ci.nsIVersionComparator);
const REASON = [ 'unknown', 'startup', 'shutdown', 'enable', 'disable',
'install', 'uninstall', 'upgrade', 'downgrade' ];
const bind = Function.call.bind(Function.bind);
let loader = null;
let unload = null;
let cuddlefishSandbox = null;
let nukeTimer = null;
// Utility function that synchronously reads a local resource from the given
// `uri` and returns its content as a string.
function readURI(uri) {
let ioservice = Cc['@mozilla.org/network/io-service;1'].
getService(Ci.nsIIOService);
let channel = ioservice.newChannel(uri, 'UTF-8', null);
let stream = channel.open();
let cstream = Cc['@mozilla.org/intl/converter-input-stream;1'].
createInstance(Ci.nsIConverterInputStream);
cstream.init(stream, 'UTF-8', 0, 0);
let str = {};
let data = '';
let read = 0;
do {
read = cstream.readString(0xffffffff, str);
data += str.value;
} while (read != 0);
cstream.close();
return data;
}
// We don't do anything on install & uninstall yet, but in the future
// we should allow add-ons to clean up after uninstall.
function install(data, reason) {}
function uninstall(data, reason) {}
function startup(data, reasonCode) {
try {
let reason = REASON[reasonCode];
// URI for the root of the XPI file.
// 'jar:' URI if the addon is packed, 'file:' URI otherwise.
// (Used by l10n module in order to fetch `locale` folder)
let rootURI = data.resourceURI.spec;
// TODO: Maybe we should read harness-options.json asynchronously,
// since we can't do anything until 'sessionstore-windows-restored' anyway.
let options = JSON.parse(readURI(rootURI + './harness-options.json'));
let id = options.jetpackID;
let name = options.name;
// Clean the metadata
options.metadata[name]['permissions'] = options.metadata[name]['permissions'] || {};
// freeze the permissions
Object.freeze(options.metadata[name]['permissions']);
// freeze the metadata
Object.freeze(options.metadata[name]);
// Register a new resource 'domain' for this add-on which maps to the
// XPI's `resources` folder.
// Generate the domain name from the jetpack ID (the extension ID),
// stripping characters that don't work in a domain name:
let uuidRe =
/^\{([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\}$/;
let domain = id.
toLowerCase().
replace(/@/g, '-at-').
replace(/\./g, '-dot-').
replace(uuidRe, '$1');
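// Illustrative examples (the first ID is hypothetical): 'my-addon@example.org'
// becomes the domain 'my-addon-at-example-dot-org', while a curly UUID such as
// '{34a1eae1-c20a-464f-9b0e-000000000000}' is reduced to its bare hex form.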
let prefixURI = 'resource://' + domain + '/';
let resourcesURI = ioService.newURI(rootURI + '/resources/', null, null);
resourceHandler.setSubstitution(domain, resourcesURI);
// Create path to URLs mapping supported by loader.
let paths = {
// Relative modules resolve to add-on package lib
'./': prefixURI + name + '/lib/',
'./tests/': prefixURI + name + '/tests/',
'': 'resource://gre/modules/commonjs/'
};
// Map add-on lib and tests resource folders for each package
paths = Object.keys(options.metadata).reduce(function(result, name) {
result[name + '/'] = prefixURI + name + '/lib/'
result[name + '/tests/'] = prefixURI + name + '/tests/'
return result;
}, paths);
// We need to map the tests folder when we run SDK tests, whose package name
// is stripped
if (name == 'addon-sdk')
paths['tests/'] = prefixURI + name + '/tests/';
let useBundledSDK = options['force-use-bundled-sdk'];
if (!useBundledSDK) {
try {
useBundledSDK = prefService.getBoolPref("extensions.addon-sdk.useBundledSDK");
}
catch (e) {
// Pref doesn't exist, allow using Firefox shipped SDK
}
}
// Starting with Firefox 21.0a1, we use the modules shipped with Firefox.
// Still allow using modules from the XPI if the manifest tells us to do so,
// and only look for SDK modules in the XPI if the XPI actually ships them.
if (options['is-sdk-bundled'] &&
(vc.compare(appInfo.version, '21.0a1') < 0 || useBundledSDK)) {
// Maps sdk module folders to their resource folder
paths[''] = prefixURI + 'addon-sdk/lib/';
// test.js is usually found in the root commonjs or SDK_ROOT/lib/ folder,
// so it isn't shipped in the XPI. Keep a copy of it in the sdk/ folder
// until we no longer support SDK modules in the XPI:
paths['test'] = prefixURI + 'addon-sdk/lib/sdk/test.js';
}
// Retrieve the list of module folder overloads based on preferences, in order
// to optionally use local modules instead of the files shipped with Firefox.
let branch = prefService.getBranch('extensions.modules.' + id + '.path');
paths = branch.getChildList('', {}).reduce(function (result, name) {
// Allow overloading of any sub folder by replacing '.' with '/' in the pref name
let path = name.substr(1).split('.').join('/');
// Only accept folder overloads by ensuring the path always ends with `/`
if (path) path += '/';
let fileURI = branch.getCharPref(name);
// On mobile, the file URI has to end with a `/`, otherwise setSubstitution
// takes the parent folder instead.
if (fileURI[fileURI.length-1] !== '/')
fileURI += '/';
// Map the given file:// URI to a resource:// URI in order to avoid various
// failures that happen with file:// URIs and to stay close to the production env
let resourcesURI = ioService.newURI(fileURI, null, null);
let resName = 'extensions.modules.' + domain + '.commonjs.path' + name;
resourceHandler.setSubstitution(resName, resourcesURI);
result[path] = 'resource://' + resName + '/';
return result;
}, paths);
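// For example (hypothetical pref, not part of this commit): setting the char
// pref 'extensions.modules.<add-on id>.path.sdk' to a local 'file:///.../sdk/'
// folder overloads every module resolved under the 'sdk/' prefix.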
// Make version 2 of the manifest
let manifest = options.manifest;
// Import `cuddlefish.js` module using a Sandbox and bootstrap loader.
let cuddlefishPath = 'loader/cuddlefish.js';
let cuddlefishURI = 'resource://gre/modules/commonjs/sdk/' + cuddlefishPath;
if (paths['sdk/']) { // sdk folder has been overloaded
// (from pref, or cuddlefish is still in the xpi)
cuddlefishURI = paths['sdk/'] + cuddlefishPath;
}
else if (paths['']) { // root modules folder has been overloaded
cuddlefishURI = paths[''] + 'sdk/' + cuddlefishPath;
}
cuddlefishSandbox = loadSandbox(cuddlefishURI);
let cuddlefish = cuddlefishSandbox.exports;
// Normalize `options.mainPath` so that it looks like one that will come
// with a new version of the linker.
let main = options.mainPath;
unload = cuddlefish.unload;
loader = cuddlefish.Loader({
paths: paths,
// modules manifest.
manifest: manifest,
// Add-on ID used by different APIs as a unique identifier.
id: id,
// Add-on name.
name: name,
// Add-on version.
version: options.metadata[name].version,
// Add-on package descriptor.
metadata: options.metadata[name],
// Add-on load reason.
loadReason: reason,
prefixURI: prefixURI,
// Add-on URI.
rootURI: rootURI,
// options used by system module.
// File to write 'OK' or 'FAIL' (exit code emulation).
resultFile: options.resultFile,
// Arguments passed as --static-args
staticArgs: options.staticArgs,
// Arguments related to test runner.
modules: {
'@test/options': {
allTestModules: options.allTestModules,
iterations: options.iterations,
filter: options.filter,
profileMemory: options.profileMemory,
stopOnError: options.stopOnError,
verbose: options.verbose,
parseable: options.parseable,
checkMemory: options.check_memory,
}
}
});
let module = cuddlefish.Module('sdk/loader/cuddlefish', cuddlefishURI);
let require = cuddlefish.Require(loader, module);
require('sdk/addon/runner').startup(reason, {
loader: loader,
main: main,
prefsURI: rootURI + 'defaults/preferences/prefs.js'
});
} catch (error) {
dump('Bootstrap error: ' +
(error.message ? error.message : String(error)) + '\n' +
(error.stack || error.fileName + ': ' + error.lineNumber) + '\n');
throw error;
}
};
function loadSandbox(uri) {
let proto = {
sandboxPrototype: {
loadSandbox: loadSandbox,
ChromeWorker: ChromeWorker
}
};
let sandbox = Cu.Sandbox(systemPrincipal, proto);
// Create a fake CommonJS environment just to enable loading loader.js
// correctly
sandbox.exports = {};
sandbox.module = { uri: uri, exports: sandbox.exports };
sandbox.require = function (id) {
if (id !== "chrome")
throw new Error("Bootstrap sandbox `require` method isn't implemented.");
return Object.freeze({ Cc: Cc, Ci: Ci, Cu: Cu, Cr: Cr, Cm: Cm,
CC: bind(CC, Components), components: Components,
ChromeWorker: ChromeWorker });
};
scriptLoader.loadSubScript(uri, sandbox, 'UTF-8');
return sandbox;
}
function unloadSandbox(sandbox) {
if ("nukeSandbox" in Cu)
Cu.nukeSandbox(sandbox);
}
function setTimeout(callback, delay) {
let timer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
timer.initWithCallback({ notify: callback }, delay,
Ci.nsITimer.TYPE_ONE_SHOT);
return timer;
}
function shutdown(data, reasonCode) {
let reason = REASON[reasonCode];
if (loader) {
unload(loader, reason);
unload = null;
// Don't waste time cleaning up if the application is shutting down
if (reason != "shutdown") {
// Avoid leaking all modules when something goes wrong with one particular
// module. Do not clean it up immediately in order to allow executing some
// actions when the add-on is disabled.
// We need to keep a reference to the timer, otherwise it is collected
// and won't ever fire.
nukeTimer = setTimeout(nukeModules, 1000);
}
}
};
function nukeModules() {
nukeTimer = null;
// Module objects store `exports`, which comes from sandboxes.
// We should avoid keeping links to these objects to avoid leaking sandboxes.
for (let key in loader.modules) {
delete loader.modules[key];
}
// Direct links to sandboxes should be removed too
for (let key in loader.sandboxes) {
let sandbox = loader.sandboxes[key];
delete loader.sandboxes[key];
// Bug 775067: From FF17 we can kill all CCW from a given sandbox
unloadSandbox(sandbox);
}
loader = null;
// Both `toolkit/loader` and `system/xul-app` are loaded as JSMs via
// `cuddlefish.js`, and need to be unloaded to avoid memory leaks when
// the add-on is unloaded.
unloadSandbox(cuddlefishSandbox.loaderSandbox);
unloadSandbox(cuddlefishSandbox.xulappSandbox);
// Bug 764840: We need to unload cuddlefish otherwise it will stay alive
// and keep a reference to this compartment.
unloadSandbox(cuddlefishSandbox);
cuddlefishSandbox = null;
}

Просмотреть файл

@ -0,0 +1,3 @@
pref("extensions.test-simple-prefs@jetpack.somePreference", "TEST");
pref("extensions.test-simple-prefs@jetpack.myInteger", 8);
pref("extensions.test-simple-prefs@jetpack.myHiddenInt", 5);

Просмотреть файл

@ -0,0 +1,33 @@
<?xml version="1.0"?>
<!-- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/. -->
<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest">
<em:id>xulapp@toolness.com</em:id>
<em:version>1.0</em:version>
<em:type>2</em:type>
<em:bootstrap>true</em:bootstrap>
<em:unpack>false</em:unpack>
<!-- Firefox -->
<em:targetApplication>
<Description>
<em:id>{ec8030f7-c20a-464f-9b0e-13a3a9e97384}</em:id>
<em:minVersion>21.0</em:minVersion>
<em:maxVersion>25.0a1</em:maxVersion>
</Description>
</em:targetApplication>
<!-- Front End MetaData -->
<em:name>Test App</em:name>
<em:description>Harness for tests.</em:description>
<em:creator>Mozilla Corporation</em:creator>
<em:homepageURL></em:homepageURL>
<em:optionsType></em:optionsType>
<em:updateURL></em:updateURL>
</Description>
</RDF>

Просмотреть файл

@ -0,0 +1,5 @@
<?xml version="1.0" ?>
<vbox xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<setting data-jetpack-id="test-simple-prefs@jetpack" pref="extensions.test-simple-prefs@jetpack.somePreference" pref-name="somePreference" title="some-title" type="string">Some short description for the preference</setting>
<setting data-jetpack-id="test-simple-prefs@jetpack" pref="extensions.test-simple-prefs@jetpack.myInteger" pref-name="myInteger" title="my-int" type="integer">How many of them we have.</setting>
</vbox>

Просмотреть файл

@ -0,0 +1,97 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
'use strict';
const { Cu } = require('chrome');
const sp = require('sdk/simple-prefs');
const app = require('sdk/system/xul-app');
const self = require('sdk/self');
const tabs = require('sdk/tabs');
const { preferencesBranch } = require('sdk/self');
const { AddonManager } = Cu.import('resource://gre/modules/AddonManager.jsm', {});
exports.testRegression = function(assert) {
assert.equal(self.preferencesBranch, self.id, 'preferencesBranch returns id here');
}
exports.testDefaultValues = function (assert) {
assert.equal(sp.prefs.myHiddenInt, 5, 'myHiddenInt default is 5');
assert.equal(sp.prefs.myInteger, 8, 'myInteger default is 8');
assert.equal(sp.prefs.somePreference, 'TEST', 'somePreference default is correct');
}
exports.testOptionsType = function(assert, done) {
AddonManager.getAddonByID(self.id, function(aAddon) {
assert.equal(aAddon.optionsType, AddonManager.OPTIONS_TYPE_INLINE, 'options type is inline');
done();
});
}
if (app.is('Firefox')) {
exports.testAOM = function(assert, done) {
tabs.open({
url: 'about:addons',
onReady: function(tab) {
tab.attach({
contentScriptWhen: 'end',
contentScript: 'function onLoad() {\n' +
'unsafeWindow.removeEventListener("load", onLoad, false);\n' +
'AddonManager.getAddonByID("' + self.id + '", function(aAddon) {\n' +
'unsafeWindow.gViewController.viewObjects.detail.node.addEventListener("ViewChanged", function whenViewChanges() {\n' +
'unsafeWindow.gViewController.viewObjects.detail.node.removeEventListener("ViewChanged", whenViewChanges, false);\n' +
'setTimeout(function() {\n' + // TODO: figure out why this is necessary..
'self.postMessage({\n' +
'somePreference: getAttributes(unsafeWindow.document.querySelector("setting[title=\'some-title\']")),\n' +
'myInteger: getAttributes(unsafeWindow.document.querySelector("setting[title=\'my-int\']")),\n' +
'myHiddenInt: getAttributes(unsafeWindow.document.querySelector("setting[title=\'hidden-int\']"))\n' +
'});\n' +
'}, 250);\n' +
'}, false);\n' +
'unsafeWindow.gViewController.commands.cmd_showItemDetails.doCommand(aAddon, true);\n' +
'});\n' +
'function getAttributes(ele) {\n' +
'if (!ele) return {};\n' +
'return {\n' +
'pref: ele.getAttribute("pref"),\n' +
'type: ele.getAttribute("type"),\n' +
'title: ele.getAttribute("title"),\n' +
'desc: ele.getAttribute("desc")\n' +
'}\n' +
'}\n' +
'}\n' +
// Wait for the load event ?
'if (document.readyState == "complete") {\n' +
'onLoad()\n' +
'} else {\n' +
'unsafeWindow.addEventListener("load", onLoad, false);\n' +
'}\n',
onMessage: function(msg) {
// test somePreference
assert.equal(msg.somePreference.type, 'string', 'some pref is a string');
assert.equal(msg.somePreference.pref, 'extensions.'+self.preferencesBranch+'.somePreference', 'somePreference path is correct');
assert.equal(msg.somePreference.title, 'some-title', 'somePreference title is correct');
assert.equal(msg.somePreference.desc, 'Some short description for the preference', 'somePreference description is correct');
// test myInteger
assert.equal(msg.myInteger.type, 'integer', 'myInteger is a int');
assert.equal(msg.myInteger.pref, 'extensions.'+self.preferencesBranch+'.myInteger', 'extensions.test-simple-prefs.myInteger');
assert.equal(msg.myInteger.title, 'my-int', 'myInteger title is correct');
assert.equal(msg.myInteger.desc, 'How many of them we have.', 'myInteger desc is correct');
// test myHiddenInt
assert.equal(msg.myHiddenInt.type, undefined, 'myHiddenInt was not displayed');
assert.equal(msg.myHiddenInt.pref, undefined, 'myHiddenInt was not displayed');
assert.equal(msg.myHiddenInt.title, undefined, 'myHiddenInt was not displayed');
assert.equal(msg.myHiddenInt.desc, undefined, 'myHiddenInt was not displayed');
tab.close(done);
}
});
}
});
}
}
require('sdk/test/runner').runTestsFromModule(module);

Просмотреть файл

@ -0,0 +1,24 @@
{
"id": "test-simple-prefs",
"preferences": [{
"name": "somePreference",
"title": "some-title",
"description": "Some short description for the preference",
"type": "string",
"value": "TEST"
},
{
"description": "How many of them we have.",
"name": "myInteger",
"type": "integer",
"value": 8,
"title": "my-int"
}, {
"name": "myHiddenInt",
"type": "integer",
"hidden": true,
"value": 5,
"title": "hidden-int"
}],
"preferences-branch": "simple-prefs-regression"
}

Просмотреть файл

@ -8,6 +8,7 @@ const sp = require('sdk/simple-prefs');
const app = require('sdk/system/xul-app');
const self = require('sdk/self');
const tabs = require('sdk/tabs');
const { preferencesBranch } = require('sdk/self');
const { AddonManager } = Cu.import('resource://gre/modules/AddonManager.jsm', {});
@ -89,4 +90,8 @@ if (app.is('Firefox')) {
}
}
exports.testDefaultPreferencesBranch = function(assert) {
assert.equal(preferencesBranch, self.id, 'preferencesBranch default the same as self.id');
}
require('sdk/test/runner').runTestsFromModule(module);

Просмотреть файл

@ -0,0 +1,44 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
'use strict';
const { id, preferencesBranch } = require('sdk/self');
const simple = require('sdk/simple-prefs');
const service = require('sdk/preferences/service');
const { AddonManager } = require('chrome').Cu.import('resource://gre/modules/AddonManager.jsm');
exports.testStandardID = function(assert) {
assert.equal(id, 'standard-id@jetpack', 'standard ID is standard');
assert.equal(simple.prefs.test13, 26, 'test13 is 26');
simple.prefs.test14 = '15';
assert.equal(service.get('extensions.standard-id@jetpack.test14'), '15', 'test14 is 15');
assert.equal(service.get('extensions.standard-id@jetpack.test14'), simple.prefs.test14, 'simple test14 also 15');
}
exports.testInvalidPreferencesBranch = function(assert) {
assert.notEqual(preferencesBranch, 'invalid^branch*name', 'invalid preferences-branch value ignored');
assert.equal(preferencesBranch, 'standard-id@jetpack', 'preferences-branch is standard-id@jetpack');
}
// from `/test/test-self.js`, adapted to `sdk/test/assert` API
exports.testSelfID = function(assert, done) {
assert.equal(typeof(id), 'string', 'self.id is a string');
assert.ok(id.length > 0, 'self.id not empty');
AddonManager.getAddonByID(id, function(addon) {
assert.ok(addon, 'found addon with self.id');
done();
});
}
require('sdk/test/runner').runTestsFromModule(module);

Просмотреть файл

@ -0,0 +1,14 @@
{
"id": "standard-id",
"fullName": "standard ID test",
"author": "Tomislav Jovanovic",
"preferences": [{
"name": "test13",
"type": "integer",
"title": "test13",
"value": 26
}],
"preferences-branch": "invalid^branch*name"
}

8
addon-sdk/source/test/fixtures/loader/self/main.js поставляемый Normal file
Просмотреть файл

@ -0,0 +1,8 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
"use strict";
var self = require("sdk/self");
exports.self = self;

Просмотреть файл

@ -6,6 +6,8 @@
const { Cc, Ci, Cu, Cm, components } = require("chrome");
const xulApp = require("sdk/system/xul-app");
const self = require("sdk/self");
const { Loader, main, unload } = require("toolkit/loader");
const loaderOptions = require("@loader/options");
const { AddonManager } = Cu.import("resource://gre/modules/AddonManager.jsm", {});
@ -59,4 +61,19 @@ exports.testSelfID = function(assert, done) {
});
}
exports.testSelfHandlesLackingLoaderOptions = function (assert) {
let root = module.uri.substr(0, module.uri.lastIndexOf('/'));
let uri = root + '/fixtures/loader/self/';
let sdkPath = loaderOptions.paths[''] + 'sdk';
let loader = Loader({ paths: { '': uri, 'sdk': sdkPath }});
let program = main(loader, 'main');
let self = program.self;
assert.pass("No errors thrown when including sdk/self without loader options");
assert.equal(self.isPrivateBrowsingSupported, false,
"safely checks sdk/self.isPrivateBrowsingSupported");
assert.equal(self.packed, false,
"safely checks sdk/self.packed");
unload(loader);
};
require("sdk/test").run(exports);