We have extracted the following 50 code examples from open-source Python projects to illustrate how to use docutils.nodes.TextElement().
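Before the extracted examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the node class name `highlight` and the `classes` value are purely illustrative). It shows the two patterns that recur throughout this page: subclassing TextElement to define a custom node, and testing isinstance(node.parent, nodes.TextElement) to tell inline from block-level placement.

from docutils import nodes

# Hypothetical node class, for illustration only.
class highlight(nodes.Inline, nodes.TextElement):
    pass

# TextElement(rawsource, text, *children, **attributes):
# the `text` argument becomes the node's first Text child.
node = highlight('*hi*', 'hi', classes=['example-class'])
print(node.astext())                                # -> hi

para = nodes.paragraph('', 'plain text ')           # paragraph is also a TextElement
para += node                                        # appending sets node.parent
print(isinstance(node.parent, nodes.TextElement))   # -> True: `node` sits inline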

def rst2ansi(input_string, output_encoding='utf-8'):
    overrides = {}
    overrides['input_encoding'] = 'unicode'

    def style_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
        return [nodes.TextElement(rawtext, text, classes=[name])], []

    for color in COLORS:
        roles.register_local_role('ansi-fg-' + color, style_role)
        roles.register_local_role('ansi-bg-' + color, style_role)
    for style in STYLES:
        roles.register_local_role('ansi-' + style, style_role)

    out = core.publish_string(input_string.decode('utf-8'),
                              settings_overrides=overrides,
                              writer=Writer(unicode=output_encoding.startswith('utf')))
    return out.decode(output_encoding)

def visit_reference(self, node):
    atts = {'class': 'reference'}
    if 'refuri' in node:
        atts['href'] = node['refuri']
        if (self.settings.cloak_email_addresses
            and atts['href'].startswith('mailto:')):
            atts['href'] = self.cloak_mailto(atts['href'])
            self.in_mailto = True
        atts['class'] += ' external'
    else:
        assert 'refid' in node, \
               'References must have "refuri" or "refid" attribute.'
        atts['href'] = '#' + node['refid']
        atts['class'] += ' internal'
    if not isinstance(node.parent, nodes.TextElement):
        assert len(node) == 1 and isinstance(node[0], nodes.image)
        atts['class'] += ' image-reference'
    self.body.append(self.starttag(node, 'a', '', **atts))

def latex2html(node, source):
    inline = isinstance(node.parent, nodes.TextElement)
    latex = node['latex']
    name = 'math-%s' % md5(latex).hexdigest()[-10:]
    dest = '_static/%s.png' % name
    depth = latex2png(latex, dest, node['fontset'])

    path = '_static'
    count = source.split('/doc/')[-1].count('/')
    for i in range(count):
        if os.path.exists(path):
            break
        path = '../' + path
    path = '../' + path  # specifically added for matplotlib

    if inline:
        cls = ''
    else:
        cls = 'class="center" '
    if inline and depth != 0:
        style = 'style="position: relative; bottom: -%dpx"' % (depth + 1)
    else:
        style = ''

    return '<img src="%s/%s.png" %s%s/>' % (path, name, cls, style)

def is_translatable(node):
    if isinstance(node, nodes.TextElement):
        if not node.source:
            return False  # built-in message
        if isinstance(node, IGNORED_NODES) and 'translatable' not in node:
            return False
        if not node.get('translatable', True):
            # not(node['translatable'] == True or node['translatable'] is None)
            return False
        # <field_name>orphan</field_name>
        # XXX ignore all metadata (== docinfo)
        if isinstance(node, nodes.field_name) and node.children[0] == 'orphan':
            return False
        return True

    if isinstance(node, nodes.image) and node.get('translatable'):
        return True

    return False
def is_inline(self, node): """Check whether a node represents an inline element.""" return isinstance(node.parent, nodes.TextElement)

def visit_raw(self, node):
    if 'html' in node.get('format', '').split():
        t = isinstance(node.parent, nodes.TextElement) and 'span' or 'div'
        if node['classes']:
            self.body.append(self.starttag(node, t, suffix=''))
        self.body.append(node.astext())
        if node['classes']:
            self.body.append('</%s>' % t)
    # Keep non-HTML raw text out of output:
    raise nodes.SkipNode

def visit_raw(self, node):
    if 'xml' not in node.get('format', '').split():
        # skip other raw content?
        # raise nodes.SkipNode
        self.default_visit(node)
        return
    # wrap in <raw> element
    self.default_visit(node)      # or not?
    xml_string = node.astext()
    self.output.append(xml_string)
    self.default_departure(node)  # or not?
    # Check validity of raw XML:
    if isinstance(xml_string, unicode) and sys.version_info < (3,):
        xml_string = xml_string.encode('utf8')
    try:
        self.xmlparser.parse(StringIO(xml_string))
    except xml.sax._exceptions.SAXParseException, error:
        col_num = self.the_handle.locator.getColumnNumber()
        line_num = self.the_handle.locator.getLineNumber()
        srcline = node.line
        if not isinstance(node.parent, nodes.TextElement):
            srcline += 2  # directive content start line
        msg = 'Invalid raw XML in column %d, line offset %d:\n%s' % (
            col_num, line_num, node.astext())
        self.warn(msg, source=node.source, line=srcline+line_num-1)
    raise nodes.SkipNode  # content already processed
def is_inline(self, node): """Check whether a node represents an inline or block-level element""" return isinstance(node.parent, nodes.TextElement)

def setup(app):
    app.add_node(latex_math)
    app.add_role('math', math_role)

    # Add visit/depart methods to HTML-Translator:
    def visit_latex_math_html(self, node):
        source = self.document.attributes['source']
        self.body.append(latex2html(node, source))
    def depart_latex_math_html(self, node):
        pass
    HTMLTranslator.visit_latex_math = visit_latex_math_html
    HTMLTranslator.depart_latex_math = depart_latex_math_html

    # Add visit/depart methods to LaTeX-Translator:
    def visit_latex_math_latex(self, node):
        inline = isinstance(node.parent, nodes.TextElement)
        if inline:
            self.body.append('$%s$' % node['latex'])
        else:
            self.body.extend(['\\begin{equation}',
                              node['latex'],
                              '\\end{equation}'])
    def depart_latex_math_latex(self, node):
        pass
    LaTeXTranslator.visit_latex_math = visit_latex_math_latex
    LaTeXTranslator.depart_latex_math = depart_latex_math_latex

def setup(app):
    app.add_node(latex_math)
    app.add_role('math', math_role)

    # Add visit/depart methods to HTML-Translator:
    def visit_latex_math_html(self, node):
        mathml = ''.join(node['mathml_tree'].xml())
        self.body.append(mathml)
    def depart_latex_math_html(self, node):
        pass
    HTMLTranslator.visit_latex_math = visit_latex_math_html
    HTMLTranslator.depart_latex_math = depart_latex_math_html

    # Add visit/depart methods to LaTeX-Translator:
    def visit_latex_math_latex(self, node):
        inline = isinstance(node.parent, nodes.TextElement)
        if inline:
            self.body.append('$%s$' % node['latex'])
        else:
            self.body.extend(['\\begin{equation}',
                              node['latex'],
                              '\\end{equation}'])
    def depart_latex_math_latex(self, node):
        pass
    LaTeXTranslator.visit_latex_math = visit_latex_math_latex
    LaTeXTranslator.depart_latex_math = depart_latex_math_latex

# LaTeX to MathML translation stuff:

def default_visit(self, node):
    """Default node visit method."""
    if not self.in_simple:
        self.output.append(self.indent*self.level)
    self.output.append(node.starttag(xml.sax.saxutils.quoteattr))
    self.level += 1
    if isinstance(node, nodes.TextElement):
        self.in_simple += 1
    if not self.in_simple:
        self.output.append(self.newline)

def default_departure(self, node):
    """Default node depart method."""
    self.level -= 1
    if not self.in_simple:
        self.output.append(self.indent*self.level)
    self.output.append(node.endtag())
    if isinstance(node, nodes.TextElement):
        self.in_simple -= 1
    if not self.in_simple:
        self.output.append(self.newline)

# specific visit and depart methods
# ---------------------------------

def process_metadata(self, docname, doctree):
    """Process the docinfo part of the doctree as metadata.

    Keep processing minimal -- just return what docutils says.
    """
    self.metadata[docname] = md = {}
    try:
        docinfo = doctree[0]
    except IndexError:
        # probably an empty document
        return
    if docinfo.__class__ is not nodes.docinfo:
        # nothing to see here
        return
    for node in docinfo:
        # nodes are multiply inherited...
        if isinstance(node, nodes.authors):
            md['authors'] = [author.astext() for author in node]
        elif isinstance(node, nodes.TextElement):
            # e.g. author
            md[node.__class__.__name__] = node.astext()
        else:
            name, body = node
            md[name.astext()] = body.astext()
    for name, value in md.items():
        if name in ('tocdepth',):
            try:
                value = int(value)
            except ValueError:
                value = 0
            md[name] = value

    del doctree[0]

def astext(self):
    return ' -> ' + nodes.TextElement.astext(self)

def astext(self):
    return '[' + nodes.TextElement.astext(self) + ']'

def visit_reference(self, node):
    atts = {'class': 'reference'}
    if node.get('internal') or 'refuri' not in node:
        atts['class'] += ' internal'
    else:
        atts['class'] += ' external'
    if 'refuri' in node:
        atts['href'] = node['refuri']
        if self.settings.cloak_email_addresses and \
           atts['href'].startswith('mailto:'):
            atts['href'] = self.cloak_mailto(atts['href'])
            self.in_mailto = 1
    else:
        assert 'refid' in node, \
               'References must have "refuri" or "refid" attribute.'
        atts['href'] = '#' + node['refid']
    if not isinstance(node.parent, nodes.TextElement):
        assert len(node) == 1 and isinstance(node[0], nodes.image)
        atts['class'] += ' image-reference'
    if 'reftitle' in node:
        atts['title'] = node['reftitle']
    self.body.append(self.starttag(node, 'a', '', **atts))

    if node.get('secnumber'):
        self.body.append(('%s' + self.secnumber_suffix) %
                         '.'.join(map(str, node['secnumber'])))

def visit_raw(self, node):
    if 'xml' not in node.get('format', '').split():
        # skip other raw content?
        # raise nodes.SkipNode
        self.default_visit(node)
        return
    # wrap in <raw> element
    self.default_visit(node)      # or not?
    xml_string = node.astext()
    self.output.append(xml_string)
    self.default_departure(node)  # or not?
    # Check validity of raw XML:
    if isinstance(xml_string, str) and sys.version_info < (3,):
        xml_string = xml_string.encode('utf8')
    try:
        self.xmlparser.parse(StringIO(xml_string))
    except xml.sax._exceptions.SAXParseException as error:
        col_num = self.the_handle.locator.getColumnNumber()
        line_num = self.the_handle.locator.getLineNumber()
        srcline = node.line
        if not isinstance(node.parent, nodes.TextElement):
            srcline += 2  # directive content start line
        msg = 'Invalid raw XML in column %d, line offset %d:\n%s' % (
            col_num, line_num, node.astext())
        self.warn(msg, source=node.source, line=srcline+line_num-1)
    raise nodes.SkipNode  # content already processed

def apply(self):
    for target in self.document.traverse(nodes.target):
        # Only block-level targets without reference (like ".. target:"):
        if (isinstance(target.parent, nodes.TextElement) or
            (target.hasattr('refid') or
             target.hasattr('refuri') or
             target.hasattr('refname'))):
            continue
        assert len(target) == 0, 'error: block-level target has children'
        next_node = target.next_node(ascend=True)
        # Do not move names and ids into Invisibles (we'd lose the
        # attributes) or different Targetables (e.g. footnotes).
        if (next_node is not None and
            ((not isinstance(next_node, nodes.Invisible) and
              not isinstance(next_node, nodes.Targetable)) or
             isinstance(next_node, nodes.target))):
            next_node['ids'].extend(target['ids'])
            next_node['names'].extend(target['names'])
            # Set defaults for next_node.expect_referenced_by_name/id.
            if not hasattr(next_node, 'expect_referenced_by_name'):
                next_node.expect_referenced_by_name = {}
            if not hasattr(next_node, 'expect_referenced_by_id'):
                next_node.expect_referenced_by_id = {}
            for id in target['ids']:
                # Update IDs to node mapping.
                self.document.ids[id] = next_node
                # If next_node is referenced by id ``id``, this
                # target shall be marked as referenced.
                next_node.expect_referenced_by_id[id] = target
            for name in target['names']:
                next_node.expect_referenced_by_name[name] = target
            # If there are any expect_referenced_by_... attributes
            # in target set, copy them to next_node.
            next_node.expect_referenced_by_name.update(
                getattr(target, 'expect_referenced_by_name', {}))
            next_node.expect_referenced_by_id.update(
                getattr(target, 'expect_referenced_by_id', {}))
            # Set refid to point to the first former ID of target
            # which is now an ID of next_node.
            target['refid'] = target['ids'][0]
            # Clear ids and names; they have been moved to
            # next_node.
            target['ids'] = []
            target['names'] = []
            self.document.note_refid(target)
def apply(self): smart_quotes = self.document.settings.smart_quotes if not smart_quotes: return try: alternative = smart_quotes.startswith('alt') except AttributeError: alternative = False # print repr(alternative) document_language = self.document.settings.language_code # "Educate" quotes in normal text. Handle each block of text # (TextElement node) as a unit to keep context around inline nodes: for node in self.document.traverse(nodes.TextElement): # skip preformatted text blocks and special elements: if isinstance(node, (nodes.FixedTextElement, nodes.Special)): continue # nested TextElements are not "block-level" elements: if isinstance(node.parent, nodes.TextElement): continue # list of text nodes in the "text block": txtnodes = [txtnode for txtnode in node.traverse(nodes.Text) if not isinstance(txtnode.parent, nodes.option_string)] # language: use typographical quotes for language "lang" lang = node.get_language_code(document_language) # use alternative form if `smart-quotes` setting starts with "alt": if alternative: if '-x-altquot' in lang: lang = lang.replace('-x-altquot', '') else: lang += '-x-altquot' # drop subtags missing in quotes: for tag in utils.normalize_language_tag(lang): if tag in smartquotes.smartchars.quotes: lang = tag break else: # language not supported: (keep ASCII quotes) if lang not in self.unsupported_languages: self.document.reporter.warning('No smart quotes ' 'defined for language "%s".'%lang, base_node=node) self.unsupported_languages.add(lang) lang = '' # Iterator educating quotes in plain text: # '2': set all, using old school en- and em- dash shortcuts teacher = smartquotes.educate_tokens(self.get_tokens(txtnodes), attr='2', language=lang) for txtnode, newtext in zip(txtnodes, teacher): txtnode.parent.replace(txtnode, nodes.Text(newtext)) self.unsupported_languages = set() # reset

def extract_bibliographic(self, field_list):
    docinfo = nodes.docinfo()
    bibliofields = self.language.bibliographic_fields
    labels = self.language.labels
    topics = {'dedication': None, 'abstract': None}
    for field in field_list:
        try:
            name = field[0][0].astext()
            normedname = nodes.make_id(name)
            if not (len(field) == 2 and normedname in bibliofields
                    and self.check_empty_biblio_field(field, name)):
                raise TransformError
            canonical = bibliofields[normedname]
            biblioclass = self.biblio_nodes[canonical]
            if issubclass(biblioclass, nodes.TextElement):
                if not self.check_compound_biblio_field(field, name):
                    raise TransformError
                utils.clean_rcs_keywords(
                    field[1][0], self.rcs_keyword_substitutions)
                docinfo.append(biblioclass('', '', *field[1][0]))
            elif issubclass(biblioclass, nodes.authors):
                self.extract_authors(field, name, docinfo)
            elif issubclass(biblioclass, nodes.topic):
                if topics[canonical]:
                    field[-1] += self.document.reporter.warning(
                        'There can only be one "%s" field.' % name,
                        base_node=field)
                    raise TransformError
                title = nodes.title(name, labels[canonical])
                topics[canonical] = biblioclass(
                    '', title, classes=[canonical], *field[1].children)
            else:
                docinfo.append(biblioclass('', *field[1].children))
        except TransformError:
            if len(field[-1]) == 1 \
                   and isinstance(field[-1][0], nodes.paragraph):
                utils.clean_rcs_keywords(
                    field[-1][0], self.rcs_keyword_substitutions)
            if normedname and normedname not in bibliofields:
                field['classes'].append(normedname)
            docinfo.append(field)
    nodelist = []
    if len(docinfo) != 0:
        nodelist.append(docinfo)
    for name in ('dedication', 'abstract'):
        if topics[name]:
            nodelist.append(topics[name])
    return nodelist

def apply(self):
    smart_quotes = self.document.settings.smart_quotes
    if not smart_quotes:
        return
    try:
        alternative = smart_quotes.startswith('alt')
    except AttributeError:
        alternative = False
    # print repr(alternative)

    document_language = self.document.settings.language_code
    lc_smartquotes = self.document.settings.smartquotes_locales
    if lc_smartquotes:
        smartquotes.smartchars.quotes.update(dict(lc_smartquotes))

    # "Educate" quotes in normal text. Handle each block of text
    # (TextElement node) as a unit to keep context around inline nodes:
    for node in self.document.traverse(nodes.TextElement):
        # skip preformatted text blocks and special elements:
        if isinstance(node, self.nodes_to_skip):
            continue
        # nested TextElements are not "block-level" elements:
        if isinstance(node.parent, nodes.TextElement):
            continue

        # list of text nodes in the "text block":
        txtnodes = [txtnode for txtnode in node.traverse(nodes.Text)
                    if not isinstance(txtnode.parent, nodes.option_string)]

        # language: use typographical quotes for language "lang"
        lang = node.get_language_code(document_language)
        # use alternative form if `smart-quotes` setting starts with "alt":
        if alternative:
            if '-x-altquot' in lang:
                lang = lang.replace('-x-altquot', '')
            else:
                lang += '-x-altquot'
        # drop unsupported subtags:
        for tag in utils.normalize_language_tag(lang):
            if tag in smartquotes.smartchars.quotes:
                lang = tag
                break
        else:  # language not supported: (keep ASCII quotes)
            if lang not in self.unsupported_languages:
                self.document.reporter.warning('No smart quotes '
                    'defined for language "%s".' % lang, base_node=node)
            self.unsupported_languages.add(lang)
            lang = ''

        # Iterator educating quotes in plain text:
        # (see "utils/smartquotes.py" for the attribute setting)
        teacher = smartquotes.educate_tokens(self.get_tokens(txtnodes),
                                             attr=self.smartquotes_action,
                                             language=lang)

        for txtnode, newtext in zip(txtnodes, teacher):
            txtnode.parent.replace(txtnode,
                                   nodes.Text(newtext,
                                              rawsource=txtnode.rawsource))

    self.unsupported_languages = set()  # reset
def extract_bibliographic(self, field_list): docinfo = nodes.docinfo() bibliofields = self.language.bibliographic_fields labels = self.language.labels topics = {'dedication': None, 'abstract': None} for field in field_list: try: name = field[0][0].astext() normedname = nodes.fully_normalize_name(name) if not (len(field) == 2 and normedname in bibliofields and self.check_empty_biblio_field(field, name)): raise TransformError canonical = bibliofields[normedname] biblioclass = self.biblio_nodes[canonical] if issubclass(biblioclass, nodes.TextElement): if not self.check_compound_biblio_field(field, name): raise TransformError utils.clean_rcs_keywords( field[1][0], self.rcs_keyword_substitutions) docinfo.append(biblioclass('', '', *field[1][0])) elif issubclass(biblioclass, nodes.authors): self.extract_authors(field, name, docinfo) elif issubclass(biblioclass, nodes.topic): if topics[canonical]: field[-1] += self.document.reporter.warning( 'There can only be one "%s" field.' % name, base_node=field) raise TransformError title = nodes.title(name, labels[canonical]) topics[canonical] = biblioclass( '', title, classes=[canonical], *field[1].children) else: docinfo.append(biblioclass('', *field[1].children)) except TransformError: if len(field[-1]) == 1 \ and isinstance(field[-1][0], nodes.paragraph): utils.clean_rcs_keywords( field[-1][0], self.rcs_keyword_substitutions) if normedname not in bibliofields: classvalue = nodes.make_id(normedname) if classvalue: field['classes'].append(classvalue) docinfo.append(field) nodelist = [] if len(docinfo) != 0: nodelist.append(docinfo) for name in ('dedication', 'abstract'): if topics[name]: nodelist.append(topics[name]) return nodelist

def extract_bibliographic(self, field_list):
    docinfo = nodes.docinfo()
    bibliofields = self.language.bibliographic_fields
    labels = self.language.labels
    topics = {'dedication': None, 'abstract': None}
    for field in field_list:
        try:
            name = field[0][0].astext()
            normedname = nodes.fully_normalize_name(name)
            if not (len(field) == 2 and normedname in bibliofields
                    and self.check_empty_biblio_field(field, name)):
                raise TransformError
            canonical = bibliofields[normedname]
            biblioclass = self.biblio_nodes[canonical]
            if issubclass(biblioclass, nodes.TextElement):
                if not self.check_compound_biblio_field(field, name):
                    raise TransformError
                utils.clean_rcs_keywords(
                    field[1][0], self.rcs_keyword_substitutions)
                docinfo.append(biblioclass('', '', *field[1][0]))
            elif issubclass(biblioclass, nodes.authors):
                self.extract_authors(field, name, docinfo)
            elif issubclass(biblioclass, nodes.topic):
                if topics[canonical]:
                    field[-1] += self.document.reporter.warning(
                        'There can only be one "%s" field.' % name,
                        base_node=field)
                    raise TransformError
                title = nodes.title(name, labels[canonical])
                topics[canonical] = biblioclass(
                    '', title, classes=[canonical], *field[1].children)
            else:
                docinfo.append(biblioclass('', *field[1].children))
        except TransformError:
            if len(field[-1]) == 1 \
                   and isinstance(field[-1][0], nodes.paragraph):
                utils.clean_rcs_keywords(
                    field[-1][0], self.rcs_keyword_substitutions)
            docinfo.append(field)
    nodelist = []
    if len(docinfo) != 0:
        nodelist.append(docinfo)
    for name in ('dedication', 'abstract'):
        if topics[name]:
            nodelist.append(topics[name])
    return nodelist