The following 50 code examples, extracted from open-source Python projects, illustrate how to use docutils.nodes.inline().
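Before the extracted examples, here is a minimal orientation sketch of how nodes.inline() is typically called. This fragment is illustrative only and does not come from any of the projects below; it assumes a standard docutils installation. The constructor follows the usual TextElement pattern nodes.inline(rawsource, text, *children, **attributes), and the 'classes' attribute (used throughout the examples, e.g. 'link-container' and 'accelerator') becomes the CSS classes of the rendered span.

from docutils import nodes

# Minimal sketch (assumed usage, not taken from the projects listed below).
# An empty inline wrapper with a CSS class:
node = nodes.inline('', '', classes=['link-container'])

# Children can be appended with += or passed positionally to the constructor:
node += nodes.Text('press ')                              # plain text child
node += nodes.inline('', 'F', classes=['accelerator'])    # nested inline with a class

print(node.astext())  # -> "press F"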
def _strong_node(self, refuri, text_nodes):
    cp = docutils_nodes.inline(classes=['link-container'])

    n1 = docutils_nodes.strong()
    n1.extend(text_nodes)
    cp.append(n1)

    paramlink = docutils_nodes.reference(
        '', '',
        docutils_nodes.Text(u"¶", u"¶"),
        refid="",
        # paramlink is our own CSS class, headerlink
        # is theirs.  Trying to get everything we can for existing
        # symbols...
        classes=['paramlink', 'headerlink']
    )
    cp.append(paramlink)

    return cp
def build_reference_node(self, fromdocname, builder, docname, labelid,
                         sectname, rolename, **options):
    nodeclass = options.pop('nodeclass', nodes.reference)
    newnode = nodeclass('', '', internal=True, **options)
    innernode = nodes.inline(sectname, sectname)
    if innernode.get('classes') is not None:
        innernode['classes'].append('std')
        innernode['classes'].append('std-' + rolename)
    if docname == fromdocname:
        newnode['refid'] = labelid
    else:
        # set more info in contnode; in case the
        # get_relative_uri call raises NoUri,
        # the builder will then have to resolve these
        contnode = addnodes.pending_xref('')
        contnode['refdocname'] = docname
        contnode['refsectname'] = sectname
        newnode['refuri'] = builder.get_relative_uri(
            fromdocname, docname)
        if labelid:
            newnode['refuri'] += '#' + labelid
    newnode.append(innernode)
    return newnode
def build_reference_node(self, fromdocname, builder, docname, labelid,
                         sectname, **options):
    nodeclass = options.pop('nodeclass', nodes.reference)
    newnode = nodeclass('', '', internal=True, **options)
    innernode = nodes.inline(sectname, sectname)
    if docname == fromdocname:
        newnode['refid'] = labelid
    else:
        # set more info in contnode; in case the
        # get_relative_uri call raises NoUri,
        # the builder will then have to resolve these
        contnode = addnodes.pending_xref('')
        contnode['refdocname'] = docname
        contnode['refsectname'] = sectname
        newnode['refuri'] = builder.get_relative_uri(
            fromdocname, docname)
        if labelid:
            newnode['refuri'] += '#' + labelid
    newnode.append(innernode)
    return newnode
def menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    text = utils.unescape(text)
    if typ == 'menuselection':
        text = text.replace('-->', u'\N{TRIANGULAR BULLET}')
    spans = _amp_re.split(text)

    node = nodes.inline(rawtext=rawtext)
    for i, span in enumerate(spans):
        span = span.replace('&&', '&')
        if i == 0:
            if len(span) > 0:
                textnode = nodes.Text(span)
                node += textnode
            continue
        accel_node = nodes.inline()
        letter_node = nodes.Text(span[0])
        accel_node += letter_node
        accel_node['classes'].append('accelerator')
        node += accel_node
        textnode = nodes.Text(span[1:])
        node += textnode

    node['classes'].append(typ)
    return [node], []
def coq_code_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    #pylint: disable=dangerous-default-value
    """An inline role for Coq source code"""
    options['language'] = 'Coq'
    return code_role(role, rawtext, text, lineno, inliner, options, content)
    ## Too heavy:
    ## Forked from code_role to use our custom tokenizer; this doesn't work for
    ## snippets though: for example CoqDoc swallows the parentheses around this:
    ## “(a: A) (b: B)”
    # set_classes(options)
    # classes = ['code', 'coq']
    # code = utils.unescape(text, 1)
    # node = nodes.literal(rawtext, '', *highlight_using_coqdoc(code), classes=classes)
    # return [node], []

# TODO pass different languages?
def colorize_str(self, raw):
    """Parse raw (an ANSI-colored output string from Coqtop) into Sphinx nodes."""
    last_end = 0
    for match in AnsiColorsParser.COLOR_PATTERN.finditer(raw):
        self._add_text(raw, last_end, match.start())
        last_end = match.end()
        classes = ansicolors.parse_ansi(match.group(1))
        if 'ansi-reset' in classes:
            self._finalize_pending_nodes()
        else:
            node = nodes.inline()
            self.pending_nodes.append(node)
            node['classes'].extend(classes)
    self._add_text(raw, last_end, len(raw))
    self._finalize_pending_nodes()
    return self.new_nodes
def GrammarProductionRole(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    """An inline role to declare grammar productions that are not in fact
    included in a `productionlist` directive.

    Useful to informally introduce a production, as part of running text
    """
    #pylint: disable=dangerous-default-value, unused-argument
    env = inliner.document.settings.env
    targetid = 'grammar-token-{}'.format(text)
    target = nodes.target('', '', ids=[targetid])
    inliner.document.note_explicit_target(target)
    code = nodes.literal(rawtext, text, role=typ.lower())
    node = nodes.inline(rawtext, '', target, code, classes=['inline-grammar-production'])
    set_role_source_info(inliner, lineno, node)
    env.domaindata['std']['objects']['token', text] = env.docname, targetid
    return [node], []
def get_changelog(self, repo, commit):
    item = nodes.list_item()
    item.append(self._make_message_node(commit.message, commit.hexsha))
    item.append(nodes.inline(text=six.text_type(' by ')))
    item.append(nodes.emphasis(text=six.text_type(commit.author.name)))
    item.append(nodes.inline(text=six.text_type(' at ')))
    commit_date = datetime.fromtimestamp(commit.authored_date)
    item.append(nodes.emphasis(text=six.text_type(commit_date)))
    if OPTION_WITH_REF_URL in self.options:
        ref_url = repo.get_commit_url(commit.hexsha)
        ref = nodes.reference('', commit.hexsha, refuri=ref_url)
        item.append(nodes.paragraph('', '', ref))
    if OPTION_INCLUDE_DIFF in self.options:
        diff = repo.get_diff(commit.hexsha)
        item.append(self._make_diff_node(diff, commit.hexsha))
    return item
def get_changelog(self, repo, commit):
    item = nodes.list_item()
    item.append(self._make_message_node(commit['summary'], commit['sha']))
    item.append(nodes.inline(text=six.text_type(' by ')))
    item.append(nodes.emphasis(text=six.text_type(commit['user'])))
    item.append(nodes.inline(text=six.text_type(' at ')))
    item.append(nodes.emphasis(text=six.text_type(commit['date'])))
    if OPTION_WITH_REF_URL in self.options:
        ref_url = repo.get_commit_url(commit['sha'])
        ref = nodes.reference('', commit['sha'], refuri=ref_url)
        item.append(nodes.paragraph('', '', ref))
    if OPTION_INCLUDE_DIFF in self.options:
        diff = repo.get_diff(commit['revision'])
        item.append(self._make_diff_node(diff, commit['sha']))
    return item
def run(self):
    node = addnodes.versionmodified()
    node.document = self.state.document
    set_source_info(self, node)
    node['type'] = self.name
    node['version'] = self.arguments[0]
    text = versionlabels[self.name] % self.arguments[0]
    if len(self.arguments) == 2:
        inodes, messages = self.state.inline_text(self.arguments[1],
                                                  self.lineno + 1)
        para = nodes.paragraph(self.arguments[1], '', *inodes,
                               translatable=False)
        set_source_info(self, para)
        node.append(para)
    else:
        messages = []
    if self.content:
        self.state.nested_parse(self.content, self.content_offset, node)
    if len(node):
        if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
            content = nodes.inline(node[0].rawsource, translatable=True)
            content.source = node[0].source
            content.line = node[0].line
            content += node[0].children
            node[0].replace_self(nodes.paragraph('', '', content,
                                                 translatable=False))
        node[0].insert(0, nodes.inline('', '%s: ' % text,
                                       classes=['versionmodified']))
    else:
        para = nodes.paragraph('', '',
                               nodes.inline('', '%s.' % text,
                                            classes=['versionmodified']),
                               translatable=False)
        node.append(para)
    env = self.state.document.settings.env
    # XXX should record node.source as well
    env.note_versionchange(node['type'], node['version'], node, node.line)
    return [node] + messages
def apply(self):
    config = self.document.settings.env.config
    github_project = config.github_project
    issue_pattern = config.github_issue_pattern
    if isinstance(issue_pattern, str_t):
        issue_pattern = re.compile(issue_pattern)
    for node in self.document.traverse(nodes.Text):
        parent = node.parent
        if isinstance(parent, (nodes.literal, nodes.FixedTextElement)):
            continue
        text = text_t(node)
        new_nodes = []
        last_issue_ref_end = 0
        for match in issue_pattern.finditer(text):
            head = text[last_issue_ref_end:match.start()]
            if head:
                new_nodes.append(nodes.Text(head))
            last_issue_ref_end = match.end()
            issuetext = match.group(0)
            issue_id = match.group(1)
            refnode = pending_xref()
            refnode['reftarget'] = issue_id
            refnode['reftype'] = 'issue'
            refnode['github_project'] = github_project
            reftitle = issuetext
            refnode.append(nodes.inline(
                issuetext, reftitle, classes=['xref', 'issue']))
            new_nodes.append(refnode)
        if not new_nodes:
            continue
        tail = text[last_issue_ref_end:]
        if tail:
            new_nodes.append(nodes.Text(tail))
        parent.replace(node, new_nodes)
def resolve_issue_reference(app, env, node, contnode):
    if node['reftype'] != 'issue':
        return
    issue_id = node['reftarget']
    project = node['github_project']
    issue = Issue(issue_id, None,
                  URL.format(project=project, issue_id=issue_id))
    conttext = text_t(contnode[0])
    formatted_conttext = nodes.Text(conttext.format(issue=issue))
    formatted_contnode = nodes.inline(conttext, formatted_conttext,
                                      classes=contnode['classes'])
    return make_issue_reference(issue, formatted_contnode)
def make_target_footnote(self, refuri, refs, notes):
    if refuri in notes:  # duplicate?
        footnote = notes[refuri]
        assert len(footnote['names']) == 1
        footnote_name = footnote['names'][0]
    else:                # original
        footnote = nodes.footnote()
        footnote_id = self.document.set_id(footnote)
        # Use uppercase letters and a colon; they can't be
        # produced inside names by the parser.
        footnote_name = 'TARGET_NOTE: ' + footnote_id
        footnote['auto'] = 1
        footnote['names'] = [footnote_name]
        footnote_paragraph = nodes.paragraph()
        footnote_paragraph += nodes.reference('', refuri, refuri=refuri)
        footnote += footnote_paragraph
        self.document.note_autofootnote(footnote)
        self.document.note_explicit_target(footnote, footnote)
    for ref in refs:
        if isinstance(ref, nodes.target):
            continue
        refnode = nodes.footnote_reference(refname=footnote_name, auto=1)
        refnode['classes'] += self.classes
        self.document.note_autofootnote_ref(refnode)
        self.document.note_footnote_ref(refnode)
        index = ref.parent.index(ref) + 1
        reflist = [refnode]
        if not utils.get_trim_footnote_ref_space(self.document.settings):
            if self.classes:
                reflist.insert(0, nodes.inline(text=' ', Classes=self.classes))
            else:
                reflist.insert(0, nodes.Text(' '))
        ref.parent.insert(index, reflist)
    return footnote
def generic_custom_role(role, rawtext, text, lineno, inliner,
                        options={}, content=[]):
    """"""
    # Once nested inline markup is implemented, this and other methods should
    # recursively call inliner.nested_parse().
    set_classes(options)
    return [nodes.inline(rawtext, utils.unescape(text), **options)], []
def code_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    set_classes(options)
    language = options.get('language', '')
    classes = ['code']
    if 'classes' in options:
        classes.extend(options['classes'])
    if language and language not in classes:
        classes.append(language)
    try:
        tokens = Lexer(utils.unescape(text, 1), language,
                       inliner.document.settings.syntax_highlight)
    except LexerError, error:
        msg = inliner.reporter.warning(error)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]

    node = nodes.literal(rawtext, '', classes=classes)

    # analyse content and add nodes for every token
    for classes, value in tokens:
        # print (classes, value)
        if classes:
            node += nodes.inline(value, value, classes=classes)
        else:
            # insert as Text to decrease the verbosity of the output
            node += nodes.Text(value, value)

    return [node], []
def _link_node(self, refuri, text_nodes):
    link = docutils_nodes.reference(
        '', '', text_nodes[0], refuri=refuri)
    link.extend(text_nodes[1:])
    cp = docutils_nodes.inline(classes=['link-container'])
    cp.append(link)
    return cp
def run(self):
    node = addnodes.versionmodified()
    node.document = self.state.document
    node['type'] = 'deprecated-removed'
    version = (self.arguments[0], self.arguments[1])
    node['version'] = version
    text = self._label % version
    if len(self.arguments) == 3:
        inodes, messages = self.state.inline_text(self.arguments[2],
                                                  self.lineno + 1)
        para = nodes.paragraph(self.arguments[2], '', *inodes)
        node.append(para)
    else:
        messages = []
    if self.content:
        self.state.nested_parse(self.content, self.content_offset, node)
    if len(node):
        if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
            content = nodes.inline(node[0].rawsource, translatable=True)
            content.source = node[0].source
            content.line = node[0].line
            content += node[0].children
            node[0].replace_self(nodes.paragraph('', '', content))
        node[0].insert(0, nodes.inline('', '%s: ' % text,
                                       classes=['versionmodified']))
    else:
        para = nodes.paragraph('', '',
                               nodes.inline('', '%s.' % text,
                                            classes=['versionmodified']))
        node.append(para)
    env = self.state.document.settings.env
    env.note_versionchange('deprecated', version[0], node, self.lineno)
    return [node] + messages


# Support for including Misc/NEWS
def code_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    set_classes(options)
    language = options.get('language', '')
    classes = ['code']
    if 'classes' in options:
        classes.extend(options['classes'])
    if language and language not in classes:
        classes.append(language)
    try:
        tokens = Lexer(utils.unescape(text, 1), language,
                       inliner.document.settings.syntax_highlight)
    except LexerError, error:
        msg = inliner.reporter.warning(error)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]

    node = nodes.literal(rawtext, '', classes=classes)

    # analyze content and add nodes for every token
    for classes, value in tokens:
        # print (classes, value)
        if classes:
            node += nodes.inline(value, value, classes=classes)
        else:
            # insert as Text to decrease the verbosity of the output
            node += nodes.Text(value, value)

    return [node], []
def run(self):
    arguments = self.arguments[0].split('\n')
    env = self.state.document.settings.env
    targetid = 'index-%s' % env.new_serialno('index')
    targetnode = nodes.target('', '', ids=[targetid])
    self.state.document.note_explicit_target(targetnode)
    indexnode = addnodes.index()
    indexnode['entries'] = ne = []
    indexnode['inline'] = False
    set_source_info(self, indexnode)
    for entry in arguments:
        ne.extend(process_index_entry(entry, targetid))
    return [indexnode, targetnode]
def apply(self):
    from sphinx.builders.gettext import MessageCatalogBuilder
    env = self.document.settings.env
    builder = env.app.builder
    if isinstance(builder, MessageCatalogBuilder):
        return
    for inline in self.document.traverse(nodes.inline):
        if 'translatable' in inline:
            inline.parent.remove(inline)
            inline.parent += inline.children
def make_field(self, types, domain, item):
    fieldarg, content = item
    fieldname = nodes.field_name('', self.label)
    if fieldarg:
        fieldname += nodes.Text(' ')
        fieldname += self.make_xref(self.rolename, domain, fieldarg,
                                    nodes.Text)

    if len(content) == 1 and (
            isinstance(content[0], nodes.Text) or
            (isinstance(content[0], nodes.inline) and len(content[0]) == 1 and
             isinstance(content[0][0], nodes.Text))):
        content = [self.make_xref(self.bodyrolename, domain,
                                  content[0].astext(), contnode=content[0])]
    fieldbody = nodes.field_body('', nodes.paragraph('', '', *content))
    return nodes.field('', fieldname, fieldbody)
def parse_notation(notation, source, line, rawtext=None):
    """Parse notation and wrap it in an inline node"""
    node = nodes.inline(rawtext or notation, '', *sphinxify(notation),
                        classes=['notation'])
    node.source, node.line = source, line
    return node
def highlight_using_coqdoc(sentence):
    """Lex sentence using coqdoc, and yield inline nodes for each token"""
    tokens = coqdoc.lex(utils.unescape(sentence, 1))
    for classes, value in tokens:
        yield nodes.inline(value, value, classes=classes)
def NotationRole(role, rawtext, text, lineno, inliner, options={}, content=[]):
    #pylint: disable=unused-argument, dangerous-default-value
    """An inline role for notations"""
    notation = utils.unescape(text, 1)
    position = inliner.reporter.get_source_and_line(lineno)
    return [nodes.literal(rawtext, '',
                          parse_notation(notation, *position, rawtext=rawtext))], []
def run(self):
    # Uses a ‘container’ instead of a ‘literal_block’ to disable
    # Pygments-based post-processing (we could also set rawsource to '')
    content = '\n'.join(self.content)
    node = nodes.inline(content, '', *highlight_using_coqdoc(content))
    wrapper = nodes.container(content, node, classes=['coqdoc', 'literal-block'])
    return [wrapper]
def add_coqtop_output(self):
    """Add coqtop's responses to a Sphinx AST

    Finds nodes to process using is_coqtop_block."""
    with CoqTop(color=True) as repl:
        for node in self.document.traverse(CoqtopBlocksTransform.is_coqtop_block):
            options = node['coqtop_options']
            opt_undo, opt_reset, opt_input, opt_output = self.parse_options(options)

            if opt_reset:
                repl.sendone("Reset Initial.")
            pairs = []
            for sentence in self.split_sentences(node.rawsource):
                pairs.append((sentence, repl.sendone(sentence)))
            if opt_undo:
                repl.sendone("Undo {}.".format(len(pairs)))

            dli = nodes.definition_list_item()
            for sentence, output in pairs:
                # Use Coqdoc to highlight input
                in_chunks = highlight_using_coqdoc(sentence)
                dli += nodes.term(sentence, '', *in_chunks,
                                  classes=self.block_classes(opt_input))
                # Parse ANSI sequences to highlight output
                out_chunks = AnsiColorsParser().colorize_str(output)
                dli += nodes.definition(output, *out_chunks,
                                        classes=self.block_classes(opt_output, output))
            node.clear()
            node.rawsource = self.make_rawsource(pairs, opt_input, opt_output)
            node['classes'].extend(self.block_classes(opt_input or opt_output))
            node += nodes.inline('', '', classes=['coqtop-reset'] * opt_reset)
            node += nodes.definition_list(node.rawsource, dli)
def visitRepeat(self, ctx:TacticNotationsParser.RepeatContext):
    # Uses inline nodes instead of subscript and superscript to ensure that
    # we get the right customization hooks at the LaTeX level
    wrapper = nodes.inline('', '', classes=['repeat-wrapper'])
    wrapper += nodes.inline('', '', *self.visitChildren(ctx), classes=["repeat"])

    repeat_marker = ctx.LGROUP().getText()[1]
    wrapper += nodes.inline(repeat_marker, repeat_marker, classes=['notation-sup'])

    separator = ctx.ATOM()
    if separator:
        sep = separator.getText()
        wrapper += nodes.inline(sep, sep, classes=['notation-sub'])

    return [wrapper]
def visitCurlies(self, ctx:TacticNotationsParser.CurliesContext):
    sp = nodes.inline('', '', classes=["curlies"])
    sp += nodes.Text("{")
    sp.extend(self.visitChildren(ctx))
    sp += nodes.Text("}")
    return [sp]
def visitHole(self, ctx:TacticNotationsParser.HoleContext):
    hole = ctx.ID().getText()
    token_name = hole[1:]
    node = nodes.inline(hole, token_name, classes=["hole"])
    return [addnodes.pending_xref(token_name, node, reftype='token',
                                  refdomain='std', reftarget=token_name)]
def _create_notes_paragraph(self, notes):
    """Constructs a paragraph which represents the implementation notes

    The paragraph consists of text and clickable URL nodes if links were
    given in the notes.
    """
    para = nodes.paragraph()
    # links could start with http:// or https://
    link_idxs = [m.start() for m in re.finditer('https?://', notes)]
    start_idx = 0
    for link_idx in link_idxs:
        # assume the notes start with text (could be empty)
        para.append(nodes.inline(text=notes[start_idx:link_idx]))
        # create a URL node until the next text or the end of the notes
        link_end_idx = notes.find(" ", link_idx)
        if link_end_idx == -1:
            # In case the notes end with a link without a blank
            link_end_idx = len(notes)
        uri = notes[link_idx:link_end_idx + 1]
        para.append(nodes.reference("", uri, refuri=uri))
        start_idx = link_end_idx + 1

    # get all text after the last link (could be empty) or all of the
    # text if no link was given
    para.append(nodes.inline(text=notes[start_idx:]))
    return para
def apply(self, **kwargs):
    for ref in self.document.traverse(nodes.reference):
        # Skip inter-document links
        if 'refname' in ref:
            if self.document.nameids.get(ref['refname']):
                continue

        # Convert remaining non-external links to intra-document references
        refuri = ref['refuri'] if 'refuri' in ref else None
        if not refuri or not '://' in refuri:
            # Get the raw text (strip ``s and _)
            rawtext = re.sub('^`(.*)`_?$', '\\1', ref.rawsource)

            # Create xref node
            xref = addnodes.pending_xref(
                rawtext, reftype='ref', refdomain='std',
                refexplicit=(refuri is not None))

            # Rewrite section links
            if not refuri:
                refuri = ref['name']

            # Fill target information
            xref['reftarget'] = refuri.lower()
            xref['refwarn'] = True
            xref['refdoc'] = self.document.settings.env.docname

            # Add ref text
            xref += nodes.inline(rawtext, ref['name'], classes=ref['classes'])

            # Replace the old node
            ref.replace_self(xref)


#==============================================================================
def code_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    set_classes(options)
    language = options.get('language', '')
    classes = ['code']
    if 'classes' in options:
        classes.extend(options['classes'])
    if language and language not in classes:
        classes.append(language)
    try:
        tokens = Lexer(utils.unescape(text, 1), language,
                       inliner.document.settings.syntax_highlight)
    except LexerError as error:
        msg = inliner.reporter.warning(error)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]

    node = nodes.literal(rawtext, '', classes=classes)

    # analyse content and add nodes for every token
    for classes, value in tokens:
        # print (classes, value)
        if classes:
            node += nodes.inline(value, value, classes=classes)
        else:
            # insert as Text to decrease the verbosity of the output
            node += nodes.Text(value, value)

    return [node], []
def run(self):
    head = nodes.paragraph()
    head.append(nodes.inline("Wraps API:", "Wraps API: "))
    source = '\n'.join(self.content.data)
    literal_node = nodes.literal_block(source, source)
    literal_node['language'] = 'C++'
    return [head, literal_node]
def run(self):
    source = '\n'.join(self.content.data)
    literal = nodes.literal_block(source, source)
    literal['visualnodetype'] = True
    literal['language'] = 'python'

    # docutils document model is insane!
    head1 = nodes.paragraph()
    introduction = self.options.pop('introduction', "Example:")
    head1.append(nodes.inline(introduction, introduction))

    inter = self.options.pop('inter', "Outputs:")
    head2 = nodes.paragraph()
    head2.append(
        nodes.section("foo", nodes.inline(inter, inter))
    )

    directive_nodes = [
        head1,
        literal,
        head2,
        self.get_image_node(source)
    ]
    return directive_nodes
def run(self):
    self.assert_has_content()
    if self.arguments:
        language = self.arguments[0]
    else:
        language = ''
    set_classes(self.options)
    classes = ['code']
    if language:
        classes.append(language)
    if 'classes' in self.options:
        classes.extend(self.options['classes'])

    # set up lexical analyzer
    try:
        tokens = Lexer(u'\n'.join(self.content), language,
                       self.state.document.settings.syntax_highlight)
    except LexerError, error:
        raise self.warning(error)

    if 'number-lines' in self.options:
        # optional argument `startline`, defaults to 1
        try:
            startline = int(self.options['number-lines'] or 1)
        except ValueError:
            raise self.error(':number-lines: with non-integer start value')
        endline = startline + len(self.content)
        # add linenumber filter:
        tokens = NumberLines(tokens, startline, endline)

    node = nodes.literal_block('\n'.join(self.content), classes=classes)
    self.add_name(node)
    # if called from "include", set the source
    if 'source' in self.options:
        node.attributes['source'] = self.options['source']
    # analyze content and add nodes for every token
    for classes, value in tokens:
        # print (classes, value)
        if classes:
            node += nodes.inline(value, value, classes=classes)
        else:
            # insert as Text to decrease the verbosity of the output
            node += nodes.Text(value, value)
    return [node]