我们从 Python 开源项目中提取了以下 25 个代码示例,用于说明如何使用 docutils.nodes.term()。
def definition_list_item(self, termline):
    """Parse one definition-list item starting at *termline*.

    Returns ``(itemnode, blank_finish)`` where *itemnode* is a
    ``definition_list_item`` node holding the term(s) and the parsed
    definition body, and *blank_finish* reports whether the indented
    block ended with a blank line (propagated from the state machine).
    """
    # Consume the indented definition body that follows the term line.
    indented, indent, line_offset, blank_finish = \
        self.state_machine.get_indented()
    itemnode = nodes.definition_list_item(
        '\n'.join(termline + list(indented)))
    # -1: point at the term line itself, not the first indented line.
    lineno = self.state_machine.abs_line_number() - 1
    (itemnode.source,
     itemnode.line) = self.state_machine.get_source_and_line(lineno)
    # self.term() splits the line into a term node plus optional
    # classifier nodes, and may produce system messages.
    termlist, messages = self.term(termline, lineno)
    itemnode += termlist
    definition = nodes.definition('', *messages)
    itemnode += definition
    # A term ending in '::' usually means the author wanted a literal
    # block but forgot the blank line; warn but keep parsing as a
    # definition list item.
    if termline[0][-2:] == '::':
        definition += self.reporter.info(
            'Blank line missing before literal block (after the "::")? '
            'Interpreted as a definition list item.',
            line=lineno+1)
    # Recursively parse the definition body into the definition node.
    self.nested_parse(indented, input_offset=line_offset, node=definition)
    return itemnode, blank_finish
def term(self, lines, lineno):
    """Return a definition_list's term and optional classifiers.

    *lines* must hold exactly one term line; it is inline-parsed and
    split on the classifier delimiter (`` : ``).  Returns
    ``(node_list, messages)`` where *node_list* starts with the
    ``term`` node followed by any ``classifier`` nodes, and *messages*
    are system messages produced by inline parsing.
    """
    assert len(lines) == 1
    text_nodes, messages = self.inline_text(lines[0], lineno)
    term_node = nodes.term()
    (term_node.source,
     term_node.line) = self.state_machine.get_source_and_line(lineno)
    term_node.rawsource = unescape(lines[0])
    node_list = [term_node]
    # Idiom fix: iterate the nodes directly instead of indexing with
    # range(len(...)).  Behavior is unchanged.
    for node in text_nodes:
        if isinstance(node, nodes.Text):
            parts = self.classifier_delimiter.split(node.rawsource)
            if len(parts) == 1:
                # No delimiter in this text chunk: it belongs to the
                # current (last) term/classifier node.
                node_list[-1] += node
            else:
                # Text before the first delimiter finishes the current
                # node; each remaining part starts a new classifier.
                node_list[-1] += nodes.Text(parts[0].rstrip())
                for part in parts[1:]:
                    classifier_node = nodes.classifier('', part)
                    node_list.append(classifier_node)
        else:
            # Non-text inline nodes (emphasis, refs, ...) attach to the
            # current node unchanged.
            node_list[-1] += node
    return node_list, messages
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
    """Resolve an ``:any:`` cross-reference against this domain.

    Tries the 'ref' and 'option' roles first (never 'keyword'), then
    every registered object type.  Returns a list of
    ``('std:<role>', refnode)`` pairs for each successful resolution.
    """
    matches = []
    lowered = target.lower()  # :ref: lowercases its target automatically
    for role in ('ref', 'option'):  # do not try "keyword"
        wanted = lowered if role == 'ref' else target
        resolved = self.resolve_xref(env, fromdocname, builder, role,
                                     wanted, node, contnode)
        if resolved:
            matches.append(('std:' + role, resolved))
    # All other object types; glossary terms are stored lowercased.
    registry = self.data['objects']
    for objtype in self.object_types:
        lookup = (objtype, lowered if objtype == 'term' else target)
        if lookup in registry:
            docname, labelid = registry[lookup]
            matches.append(
                ('std:' + self.role_for_objtype(objtype),
                 make_refnode(builder, fromdocname, docname,
                              labelid, contnode)))
    return matches
def make_termnodes_from_paragraph_node(env, node, new_id=None):
    """Register *node*'s text as a glossary term and build its node list.

    Returns ``(new_id, termtext, new_termnodes)`` where *new_termnodes*
    is an index node, the paragraph's children, and a trailing
    ``termsep`` separator, all carrying *node*'s source location.
    """
    seen_ids = env.temp_data.setdefault('gloss_entries', set())
    std_objects = env.domaindata['std']['objects']
    termtext = node.astext()
    if new_id is None:
        new_id = nodes.make_id('term-' + termtext)
    if new_id in seen_ids:
        # Collision with an earlier term in this document: fall back
        # to a serial id.
        new_id = 'term-' + str(len(seen_ids))
    seen_ids.add(new_id)
    # Glossary lookups are case-insensitive, hence the lowered key.
    std_objects['term', termtext.lower()] = env.docname, new_id
    # Add an index entry too.
    indexnode = addnodes.index()
    indexnode['entries'] = [('single', termtext, new_id, 'main')]
    result = [indexnode]
    result.extend(node.children)
    result.append(addnodes.termsep())
    for termnode in result:
        termnode.source, termnode.line = node.source, node.line
    return new_id, termtext, result
def run(self):
    """Render the directive as a definition list wrapping a math node.

    The first argument is the title (used as the term and to derive an
    ``inference-<id>`` target); the directive content is joined and
    passed through the LaTeX pipeline to build the math node.
    """
    self.assert_has_content()
    title = self.arguments[0]
    content = '\n'.join(self.content)
    math_node = self.make_math_node(self.prepare_latex(content))
    tid = nodes.make_id(title)
    # Explicit target so the inference rule can be cross-referenced.
    target = nodes.target('', '', ids=['inference-' + tid])
    self.state.document.note_explicit_target(target)
    # NOTE(review): ``nodes.description`` is not a standard docutils
    # node class -- presumably added/patched by this project; verify.
    term, desc = nodes.term('', title), nodes.description('', math_node)
    dli = nodes.definition_list_item('', term, desc)
    dl = nodes.definition_list(content, target, dli)
    set_source_info(self, dl)
    return [dl]
def _format_subcommands(self, parser_info):
    """Build a definition list describing each child subcommand.

    Each entry pairs the subcommand's bare usage (bold term) with its
    help text, or 'Undocumented' when no help is available.
    """
    assert 'children' in parser_info
    entries = []
    for subcmd in parser_info['children']:
        help_text = subcmd['help'] if subcmd['help'] else 'Undocumented'
        entries.append(
            nodes.definition_list_item(
                '',
                nodes.term('', '',
                           nodes.strong(text=subcmd['bare_usage'])),
                nodes.definition('', nodes.paragraph(text=help_text))))
    return nodes.definition_list('', *entries)
def split_term_classifiers(line):
    """Split *line* into a term plus its classifiers.

    The delimiter is a colon surrounded by one or more spaces.  A
    trailing ``None`` sentinel is always appended so callers can rely
    on at least one "no classifier" slot.
    """
    return re.split(' +: +', line) + [None]
def make_glossary_term(env, textnodes, index_key, source, lineno, new_id=None):
    """Build a glossary ``term`` node and register it as an xref target.

    Derives an id from the term text (unless *new_id* is given),
    records the term in the std domain's object registry, and embeds
    an index entry inside the returned node.
    """
    term = nodes.term('', '', *textnodes)
    term.source = source
    term.line = lineno
    seen_ids = env.temp_data.setdefault('gloss_entries', set())
    std_objects = env.domaindata['std']['objects']
    termtext = term.astext()
    if new_id is None:
        new_id = nodes.make_id('term-' + termtext)
    if new_id in seen_ids:
        # Duplicate id in this document: fall back to a serial id.
        new_id = 'term-' + str(len(seen_ids))
    seen_ids.add(new_id)
    # Glossary lookups are case-insensitive, hence the lowered key.
    std_objects['term', termtext.lower()] = env.docname, new_id
    # Add an index entry too, carrying the term's source location.
    indexnode = addnodes.index()
    indexnode['entries'] = [('single', termtext, new_id, 'main', index_key)]
    indexnode.source, indexnode.line = term.source, term.line
    term.append(indexnode)
    term['ids'].append(new_id)
    term['names'].append(new_id)
    return term
def make_term_from_paragraph_node(termnodes, ids):
    """Merge *termnodes* into a single ``term`` node.

    The last element of *termnodes* is a dangling trailing separator
    and is dropped.  The merged node inherits the first node's source
    location and receives every id in *ids*.
    """
    merged = nodes.term('', '', *termnodes[:-1])
    merged.source, merged.line = termnodes[0].source, termnodes[0].line
    merged.rawsource = merged.astext()
    merged['ids'].extend(ids)
    merged['names'].extend(ids)
    return merged
def add_coqtop_output(self):
    """Add coqtop's responses to a Sphinx AST

    Finds nodes to process using is_coqtop_block.

    Each coqtop block is replaced in place: its sentences are sent to a
    live coqtop REPL, and the (input, output) pairs are rendered as a
    definition list with highlighted input terms and colorized output
    definitions.
    """
    with CoqTop(color=True) as repl:
        for node in self.document.traverse(CoqtopBlocksTransform.is_coqtop_block):
            options = node['coqtop_options']
            opt_undo, opt_reset, opt_input, opt_output = self.parse_options(options)
            # 'reset' restores a pristine coqtop state before this block.
            if opt_reset:
                repl.sendone("Reset Initial.")
            pairs = []
            for sentence in self.split_sentences(node.rawsource):
                pairs.append((sentence, repl.sendone(sentence)))
            # 'undo' rolls back every sentence just sent, leaving the
            # REPL state as if this block never ran.
            if opt_undo:
                repl.sendone("Undo {}.".format(len(pairs)))
            dli = nodes.definition_list_item()
            for sentence, output in pairs:
                # Use Coqdoc to highlight input
                in_chunks = highlight_using_coqdoc(sentence)
                dli += nodes.term(sentence, '', *in_chunks, classes=self.block_classes(opt_input))
                # Parse ANSI sequences to highlight output
                out_chunks = AnsiColorsParser().colorize_str(output)
                dli += nodes.definition(output, *out_chunks, classes=self.block_classes(opt_output, output))
            # Rebuild the original node around the rendered pairs.
            node.clear()
            node.rawsource = self.make_rawsource(pairs, opt_input, opt_output)
            node['classes'].extend(self.block_classes(opt_input or opt_output))
            # Marker inline is present only when the block reset state
            # (list repetition: [] when opt_reset is falsy).
            node += nodes.inline('', '', classes=['coqtop-reset'] * opt_reset)
            node += nodes.definition_list(node.rawsource, dli)
def map_nested_definitions(nested_content):
    """Map definition-list terms in *nested_content* to definition nodes.

    Scans for definition lists, validates each item's classifier
    (one of '@replace', '@before', '@after'; defaults to '@after'),
    and returns a ``{term_text: definition_node}`` dict with the
    classifier stored on each definition node's attributes.

    Raises:
        Exception: if *nested_content* is None or a classifier is
            unrecognized.
    """
    if nested_content is None:
        raise Exception('Nested content should be iterable, not null')
    # build definition dictionary
    definitions = {}
    for item in nested_content:
        if not isinstance(item, nodes.definition_list):
            continue
        for subitem in item:
            if not isinstance(subitem, nodes.definition_list_item):
                continue
            if not len(subitem.children) > 0:
                continue
            classifier = '@after'
            idx = subitem.first_child_matching_class(nodes.classifier)
            if idx is not None:
                ci = subitem[idx]
                if len(ci.children) > 0:
                    classifier = ci.children[0].astext()
            if classifier is not None and classifier not in (
                    '@replace', '@before', '@after'):
                raise Exception('Unknown classifier: %s' % classifier)
            idx = subitem.first_child_matching_class(nodes.term)
            if idx is not None:
                ch = subitem[idx]
                if len(ch.children) > 0:
                    # NOTE: the term/definition lookup is deliberately
                    # nested -- an entry is recorded only when a term
                    # with text AND a definition node are both present.
                    term = ch.children[0].astext()
                    idx = subitem.first_child_matching_class(nodes.definition)
                    if idx is not None:
                        def_node = subitem[idx]
                        def_node.attributes['classifier'] = classifier
                        definitions[term] = def_node
    return definitions
def print_command_args_and_opts(arg_list, opt_list, sub_list=None):
    """Wrap argument/option/subcommand lists into one definition list.

    Each non-empty input becomes a labeled definition-list item; empty
    or missing sections are skipped.
    """
    sections = (
        ('Positional arguments:', arg_list),
        ('Options:', opt_list),
        ('Sub-commands:', sub_list),
    )
    entries = [
        nodes.definition_list_item(
            '', nodes.term(text=label), nodes.definition('', content))
        for label, content in sections
        if content
    ]
    return nodes.definition_list('', *entries)
def print_subcommand_list(data, nested_content):
    """Build a definition list describing each subcommand in *data*.

    For every child command: its help text (or 'Undocumented'), any
    extra definitions from *nested_content*, an optional description,
    a usage literal block, and a recursively-built section of its own
    arguments, options, and subcommands.
    """
    definitions = map_nested_definitions(nested_content)
    entries = []
    for child in data.get('children', []):
        name = child['name']
        body = [nodes.paragraph(text=child['help'])] if child['help'] else []
        body = apply_definition(definitions, body, name)
        if not body:
            body.append(nodes.paragraph(text='Undocumented'))
        if 'description' in child:
            body.append(nodes.paragraph(text=child['description']))
        body.append(nodes.literal_block(text=child['usage']))
        # Recurse into the child's own args/options/subcommands.
        body.append(print_command_args_and_opts(
            print_arg_list(child, nested_content),
            print_opt_list(child, nested_content),
            print_subcommand_list(child, nested_content)))
        entries.append(
            nodes.definition_list_item(
                '',
                nodes.term('', '', nodes.strong(text=name)),
                nodes.definition('', *body)))
    return nodes.definition_list('', *entries)
def _parse_definition_list(
        def_list_node: nodes.definition_list) -> ExtraContentDict:
    """Parse a definition list inside the directive.

    Args:
        def_list_node: A definition list node containing definitions
            for extending the Sphinx output.

    Raises:
        ValueError: The given classifier was unrecognized.

    Returns:
        A dict where keys are item IDs and values contain the
        classifiers and the content as lists of docutils nodes.
    """
    definitions = collections.defaultdict(lambda: None)
    for item in def_list_node:
        if not isinstance(item, nodes.definition_list_item):
            continue
        term = _get_matching_child(item, nodes.term, last=False).astext()
        classifiers = set()
        for child in item.children:
            if not isinstance(child, nodes.classifier):
                continue
            label = child.astext()
            if label not in ALL_CLASSIFIERS:
                raise ValueError("unknown classifier '{0}'".format(label))
            classifiers.add(label)
        # Fill in the defaults for each classifier category.
        if not classifiers & CONTENT_CLASSIFIERS:
            classifiers.add("@after")
        if not classifiers & MARKUP_CLASSIFIERS:
            classifiers.add("@auto")
        content = _get_matching_child(
            item, nodes.definition, last=False).children
        if not definitions[term]:
            definitions[term] = []
        definitions[term].append(ExtraContent(classifiers, content))
    return definitions