我们从 Python 开源项目中提取了以下 24 个代码示例,用于说明如何使用 docutils.nodes.make_id()。
def run(self):
    """Build and return the admonition node for this directive.

    For the generic ``admonition`` directive (``self.node_class is
    nodes.admonition``) the first argument is parsed as the title and,
    unless the user supplied a ``:class:`` option, a CSS class of the
    form ``admonition-<id>`` derived from the title is added.

    Returns a one-element list containing the populated node.
    """
    set_classes(self.options)
    self.assert_has_content()
    text = '\n'.join(self.content)
    admonition_node = self.node_class(text, **self.options)
    self.add_name(admonition_node)
    if self.node_class is nodes.admonition:
        title_text = self.arguments[0]
        textnodes, messages = self.state.inline_text(title_text,
                                                     self.lineno)
        title = nodes.title(title_text, '', *textnodes)
        title.source, title.line = (
            self.state_machine.get_source_and_line(self.lineno))
        admonition_node += title
        admonition_node += messages
        # Fix: idiomatic membership test ("x not in y", not "not x in y").
        if 'classes' not in self.options:
            admonition_node['classes'] += ['admonition-' +
                                           nodes.make_id(title_text)]
    self.state.nested_parse(self.content, self.content_offset,
                            admonition_node)
    return [admonition_node]
def class_option(argument):
    """Convert the argument into a list of ID-compatible strings and
    return it.  (Directive option conversion function.)

    Raise ``ValueError`` if no argument is found or if any word cannot
    be turned into an identifier.
    """
    if argument is None:
        raise ValueError('argument required but none supplied')
    class_names = []
    for raw_name in argument.split():
        normalized = nodes.make_id(raw_name)
        if not normalized:
            # make_id() strips everything non-identifier; an empty result
            # means the word had no usable characters at all.
            raise ValueError('cannot make "%s" into a class name' % raw_name)
        class_names.append(normalized)
    return class_names
def make_termnodes_from_paragraph_node(env, node, new_id=None):
    """Register a glossary term taken from *node* and build its nodes.

    Records the term as a ``std`` domain object and an index entry in
    the Sphinx build environment *env*, then returns a tuple
    ``(new_id, termtext, new_termnodes)`` where *new_termnodes* is the
    index node followed by the term's children and a term separator.
    """
    gloss_entries = env.temp_data.setdefault('gloss_entries', set())
    objects = env.domaindata['std']['objects']
    termtext = node.astext()
    if new_id is None:
        new_id = nodes.make_id('term-' + termtext)
    if new_id in gloss_entries:
        # Collision with an id already seen in this document: fall back
        # to a numbered id based on the current number of entries.
        new_id = 'term-' + str(len(gloss_entries))
    gloss_entries.add(new_id)
    objects['term', termtext.lower()] = env.docname, new_id
    # add an index entry too
    indexnode = addnodes.index()
    indexnode['entries'] = [('single', termtext, new_id, 'main')]
    new_termnodes = []
    new_termnodes.append(indexnode)
    new_termnodes.extend(node.children)
    new_termnodes.append(addnodes.termsep())
    # Propagate source location so warnings point at the original term.
    for termnode in new_termnodes:
        termnode.source, termnode.line = node.source, node.line
    return new_id, termtext, new_termnodes
def make_admonition(node_class, name, arguments, options, content, lineno,
                    content_offset, block_text, state, state_machine):
    """Deprecated helper that builds an admonition node.

    Emits a ``DeprecationWarning`` pointing callers at
    ``docutils.parsers.rst.directives.admonitions.BaseAdmonition``.

    Returns a one-element list containing the populated admonition node.
    """
    warnings.warn('make_admonition is deprecated, use '
                  'docutils.parsers.rst.directives.admonitions.BaseAdmonition '
                  'instead', DeprecationWarning, stacklevel=2)
    text = '\n'.join(content)
    admonition_node = node_class(text)
    title_text = None
    if arguments:
        title_text = arguments[0]
        textnodes, messages = state.inline_text(title_text, lineno)
        admonition_node += nodes.title(title_text, '', *textnodes)
        admonition_node += messages
    if 'class' in options:
        classes = options['class']
    elif title_text is not None:
        classes = ['admonition-' + nodes.make_id(title_text)]
    else:
        # Fix: previously this path raised NameError (title_text was
        # unbound when no arguments were given and no 'class' option was
        # supplied); with no title there is no class to derive.
        classes = []
    admonition_node['classes'] += classes
    state.nested_parse(content, content_offset, admonition_node)
    return [admonition_node]
def run(self):
    """Render the directive body as a LaTeX inference rule.

    The rule is wrapped in a definition list whose term is the first
    directive argument, preceded by an ``inference-<id>`` link target.
    """
    self.assert_has_content()
    rule_title = self.arguments[0]
    body = '\n'.join(self.content)
    rendered = self.make_math_node(self.prepare_latex(body))
    # Explicit target so the rule can be cross-referenced by name.
    anchor = nodes.target(
        '', '', ids=['inference-' + nodes.make_id(rule_title)])
    self.state.document.note_explicit_target(anchor)
    term_node = nodes.term('', rule_title)
    desc_node = nodes.description('', rendered)
    item = nodes.definition_list_item('', term_node, desc_node)
    deflist = nodes.definition_list(body, anchor, item)
    set_source_info(self, deflist)
    return [deflist]
def make_glossary_term(env, textnodes, index_key, source, lineno, new_id=None):
    """Build a glossary ``term`` node and register it as a target.

    The term's text-only form is recorded as a ``std`` domain object and
    as an index entry (under *index_key*) in the build environment
    *env*.  Returns the fully wired-up ``term`` node.
    """
    # get a text-only representation of the term and register it
    # as a cross-reference target
    term = nodes.term('', '', *textnodes)
    term.source = source
    term.line = lineno
    seen_ids = env.temp_data.setdefault('gloss_entries', set())
    std_objects = env.domaindata['std']['objects']
    termtext = term.astext()
    if new_id is None:
        new_id = nodes.make_id('term-' + termtext)
    if new_id in seen_ids:
        # Id already taken in this document: use a running number.
        new_id = 'term-' + str(len(seen_ids))
    seen_ids.add(new_id)
    std_objects['term', termtext.lower()] = env.docname, new_id
    # add an index entry too
    indexnode = addnodes.index()
    indexnode['entries'] = [('single', termtext, new_id, 'main', index_key)]
    indexnode.source, indexnode.line = term.source, term.line
    term.append(indexnode)
    term['ids'].append(new_id)
    term['names'].append(new_id)
    return term
def _add_target(self, signode, name):
    """Register a link target *name* pointing at *signode*.

    Returns the generated target id; if that id is already registered
    in the document, nothing else is done.
    """
    target_id = make_target(self.objtype, nodes.make_id(name))
    document = self.state.document
    if target_id in document.ids:
        # Already registered elsewhere; just report the id.
        return target_id
    signode['ids'].append(target_id)
    signode['names'].append(name)
    # Mark the first signature of a multi-signature description.
    signode['first'] = (not self.names)
    document.note_explicit_target(signode)
    self._record_name(name, target_id)
    return target_id
def genindex_nodes(genindexentries):
    """Render Sphinx general-index entries as reST and return a doctree.

    Builds a reST document from *genindexentries* (the ``(key,
    entries)`` structure Sphinx produces for the general index), parses
    it with docutils, and returns the second child of the resulting
    document (``doctree[1]``) — the 'DUMMY' title inserted at the top
    is skipped that way.
    """
    indexlabel = _('Index')
    indexunder = '=' * len(indexlabel)
    output = ['DUMMY', '=====', '.. _genindex:\n\n', indexlabel,
              indexunder, '']
    for key, entries in genindexentries:
        # Fix: removed commented-out pudb debugging left in the code.
        output.append('.. cssclass:: heading4\n\n%s\n\n' % key)  # initial
        for entryname, (links, subitems) in entries:
            if links:
                # First link is the entry itself; the rest become
                # numbered "[1] [2] ..." references.
                output.append('`%s <#%s>`_' % (entryname,
                                               nodes.make_id(links[0][1])))
                for i, link in enumerate(links[1:], start=1):
                    output[-1] += (' `[%s] <#%s>`_ '
                                   % (i, nodes.make_id(link[1])))
                output.append('')
            else:
                output.append(entryname)
            if subitems:
                for subentryname, subentrylinks in subitems:
                    if subentrylinks:
                        output.append('  `%s <%s>`_'
                                      % (subentryname, subentrylinks[0]))
                        for i, link in enumerate(subentrylinks[1:], start=1):
                            output[-1] += ' `[%s] <%s>`_ ' % (i, link)
                        output.append('')
                    else:
                        output.append(subentryname)
        output.append('')
    doctree = docutils.core.publish_doctree('\n'.join(output))
    return doctree[1]
def extract_bibliographic(self, field_list):
    """Convert an initial field list into a ``docinfo`` element.

    Recognized bibliographic fields are transformed into their
    canonical docinfo child nodes; 'dedication' and 'abstract' become
    standalone topics appended after the docinfo.  Any field that fails
    validation raises ``TransformError`` internally and is kept as a
    plain field inside the docinfo.  Returns the resulting node list.
    """
    docinfo = nodes.docinfo()
    bibliofields = self.language.bibliographic_fields
    labels = self.language.labels
    topics = {'dedication': None, 'abstract': None}
    for field in field_list:
        try:
            # Field name normalized with make_id() for the lookup key.
            name = field[0][0].astext()
            normedname = nodes.make_id(name)
            if not (len(field) == 2 and normedname in bibliofields
                    and self.check_empty_biblio_field(field, name)):
                raise TransformError
            canonical = bibliofields[normedname]
            biblioclass = self.biblio_nodes[canonical]
            if issubclass(biblioclass, nodes.TextElement):
                if not self.check_compound_biblio_field(field, name):
                    raise TransformError
                utils.clean_rcs_keywords(
                    field[1][0], self.rcs_keyword_substitutions)
                docinfo.append(biblioclass('', '', *field[1][0]))
            elif issubclass(biblioclass, nodes.authors):
                self.extract_authors(field, name, docinfo)
            elif issubclass(biblioclass, nodes.topic):
                if topics[canonical]:
                    # Duplicate dedication/abstract: warn and keep the
                    # field as-is via the TransformError path.
                    field[-1] += self.document.reporter.warning(
                        'There can only be one "%s" field.' % name,
                        base_node=field)
                    raise TransformError
                title = nodes.title(name, labels[canonical])
                topics[canonical] = biblioclass(
                    '', title, classes=[canonical], *field[1].children)
            else:
                docinfo.append(biblioclass('', *field[1].children))
        except TransformError:
            # Unrecognized or malformed field: keep it verbatim, but
            # still clean RCS keywords and tag it with a class name.
            if len(field[-1]) == 1 \
               and isinstance(field[-1][0], nodes.paragraph):
                utils.clean_rcs_keywords(
                    field[-1][0], self.rcs_keyword_substitutions)
            if normedname and normedname not in bibliofields:
                field['classes'].append(normedname)
            docinfo.append(field)
    nodelist = []
    if len(docinfo) != 0:
        nodelist.append(docinfo)
    for name in ('dedication', 'abstract'):
        if topics[name]:
            nodelist.append(topics[name])
    return nodelist
def extract_bibliographic(self, field_list):
    """Convert an initial field list into a ``docinfo`` element.

    Variant of the transform that normalizes field names with
    ``fully_normalize_name()`` for the lookup and only derives a CSS
    class (via ``make_id()``) for unrecognized fields.  Returns the
    resulting node list (docinfo plus optional dedication/abstract
    topics).
    """
    docinfo = nodes.docinfo()
    bibliofields = self.language.bibliographic_fields
    labels = self.language.labels
    topics = {'dedication': None, 'abstract': None}
    for field in field_list:
        try:
            name = field[0][0].astext()
            normedname = nodes.fully_normalize_name(name)
            if not (len(field) == 2 and normedname in bibliofields
                    and self.check_empty_biblio_field(field, name)):
                raise TransformError
            canonical = bibliofields[normedname]
            biblioclass = self.biblio_nodes[canonical]
            if issubclass(biblioclass, nodes.TextElement):
                if not self.check_compound_biblio_field(field, name):
                    raise TransformError
                utils.clean_rcs_keywords(
                    field[1][0], self.rcs_keyword_substitutions)
                docinfo.append(biblioclass('', '', *field[1][0]))
            elif issubclass(biblioclass, nodes.authors):
                self.extract_authors(field, name, docinfo)
            elif issubclass(biblioclass, nodes.topic):
                if topics[canonical]:
                    # Only one dedication/abstract is allowed.
                    field[-1] += self.document.reporter.warning(
                        'There can only be one "%s" field.' % name,
                        base_node=field)
                    raise TransformError
                title = nodes.title(name, labels[canonical])
                topics[canonical] = biblioclass(
                    '', title, classes=[canonical], *field[1].children)
            else:
                docinfo.append(biblioclass('', *field[1].children))
        except TransformError:
            # Unrecognized or malformed field: keep it verbatim; tag it
            # with an id-safe class derived from the normalized name.
            if len(field[-1]) == 1 \
               and isinstance(field[-1][0], nodes.paragraph):
                utils.clean_rcs_keywords(
                    field[-1][0], self.rcs_keyword_substitutions)
            if normedname not in bibliofields:
                classvalue = nodes.make_id(normedname)
                if classvalue:
                    field['classes'].append(classvalue)
            docinfo.append(field)
    nodelist = []
    if len(docinfo) != 0:
        nodelist.append(docinfo)
    for name in ('dedication', 'abstract'):
        if topics[name]:
            nodelist.append(topics[name])
    return nodelist
def _generate_nodes(self, name, command, parent=None, show_nested=False):
    """Generate the relevant Sphinx nodes.

    Format a `click.Group` or `click.Command`.

    :param name: Name of command, as used on the command line
    :param command: Instance of `click.Group` or `click.Command`
    :param parent: Instance of `click.Context`, or None
    :param show_nested: Whether subcommands should be included in output
    :returns: A list of nested docutil nodes
    """
    ctx = click.Context(command, info_name=name, parent=parent)

    # Titled section, anchored by the full command path so that nested
    # commands get distinct, linkable ids.
    section = nodes.section(
        '',
        nodes.title(text=name),
        ids=[nodes.make_id(ctx.command_path)],
        names=[nodes.fully_normalize_name(ctx.command_path)])

    # Parse the formatted help text into the section body.
    view = statemachine.ViewList()
    for help_line in _format_command(ctx, show_nested):
        view.append(help_line, ctx.command_path)
    self.state.nested_parse(view, 0, section)

    # Recurse into subcommands when requested.
    if show_nested:
        subcommands = getattr(ctx.command, 'commands', {})
        for sub_name, sub_command in sorted(subcommands.items()):
            section.extend(self._generate_nodes(
                sub_name, sub_command, ctx, show_nested))
    return [section]