Python docutils.nodes module: emphasis() source code examples

The following 30 code examples, extracted from open-source Python projects, illustrate how to use docutils.nodes.emphasis().
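
Before the project examples, here is a minimal sketch (not taken from any of the projects below; the class name and text are illustrative) showing how an emphasis node is constructed and inspected:

from docutils import nodes

# nodes.emphasis(rawsource, text, **attributes) creates an inline node
# wrapping a Text child.
emph = nodes.emphasis('*hello*', 'hello', classes=['highlight'])

print(emph.astext())   # plain-text content: 'hello'
print(emph.pformat())  # pseudo-XML rendering of the node tree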

Project: chalktalk_docs    Author: loremIpsum1771
def run(self):
        env = self.state.document.settings.env
        if not env.config.show_authors:
            return []
        para = nodes.paragraph(translatable=False)
        emph = nodes.emphasis()
        para += emph
        if self.name == 'sectionauthor':
            text = _('Section author: ')
        elif self.name == 'moduleauthor':
            text = _('Module author: ')
        elif self.name == 'codeauthor':
            text = _('Code author: ')
        else:
            text = _('Author: ')
        emph += nodes.Text(text, text)
        inodes, messages = self.state.inline_text(self.arguments[0],
                                                  self.lineno)
        emph.extend(inodes)
        return [para] + messages
Project: chalktalk_docs    Author: loremIpsum1771
def autolink_role(typ, rawtext, etext, lineno, inliner,
                  options={}, content=[]):
    """Smart linking role.

    Expands to ':obj:`text`' if `text` is an object that can be imported;
    otherwise expands to '*text*'.
    """
    env = inliner.document.settings.env
    r = env.get_domain('py').role('obj')(
        'obj', rawtext, etext, lineno, inliner, options, content)
    pnode = r[0][0]

    prefixes = get_import_prefixes_from_env(env)
    try:
        name, obj, parent, modname = import_by_name(pnode['reftarget'], prefixes)
    except ImportError:
        content = pnode[0]
        r[0][0] = nodes.emphasis(rawtext, content[0].astext(),
                                 classes=content['classes'])
    return r
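
In a Sphinx extension, a role like this is wired up in setup(); a minimal, hypothetical registration (the role name 'autolink' is illustrative, not taken from the project) might look like:

def setup(app):
    # Register the role defined above under an illustrative name.
    app.add_role('autolink', autolink_role)
    return {'parallel_read_safe': True}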
Project: sphinxcontrib-vcs    Author: t2y
def get_changelog(self, repo, commit):
        item = nodes.list_item()

        item.append(self._make_message_node(commit.message, commit.hexsha))
        item.append(nodes.inline(text=six.text_type(' by ')))
        item.append(nodes.emphasis(text=six.text_type(commit.author.name)))
        item.append(nodes.inline(text=six.text_type(' at ')))

        commit_date = datetime.fromtimestamp(commit.authored_date)
        item.append(nodes.emphasis(text=six.text_type(commit_date)))

        if OPTION_WITH_REF_URL in self.options:
            ref_url = repo.get_commit_url(commit.hexsha)
            ref = nodes.reference('', commit.hexsha, refuri=ref_url)
            item.append(nodes.paragraph('', '', ref))

        if OPTION_INCLUDE_DIFF in self.options:
            diff = repo.get_diff(commit.hexsha)
            item.append(self._make_diff_node(diff, commit.hexsha))

        return item
Project: sphinxcontrib-vcs    Author: t2y
def get_changelog(self, repo, commit):
        item = nodes.list_item()

        item.append(self._make_message_node(commit['summary'], commit['sha']))
        item.append(nodes.inline(text=six.text_type(' by ')))
        item.append(nodes.emphasis(text=six.text_type(commit['user'])))
        item.append(nodes.inline(text=six.text_type(' at ')))
        item.append(nodes.emphasis(text=six.text_type(commit['date'])))

        if OPTION_WITH_REF_URL in self.options:
            ref_url = repo.get_commit_url(commit['sha'])
            ref = nodes.reference('', commit['sha'], refuri=ref_url)
            item.append(nodes.paragraph('', '', ref))

        if OPTION_INCLUDE_DIFF in self.options:
            diff = repo.get_diff(commit['revision'])
            item.append(self._make_diff_node(diff, commit['sha']))

        return item
Project: simphony-remote    Author: simphony
def autolink_role(typ, rawtext, etext, lineno, inliner,
                  options={}, content=[]):
    """Smart linking role.

    Expands to ':obj:`text`' if `text` is an object that can be imported;
    otherwise expands to '*text*'.
    """
    env = inliner.document.settings.env
    r = env.get_domain('py').role('obj')(
        'obj', rawtext, etext, lineno, inliner, options, content)
    pnode = r[0][0]

    prefixes = get_import_prefixes_from_env(env)
    try:
        name, obj, parent, modname = import_by_name(pnode['reftarget'], prefixes)
    except ImportError:
        content = pnode[0]
        r[0][0] = nodes.emphasis(rawtext, content[0].astext(),
                                 classes=content['classes'])
    return r
Project: veros    Author: dionhaefner
def fa_global(key=''):
    def fa(role, rawtext, text, lineno, inliner, options={}, content=[]):
        options.update({'classes': []})
        options['classes'].append('fa')
        if key:
            options['classes'].append('fa-%s' % key)
        else:
            for x in text.split(","):
                options['classes'].append('fa-%s' % x)
        set_classes(options)
        node = emphasis(**options)
        return [node], []
    return fa
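
fa_global is a role factory. Outside Sphinx, plain docutils could register such roles with register_local_role; this is only a hypothetical sketch (the role names are illustrative):

from docutils.parsers.rst import roles

# Hypothetical registration of a generic ':fa:' role and a fixed variant.
roles.register_local_role('fa', fa_global())
roles.register_local_role('fa-github', fa_global('github'))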

#add directive
Project: veros    Author: dionhaefner
def run(self):
        options = {'classes': []}
        options['classes'].append('fa')
        for x in self.content[0].split(' '):
            options['classes'].append('fa-%s' % x)
        set_classes(options)
        node = emphasis(**options)
        return [node]
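
The snippet above is the run() method of the companion directive; its registration is not shown in the extract. A hypothetical docutils-only registration, with an assumed class name, could be:

from docutils.parsers.rst import directives

# 'FontAwesomeDirective' is an assumed name for the class containing run() above.
directives.register_directive('fa', FontAwesomeDirective)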
Project: aws-cfn-plex    Author: lordmuffin
def emphasis(self, match, lineno):
        before, inlines, remaining, sysmessages, endstring = self.inline_obj(
              match, lineno, self.patterns.emphasis, nodes.emphasis)
        return before, inlines, remaining, sysmessages
Project: AshsSDK    Author: thehappydinoa
def emphasis(self, match, lineno):
        before, inlines, remaining, sysmessages, endstring = self.inline_obj(
              match, lineno, self.patterns.emphasis, nodes.emphasis)
        return before, inlines, remaining, sysmessages
Project: python2-tracer    Author: extremecoders-re
def add_annotations(self, app, doctree):
        for node in doctree.traverse(addnodes.desc_content):
            par = node.parent
            if par['domain'] != 'c':
                continue
            if par['stableabi']:
                node.insert(0, nodes.emphasis(' Part of the stable ABI.',
                                              ' Part of the stable ABI.',
                                              classes=['stableabi']))
            if par['objtype'] != 'function':
                continue
            if not par[0].has_key('names') or not par[0]['names']:
                continue
            name = par[0]['names'][0]
            if name.startswith("c."):
                name = name[2:]
            entry = self.get(name)
            if not entry:
                continue
            elif entry.result_type not in ("PyObject*", "PyVarObject*"):
                continue
            if entry.result_refs is None:
                rc = 'Return value: Always NULL.'
            elif entry.result_refs:
                rc = 'Return value: New reference.'
            else:
                rc = 'Return value: Borrowed reference.'
            node.insert(0, nodes.emphasis(rc, rc, classes=['refcount']))
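
add_annotations takes (app, doctree), which matches Sphinx's doctree-read event. A hypothetical hook-up in setup() (the Refcounts class and the data file name are assumptions, not shown in the snippet):

def setup(app):
    refcounts = Refcounts.fromfile('refcounts.dat')   # assumed loader
    app.connect('doctree-read', refcounts.add_annotations)
    return {'parallel_read_safe': True}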
Project: chalktalk_docs    Author: loremIpsum1771
def emphasis(self, match, lineno):
        before, inlines, remaining, sysmessages, endstring = self.inline_obj(
              match, lineno, self.patterns.emphasis, nodes.emphasis)
        return before, inlines, remaining, sysmessages
Project: chalktalk_docs    Author: loremIpsum1771
def _resolve_doc_reference(self, builder, node, contnode):
        # directly reference to document by source name;
        # can be absolute or relative
        docname = docname_join(node['refdoc'], node['reftarget'])
        if docname in self.all_docs:
            if node['refexplicit']:
                # reference with explicit title
                caption = node.astext()
            else:
                caption = clean_astext(self.titles[docname])
            innernode = nodes.emphasis(caption, caption)
            newnode = nodes.reference('', '', internal=True)
            newnode['refuri'] = builder.get_relative_uri(node['refdoc'], docname)
            newnode.append(innernode)
            return newnode
Project: chalktalk_docs    Author: loremIpsum1771
def make_xref(self, rolename, domain, target, innernode=nodes.emphasis,
                  contnode=None):
        result = super(PyXrefMixin, self).make_xref(rolename, domain, target,
                                                    innernode, contnode)
        result['refspecific'] = True
        if target.startswith(('.', '~')):
            prefix, result['reftarget'] = target[0], target[1:]
            if prefix == '.':
                text = target[1:]
            elif prefix == '~':
                text = target.split('.')[-1]
            for node in result.traverse(nodes.Text):
                node.parent[node.parent.index(node)] = nodes.Text(text)
                break
        return result
Project: chalktalk_docs    Author: loremIpsum1771
def emph_literal_role(typ, rawtext, text, lineno, inliner,
                      options={}, content=[]):
    text = utils.unescape(text)
    pos = 0
    retnode = nodes.literal(role=typ.lower(), classes=[typ])
    for m in _litvar_re.finditer(text):
        if m.start() > pos:
            txt = text[pos:m.start()]
            retnode += nodes.Text(txt, txt)
        retnode += nodes.emphasis(m.group(1), m.group(1))
        pos = m.end()
    if pos < len(text):
        retnode += nodes.Text(text[pos:], text[pos:])
    return [retnode], []
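
For input text like 'print({obj})', and assuming _litvar_re matches the brace-delimited variable, the role returns a literal node with interleaved Text and emphasis children, equivalent to building it by hand:

# Equivalent hand-built node tree for an input of 'print({obj})' (illustrative).
retnode = nodes.literal(role='samp', classes=['samp'])
retnode += nodes.Text('print(')
retnode += nodes.emphasis('obj', 'obj')
retnode += nodes.Text(')')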
Project: chalktalk_docs    Author: loremIpsum1771
def assemble_doctree(self, indexfile, toctree_only, appendices):
        self.docnames = set([indexfile] + appendices)
        self.info(darkgreen(indexfile) + " ", nonl=1)
        tree = self.env.get_doctree(indexfile)
        tree['docname'] = indexfile
        if toctree_only:
            # extract toctree nodes from the tree and put them in a
            # fresh document
            new_tree = new_document('<texinfo output>')
            new_sect = nodes.section()
            new_sect += nodes.title(u'<Set title in conf.py>',
                                    u'<Set title in conf.py>')
            new_tree += new_sect
            for node in tree.traverse(addnodes.toctree):
                new_sect += node
            tree = new_tree
        largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                        darkgreen, [indexfile])
        largetree['docname'] = indexfile
        for docname in appendices:
            appendix = self.env.get_doctree(docname)
            appendix['docname'] = docname
            largetree.append(appendix)
        self.info()
        self.info("resolving references...")
        self.env.resolve_references(largetree, indexfile, self)
        # TODO: add support for external :ref:s
        for pendingnode in largetree.traverse(addnodes.pending_xref):
            docname = pendingnode['refdocname']
            sectname = pendingnode['refsectname']
            newnodes = [nodes.emphasis(sectname, sectname)]
            for subdir, title in self.titles:
                if docname.startswith(subdir):
                    newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                    newnodes.append(nodes.emphasis(title, title))
                    newnodes.append(nodes.Text(')', ')'))
                    break
            else:
                pass
            pendingnode.replace_self(newnodes)
        return largetree
Project: blackmamba    Author: zrzka
def emphasis(self, match, lineno):
        before, inlines, remaining, sysmessages, endstring = self.inline_obj(
              match, lineno, self.patterns.emphasis, nodes.emphasis)
        return before, inlines, remaining, sysmessages
Project: RST-vscode    Author: tht13
def emphasis(self, match, lineno):
        before, inlines, remaining, sysmessages, endstring = self.inline_obj(
              match, lineno, self.patterns.emphasis, nodes.emphasis)
        return before, inlines, remaining, sysmessages
Project: sphinxcontrib-ansibleautodoc    Author: shirou
def make_arg(self, key, value):
        name = nodes.field_name(text=key)
        body = nodes.field_body()
        body.append(nodes.emphasis(text=value))
        field = nodes.field()
        field += [name, body]
        return field
Project: tf_aws_ecs_instance_draining_on_scale_in    Author: terraform-community-modules
def emphasis(self, match, lineno):
        before, inlines, remaining, sysmessages, endstring = self.inline_obj(
              match, lineno, self.patterns.emphasis, nodes.emphasis)
        return before, inlines, remaining, sysmessages
Project: plotnine    Author: has2k1
def get_entries(self):
        def _get_sections(doctree):
            """
            Return all sections after the 'Examples' section
            """
            # Do not traverse all the sections, only look for those
            # that are siblings of the first node.
            ref_node = doctree[0][0]
            kwargs = dict(descend=False, siblings=True)
            exnode = None  # Examples node
            for section in ref_node.traverse(nodes.section, **kwargs):
                if section[0].astext() == 'Examples':
                    exnode = section
                    break

            if not exnode:
                return

            for section in exnode[0].traverse(nodes.section, **kwargs):
                yield section

        for section in _get_sections(self.doctree):
            section_id = section.attributes['ids'][0]
            section_title = section[0].astext()

            # If an emphasis follows the section, it is the description
            if isinstance(section[1][0], nodes.emphasis):
                description = section[1][0].astext()
            else:
                description = ''

            # Last image in the section is used to create the thumbnail
            try:
                image_node = section.traverse(nodes.image)[-1]
            except IndexError:
                continue
            else:
                image_filename = image_node.attributes['uri']

            yield GalleryEntry(
                title=section_title,
                section_id=section_id,
                html_link='{}#{}'.format(self.htmlfilename, section_id),
                thumbnail=self.make_thumbnail(image_filename),
                description=description)
Project: aws-cfn-plex    Author: lordmuffin
def parse(self, text, lineno, memo, parent):
        # Needs to be refactored for nested inline markup.
        # Add nested_parse() method?
        """
        Return 2 lists: nodes (text and inline elements), and system_messages.

        Using `self.patterns.initial`, a pattern which matches start-strings
        (emphasis, strong, interpreted, phrase reference, literal,
        substitution reference, and inline target) and complete constructs
        (simple reference, footnote reference), search for a candidate.  When
        one is found, check for validity (e.g., not a quoted '*' character).
        If valid, search for the corresponding end string if applicable, and
        check it for validity.  If not found or invalid, generate a warning
        and ignore the start-string.  Implicit inline markup (e.g. standalone
        URIs) is found last.
        """
        self.reporter = memo.reporter
        self.document = memo.document
        self.language = memo.language
        self.parent = parent
        pattern_search = self.patterns.initial.search
        dispatch = self.dispatch
        remaining = escape2null(text)
        processed = []
        unprocessed = []
        messages = []
        while remaining:
            match = pattern_search(remaining)
            if match:
                groups = match.groupdict()
                method = dispatch[groups['start'] or groups['backquote']
                                  or groups['refend'] or groups['fnend']]
                before, inlines, remaining, sysmessages = method(self, match,
                                                                 lineno)
                unprocessed.append(before)
                messages += sysmessages
                if inlines:
                    processed += self.implicit_inline(''.join(unprocessed),
                                                      lineno)
                    processed += inlines
                    unprocessed = []
            else:
                break
        remaining = ''.join(unprocessed) + remaining
        if remaining:
            processed += self.implicit_inline(remaining, lineno)
        return processed, messages

    # Inline object recognition
    # -------------------------
    # print start_string_prefix.encode('utf8')
    # TODO: support non-ASCII whitespace in the following 4 patterns?
Project: AshsSDK    Author: thehappydinoa
def parse(self, text, lineno, memo, parent):
        # Needs to be refactored for nested inline markup.
        # Add nested_parse() method?
        """
        Return 2 lists: nodes (text and inline elements), and system_messages.

        Using `self.patterns.initial`, a pattern which matches start-strings
        (emphasis, strong, interpreted, phrase reference, literal,
        substitution reference, and inline target) and complete constructs
        (simple reference, footnote reference), search for a candidate.  When
        one is found, check for validity (e.g., not a quoted '*' character).
        If valid, search for the corresponding end string if applicable, and
        check it for validity.  If not found or invalid, generate a warning
        and ignore the start-string.  Implicit inline markup (e.g. standalone
        URIs) is found last.
        """
        self.reporter = memo.reporter
        self.document = memo.document
        self.language = memo.language
        self.parent = parent
        pattern_search = self.patterns.initial.search
        dispatch = self.dispatch
        remaining = escape2null(text)
        processed = []
        unprocessed = []
        messages = []
        while remaining:
            match = pattern_search(remaining)
            if match:
                groups = match.groupdict()
                method = dispatch[groups['start'] or groups['backquote']
                                  or groups['refend'] or groups['fnend']]
                before, inlines, remaining, sysmessages = method(self, match,
                                                                 lineno)
                unprocessed.append(before)
                messages += sysmessages
                if inlines:
                    processed += self.implicit_inline(''.join(unprocessed),
                                                      lineno)
                    processed += inlines
                    unprocessed = []
            else:
                break
        remaining = ''.join(unprocessed) + remaining
        if remaining:
            processed += self.implicit_inline(remaining, lineno)
        return processed, messages

    # Inline object recognition
    # -------------------------
    # See also init_customizations().
Project: chalktalk_docs    Author: loremIpsum1771
def parse(self, text, lineno, memo, parent):
        # Needs to be refactored for nested inline markup.
        # Add nested_parse() method?
        """
        Return 2 lists: nodes (text and inline elements), and system_messages.

        Using `self.patterns.initial`, a pattern which matches start-strings
        (emphasis, strong, interpreted, phrase reference, literal,
        substitution reference, and inline target) and complete constructs
        (simple reference, footnote reference), search for a candidate.  When
        one is found, check for validity (e.g., not a quoted '*' character).
        If valid, search for the corresponding end string if applicable, and
        check it for validity.  If not found or invalid, generate a warning
        and ignore the start-string.  Implicit inline markup (e.g. standalone
        URIs) is found last.
        """
        self.reporter = memo.reporter
        self.document = memo.document
        self.language = memo.language
        self.parent = parent
        pattern_search = self.patterns.initial.search
        dispatch = self.dispatch
        remaining = escape2null(text)
        processed = []
        unprocessed = []
        messages = []
        while remaining:
            match = pattern_search(remaining)
            if match:
                groups = match.groupdict()
                method = dispatch[groups['start'] or groups['backquote']
                                  or groups['refend'] or groups['fnend']]
                before, inlines, remaining, sysmessages = method(self, match,
                                                                 lineno)
                unprocessed.append(before)
                messages += sysmessages
                if inlines:
                    processed += self.implicit_inline(''.join(unprocessed),
                                                      lineno)
                    processed += inlines
                    unprocessed = []
            else:
                break
        remaining = ''.join(unprocessed) + remaining
        if remaining:
            processed += self.implicit_inline(remaining, lineno)
        return processed, messages

    # Inline object recognition
    # -------------------------
    # lookahead and look-behind expressions for inline markup rules
Project: chalktalk_docs    Author: loremIpsum1771
def process_todo_nodes(app, doctree, fromdocname):
    if not app.config['todo_include_todos']:
        for node in doctree.traverse(todo_node):
            node.parent.remove(node)

    # Replace all todolist nodes with a list of the collected todos.
    # Augment each todo with a backlink to the original location.
    env = app.builder.env

    if not hasattr(env, 'todo_all_todos'):
        env.todo_all_todos = []

    for node in doctree.traverse(todolist):
        if not app.config['todo_include_todos']:
            node.replace_self([])
            continue

        content = []

        for todo_info in env.todo_all_todos:
            para = nodes.paragraph(classes=['todo-source'])
            description = _('(The <<original entry>> is located in '
                            ' %s, line %d.)') % \
                (todo_info['source'], todo_info['lineno'])
            desc1 = description[:description.find('<<')]
            desc2 = description[description.find('>>')+2:]
            para += nodes.Text(desc1, desc1)

            # Create a reference
            newnode = nodes.reference('', '', internal=True)
            innernode = nodes.emphasis(_('original entry'), _('original entry'))
            try:
                newnode['refuri'] = app.builder.get_relative_uri(
                    fromdocname, todo_info['docname'])
                newnode['refuri'] += '#' + todo_info['target']['refid']
            except NoUri:
                # ignore if no URI can be determined, e.g. for LaTeX output
                pass
            newnode.append(innernode)
            para += newnode
            para += nodes.Text(desc2, desc2)

            # (Recursively) resolve references in the todo content
            todo_entry = todo_info['todo']
            env.resolve_references(todo_entry, todo_info['docname'],
                                   app.builder)

            # Insert into the todolist
            content.append(todo_entry)
            content.append(para)

        node.replace_self(content)
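
process_todo_nodes has the (app, doctree, fromdocname) signature of the doctree-resolved event. A hypothetical setup() for such an extension (the config value is the one read in the code above; the rebuild flag is an assumption):

def setup(app):
    app.add_config_value('todo_include_todos', False, 'html')
    app.connect('doctree-resolved', process_todo_nodes)
    return {'parallel_read_safe': True}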
Project: chalktalk_docs    Author: loremIpsum1771
def assemble_doctree(self, indexfile, toctree_only, appendices):
        self.docnames = set([indexfile] + appendices)
        self.info(darkgreen(indexfile) + " ", nonl=1)
        tree = self.env.get_doctree(indexfile)
        tree['docname'] = indexfile
        if toctree_only:
            # extract toctree nodes from the tree and put them in a
            # fresh document
            new_tree = new_document('<latex output>')
            new_sect = nodes.section()
            new_sect += nodes.title(u'<Set title in conf.py>',
                                    u'<Set title in conf.py>')
            new_tree += new_sect
            for node in tree.traverse(addnodes.toctree):
                new_sect += node
            tree = new_tree
        largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                        darkgreen, [indexfile])
        largetree['docname'] = indexfile
        for docname in appendices:
            appendix = self.env.get_doctree(docname)
            appendix['docname'] = docname
            largetree.append(appendix)
        self.info()
        self.info("resolving references...")
        self.env.resolve_references(largetree, indexfile, self)
        # resolve :ref:s to distant tex files -- we can't add a cross-reference,
        # but append the document name
        for pendingnode in largetree.traverse(addnodes.pending_xref):
            docname = pendingnode['refdocname']
            sectname = pendingnode['refsectname']
            newnodes = [nodes.emphasis(sectname, sectname)]
            for subdir, title in self.titles:
                if docname.startswith(subdir):
                    newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                    newnodes.append(nodes.emphasis(title, title))
                    newnodes.append(nodes.Text(')', ')'))
                    break
            else:
                pass
            pendingnode.replace_self(newnodes)
        return largetree
Project: finite-element-course    Author: finite-element
def assemble_doctree(self, indexfile, toctree_only, appendices):
        self.docnames = set([indexfile] + appendices)
        self.info(darkgreen(indexfile) + " ", nonl=1)
        tree = self.env.get_doctree(indexfile)
        tree['docname'] = indexfile
        if toctree_only:
            # extract toctree nodes from the tree and put them in a
            # fresh document
            new_tree = new_document('<latex output>')
            new_sect = nodes.section()
            new_sect += nodes.title(u'<Set title in conf.py>',
                                    u'<Set title in conf.py>')
            new_tree += new_sect
            for node in tree.traverse(addnodes.toctree):
                new_sect += node
            tree = new_tree
        largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                        darkgreen, [])
        largetree['docname'] = indexfile
        for docname in appendices:
            appendix = self.env.get_doctree(docname)
            appendix['docname'] = docname
            largetree.append(appendix)
        self.info()
        self.info("resolving references...")
        self.env.resolve_references(largetree, indexfile, self)
        # resolve :ref:s to distant tex files -- we can't add a cross-reference,
        # but append the document name
        for pendingnode in largetree.traverse(addnodes.pending_xref):
            docname = pendingnode['refdocname']
            sectname = pendingnode['refsectname']
            newnodes = [nodes.emphasis(sectname, sectname)]
            for subdir, title in self.titles:
                if docname.startswith(subdir):
                    newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                    newnodes.append(nodes.emphasis(title, title))
                    newnodes.append(nodes.Text(')', ')'))
                    break
            else:
                pass
            pendingnode.replace_self(newnodes)
        return largetree
Project: blackmamba    Author: zrzka
def parse(self, text, lineno, memo, parent):
        # Needs to be refactored for nested inline markup.
        # Add nested_parse() method?
        """
        Return 2 lists: nodes (text and inline elements), and system_messages.

        Using `self.patterns.initial`, a pattern which matches start-strings
        (emphasis, strong, interpreted, phrase reference, literal,
        substitution reference, and inline target) and complete constructs
        (simple reference, footnote reference), search for a candidate.  When
        one is found, check for validity (e.g., not a quoted '*' character).
        If valid, search for the corresponding end string if applicable, and
        check it for validity.  If not found or invalid, generate a warning
        and ignore the start-string.  Implicit inline markup (e.g. standalone
        URIs) is found last.
        """
        self.reporter = memo.reporter
        self.document = memo.document
        self.language = memo.language
        self.parent = parent
        pattern_search = self.patterns.initial.search
        dispatch = self.dispatch
        remaining = escape2null(text)
        processed = []
        unprocessed = []
        messages = []
        while remaining:
            match = pattern_search(remaining)
            if match:
                groups = match.groupdict()
                method = dispatch[groups['start'] or groups['backquote']
                                  or groups['refend'] or groups['fnend']]
                before, inlines, remaining, sysmessages = method(self, match,
                                                                 lineno)
                unprocessed.append(before)
                messages += sysmessages
                if inlines:
                    processed += self.implicit_inline(''.join(unprocessed),
                                                      lineno)
                    processed += inlines
                    unprocessed = []
            else:
                break
        remaining = ''.join(unprocessed) + remaining
        if remaining:
            processed += self.implicit_inline(remaining, lineno)
        return processed, messages

    # Inline object recognition
    # -------------------------
    # See also init_customizations().
Project: RST-vscode    Author: tht13
def parse(self, text, lineno, memo, parent):
        # Needs to be refactored for nested inline markup.
        # Add nested_parse() method?
        """
        Return 2 lists: nodes (text and inline elements), and system_messages.

        Using `self.patterns.initial`, a pattern which matches start-strings
        (emphasis, strong, interpreted, phrase reference, literal,
        substitution reference, and inline target) and complete constructs
        (simple reference, footnote reference), search for a candidate.  When
        one is found, check for validity (e.g., not a quoted '*' character).
        If valid, search for the corresponding end string if applicable, and
        check it for validity.  If not found or invalid, generate a warning
        and ignore the start-string.  Implicit inline markup (e.g. standalone
        URIs) is found last.
        """
        self.reporter = memo.reporter
        self.document = memo.document
        self.language = memo.language
        self.parent = parent
        pattern_search = self.patterns.initial.search
        dispatch = self.dispatch
        remaining = escape2null(text)
        processed = []
        unprocessed = []
        messages = []
        while remaining:
            match = pattern_search(remaining)
            if match:
                groups = match.groupdict()
                method = dispatch[groups['start'] or groups['backquote']
                                  or groups['refend'] or groups['fnend']]
                before, inlines, remaining, sysmessages = method(self, match,
                                                                 lineno)
                unprocessed.append(before)
                messages += sysmessages
                if inlines:
                    processed += self.implicit_inline(''.join(unprocessed),
                                                      lineno)
                    processed += inlines
                    unprocessed = []
            else:
                break
        remaining = ''.join(unprocessed) + remaining
        if remaining:
            processed += self.implicit_inline(remaining, lineno)
        return processed, messages

    # Inline object recognition
    # -------------------------
    # print start_string_prefix.encode('utf8')
    # TODO: support non-ASCII whitespace in the following 4 patterns?
Project: sphinxcontrib-ansibleautodoc    Author: shirou
def make_node(self, lang='en'):
        if lang not in texts.keys():
            lang = 'en'
        arg_map = texts[lang]["arg_map"]
        task_title = texts[lang]["task_title"]
        module_title = texts[lang]["module_title"]

        module = ""
        module_args = []
        # first, search module 
        for arg, m in self.args.items():
            if arg not in arg_map.keys():
                module = arg
                module_args.append(m)

        item = nodes.admonition()
        title = nodes.title(text=self.name)
        item.append(title)

        for m in module_args:
            if isinstance(m, str):
                item.append(nodes.paragraph(text=m))
            else:
                mlist = []
                for k, v in m.items():
                    mlist.append("%s=%s" % (k, v))
                item.append(nodes.paragraph(text=" ".join(mlist)))

        field_list = nodes.field_list()
        field_list.append(self.make_arg(module_title, module))
        # second, create node tree
        for arg, txt in arg_map.items():
            if not txt:  # skip name etc...
                continue
            if arg not in self.args:
                continue
            value = self.args[arg]  # value of that task arg
            if isinstance(value, list):
                bl = nodes.bullet_list()
                for v in value:
                    body = nodes.emphasis(text=v)
                    bl.append(nodes.list_item('', body))
                name = nodes.field_name(text=txt)
                body = nodes.field_body()
                body.append(bl)
                field = nodes.field()
                field += [name, body]
                field_list.append(field)
            else:
                field_list.append(self.make_arg(txt, value))

        item.append(field_list)

        return item
Project: tf_aws_ecs_instance_draining_on_scale_in    Author: terraform-community-modules
def parse(self, text, lineno, memo, parent):
        # Needs to be refactored for nested inline markup.
        # Add nested_parse() method?
        """
        Return 2 lists: nodes (text and inline elements), and system_messages.

        Using `self.patterns.initial`, a pattern which matches start-strings
        (emphasis, strong, interpreted, phrase reference, literal,
        substitution reference, and inline target) and complete constructs
        (simple reference, footnote reference), search for a candidate.  When
        one is found, check for validity (e.g., not a quoted '*' character).
        If valid, search for the corresponding end string if applicable, and
        check it for validity.  If not found or invalid, generate a warning
        and ignore the start-string.  Implicit inline markup (e.g. standalone
        URIs) is found last.
        """
        self.reporter = memo.reporter
        self.document = memo.document
        self.language = memo.language
        self.parent = parent
        pattern_search = self.patterns.initial.search
        dispatch = self.dispatch
        remaining = escape2null(text)
        processed = []
        unprocessed = []
        messages = []
        while remaining:
            match = pattern_search(remaining)
            if match:
                groups = match.groupdict()
                method = dispatch[groups['start'] or groups['backquote']
                                  or groups['refend'] or groups['fnend']]
                before, inlines, remaining, sysmessages = method(self, match,
                                                                 lineno)
                unprocessed.append(before)
                messages += sysmessages
                if inlines:
                    processed += self.implicit_inline(''.join(unprocessed),
                                                      lineno)
                    processed += inlines
                    unprocessed = []
            else:
                break
        remaining = ''.join(unprocessed) + remaining
        if remaining:
            processed += self.implicit_inline(remaining, lineno)
        return processed, messages

    # Inline object recognition
    # -------------------------
    # print start_string_prefix.encode('utf8')
    # TODO: support non-ASCII whitespace in the following 4 patterns?