The following 45 code examples, extracted from open source Python projects, illustrate how to use docutils.nodes.field_list().
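Before the project examples, here is a minimal, self-contained sketch of how a field_list node can be assembled by hand. The field name "Author" and its value are placeholder choices for illustration only, not taken from any of the examples below:

from docutils import nodes

# Build the equivalent of ":Author: Jane Doe" as a docutils node tree.
field_list = nodes.field_list()
field = nodes.field()
field += nodes.field_name(text='Author')        # the field's name
body = nodes.field_body()
body += nodes.paragraph(text='Jane Doe')         # the field's value
field += body
field_list += field

print(field_list.pformat())                      # inspect the resulting tree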
def is_compactable(self, node):
    # print "is_compactable %s ?" % node.__class__,
    # explicit class arguments have precedence
    if 'compact' in node['classes']:
        return True
    if 'open' in node['classes']:
        return False
    # check config setting:
    if (isinstance(node, (nodes.field_list, nodes.definition_list))
        and not self.settings.compact_field_lists):
        # print "`compact-field-lists` is False"
        return False
    if (isinstance(node, (nodes.enumerated_list, nodes.bullet_list))
        and not self.settings.compact_lists):
        # print "`compact-lists` is False"
        return False
    # more special cases:
    if (self.topic_classes == ['contents']):
        # TODO: self.in_contents
        return True
    # check the list items:
    return self.check_simple_list(node)
def visit_list_item(self, node):
    # print "visiting list item", node.__class__
    children = [child for child in node.children
                if not isinstance(child, nodes.Invisible)]
    # print "has %s visible children" % len(children)
    if (children and isinstance(children[0], nodes.paragraph)
        and (isinstance(children[-1], nodes.bullet_list)
             or isinstance(children[-1], nodes.enumerated_list)
             or isinstance(children[-1], nodes.field_list))):
        children.pop()
    # print "%s children remain" % len(children)
    if len(children) <= 1:
        return
    else:
        # print "found", child.__class__, "in", node.__class__
        raise nodes.NodeFound
def rfc2822(self, match, context, next_state):
    """RFC2822-style field list item."""
    fieldlist = nodes.field_list(classes=['rfc2822'])
    self.parent += fieldlist
    field, blank_finish = self.rfc2822_field(match)
    fieldlist += field
    offset = self.state_machine.line_offset + 1         # next line
    newline_offset, blank_finish = self.nested_list_parse(
        self.state_machine.input_lines[offset:],
        input_offset=self.state_machine.abs_line_offset() + 1,
        node=fieldlist, initial_state='RFC2822List',
        blank_finish=blank_finish)
    self.goto_line(newline_offset)
    if not blank_finish:
        self.parent += self.unindent_warning(
            'RFC2822-style field list')
    return [], next_state, []
def field_marker(self, match, context, next_state):
    """Field list item."""
    field_list = nodes.field_list()
    self.parent += field_list
    field, blank_finish = self.field(match)
    field_list += field
    offset = self.state_machine.line_offset + 1         # next line
    newline_offset, blank_finish = self.nested_list_parse(
        self.state_machine.input_lines[offset:],
        input_offset=self.state_machine.abs_line_offset() + 1,
        node=field_list, initial_state='FieldList',
        blank_finish=blank_finish)
    self.goto_line(newline_offset)
    if not blank_finish:
        self.parent += self.unindent_warning('Field list')
    raw = self.state_machine.input_lines[offset-1:newline_offset-2].data
    field_list.realsource = '\n'.join(raw)
    return [], next_state, []
def apply(self):
    if not getattr(self.document.settings, 'docinfo_xform', 1):
        return
    document = self.document
    index = document.first_child_not_matching_class(
        nodes.PreBibliographic)
    if index is None:
        return
    candidate = document[index]
    if isinstance(candidate, nodes.field_list):
        biblioindex = document.first_child_not_matching_class(
            (nodes.Titular, nodes.Decorative))
        nodelist = self.extract_bibliographic(candidate)
        nodelist[0].realsource = candidate.realsource
        del document[index]         # untransformed field list (candidate)
        document[biblioindex:biblioindex] = nodelist
def field_marker(self, match, context, next_state):
    """Field list item."""
    field_list = nodes.field_list()
    self.parent += field_list
    field, blank_finish = self.field(match)
    field_list += field
    offset = self.state_machine.line_offset + 1         # next line
    newline_offset, blank_finish = self.nested_list_parse(
        self.state_machine.input_lines[offset:],
        input_offset=self.state_machine.abs_line_offset() + 1,
        node=field_list, initial_state='FieldList',
        blank_finish=blank_finish)
    self.goto_line(newline_offset)
    if not blank_finish:
        self.parent += self.unindent_warning('Field list')
    return [], next_state, []
def apply(self):
    if not getattr(self.document.settings, 'docinfo_xform', 1):
        return
    document = self.document
    index = document.first_child_not_matching_class(
        nodes.PreBibliographic)
    if index is None:
        return
    candidate = document[index]
    if isinstance(candidate, nodes.field_list):
        biblioindex = document.first_child_not_matching_class(
            (nodes.Titular, nodes.Decorative))
        nodelist = self.extract_bibliographic(candidate)
        del document[index]         # untransformed field list (candidate)
        document[biblioindex:biblioindex] = nodelist
def interpolation_dict(self):
    subs = html4css1.Writer.interpolation_dict(self)
    settings = self.document.settings
    pyhome = settings.python_home
    subs['pyhome'] = pyhome
    subs['pephome'] = settings.pep_home
    if pyhome == '..':
        subs['pepindex'] = '.'
    else:
        subs['pepindex'] = pyhome + '/dev/peps'
    index = self.document.first_child_matching_class(nodes.field_list)
    header = self.document[index]
    self.pepnum = header[0][1].astext()
    subs['pep'] = self.pepnum
    if settings.no_random:
        subs['banner'] = 0
    else:
        import random
        subs['banner'] = random.randrange(64)
    try:
        subs['pepnum'] = '%04i' % int(self.pepnum)
    except ValueError:
        subs['pepnum'] = self.pepnum
    self.title = header[1][1].astext()
    subs['title'] = self.title
    subs['body'] = ''.join(
        self.body_pre_docinfo + self.docinfo + self.body)
    return subs
def parse_extension_options(self, option_spec, datalines):
    """
    Parse `datalines` for a field list containing extension options
    matching `option_spec`.

    :Parameters:
        - `option_spec`: a mapping of option name to conversion
          function, which should raise an exception on bad input.
        - `datalines`: a list of input strings.

    :Return:
        - Success value, 1 or 0.
        - An option dictionary on success, an error string on failure.
    """
    node = nodes.field_list()
    newline_offset, blank_finish = self.nested_list_parse(
        datalines, 0, node, initial_state='ExtensionOptions',
        blank_finish=True)
    if newline_offset != len(datalines):        # incomplete parse of block
        return 0, 'invalid option block'
    try:
        options = utils.extract_extension_options(node, option_spec)
    except KeyError, detail:
        return 0, ('unknown option: "%s"' % detail.args[0])
    except (ValueError, TypeError), detail:
        return 0, ('invalid option value: %s' % ' '.join(detail.args))
    except utils.ExtensionOptionError, detail:
        return 0, ('invalid option data: %s' % ' '.join(detail.args))
    if blank_finish:
        return 1, options
    else:
        return 0, 'option data incompletely parsed'
def transform_all(self, node):
    """Transform all field list children of a node."""
    # don't traverse, only handle field lists that are immediate children
    for child in node:
        if isinstance(child, nodes.field_list):
            self.transform(child)
def parse_extension_options(self, option_spec, datalines):
    """
    Parse `datalines` for a field list containing extension options
    matching `option_spec`.

    :Parameters:
        - `option_spec`: a mapping of option name to conversion
          function, which should raise an exception on bad input.
        - `datalines`: a list of input strings.

    :Return:
        - Success value, 1 or 0.
        - An option dictionary on success, an error string on failure.
    """
    node = nodes.field_list()
    newline_offset, blank_finish = self.nested_list_parse(
        datalines, 0, node, initial_state='ExtensionOptions',
        blank_finish=True)
    if newline_offset != len(datalines):        # incomplete parse of block
        return 0, 'invalid option block'
    try:
        options = utils.extract_extension_options(node, option_spec)
    except KeyError as detail:
        return 0, ('unknown option: "%s"' % detail.args[0])
    except (ValueError, TypeError) as detail:
        return 0, ('invalid option value: %s' % ' '.join(detail.args))
    except utils.ExtensionOptionError as detail:
        return 0, ('invalid option data: %s' % ' '.join(detail.args))
    if blank_finish:
        return 1, options
    else:
        return 0, 'option data incompletely parsed'
def interpolation_dict(self):
    result = super().interpolation_dict()
    index = self.document.first_child_matching_class(nodes.field_list)
    self.document[index][0][1].text = '-1'
    result['pepnum'] = result['pep'] = self._pep_num
    result['pephome'] = 'https://github.com/fedora-python/pep-drafts/blob/master'
    result['pepindex'] = 'index.html'
    result['banner'] = 0
    return result
def extract_bibliographic(self, field_list):
    docinfo = nodes.docinfo()
    bibliofields = self.language.bibliographic_fields
    labels = self.language.labels
    topics = {'dedication': None, 'abstract': None}
    for field in field_list:
        try:
            name = field[0][0].astext()
            normedname = nodes.make_id(name)
            if not (len(field) == 2 and normedname in bibliofields
                    and self.check_empty_biblio_field(field, name)):
                raise TransformError
            canonical = bibliofields[normedname]
            biblioclass = self.biblio_nodes[canonical]
            if issubclass(biblioclass, nodes.TextElement):
                if not self.check_compound_biblio_field(field, name):
                    raise TransformError
                utils.clean_rcs_keywords(
                    field[1][0], self.rcs_keyword_substitutions)
                docinfo.append(biblioclass('', '', *field[1][0]))
            elif issubclass(biblioclass, nodes.authors):
                self.extract_authors(field, name, docinfo)
            elif issubclass(biblioclass, nodes.topic):
                if topics[canonical]:
                    field[-1] += self.document.reporter.warning(
                        'There can only be one "%s" field.' % name,
                        base_node=field)
                    raise TransformError
                title = nodes.title(name, labels[canonical])
                topics[canonical] = biblioclass(
                    '', title, classes=[canonical], *field[1].children)
            else:
                docinfo.append(biblioclass('', *field[1].children))
        except TransformError:
            if len(field[-1]) == 1 \
                   and isinstance(field[-1][0], nodes.paragraph):
                utils.clean_rcs_keywords(
                    field[-1][0], self.rcs_keyword_substitutions)
            if normedname and normedname not in bibliofields:
                field['classes'].append(normedname)
            docinfo.append(field)
    nodelist = []
    if len(docinfo) != 0:
        nodelist.append(docinfo)
    for name in ('dedication', 'abstract'):
        if topics[name]:
            nodelist.append(topics[name])
    return nodelist
def extract_bibliographic(self, field_list):
    docinfo = nodes.docinfo()
    bibliofields = self.language.bibliographic_fields
    labels = self.language.labels
    topics = {'dedication': None, 'abstract': None}
    for field in field_list:
        try:
            name = field[0][0].astext()
            normedname = nodes.fully_normalize_name(name)
            if not (len(field) == 2 and normedname in bibliofields
                    and self.check_empty_biblio_field(field, name)):
                raise TransformError
            canonical = bibliofields[normedname]
            biblioclass = self.biblio_nodes[canonical]
            if issubclass(biblioclass, nodes.TextElement):
                if not self.check_compound_biblio_field(field, name):
                    raise TransformError
                utils.clean_rcs_keywords(
                    field[1][0], self.rcs_keyword_substitutions)
                docinfo.append(biblioclass('', '', *field[1][0]))
            elif issubclass(biblioclass, nodes.authors):
                self.extract_authors(field, name, docinfo)
            elif issubclass(biblioclass, nodes.topic):
                if topics[canonical]:
                    field[-1] += self.document.reporter.warning(
                        'There can only be one "%s" field.' % name,
                        base_node=field)
                    raise TransformError
                title = nodes.title(name, labels[canonical])
                topics[canonical] = biblioclass(
                    '', title, classes=[canonical], *field[1].children)
            else:
                docinfo.append(biblioclass('', *field[1].children))
        except TransformError:
            if len(field[-1]) == 1 \
                   and isinstance(field[-1][0], nodes.paragraph):
                utils.clean_rcs_keywords(
                    field[-1][0], self.rcs_keyword_substitutions)
            if normedname not in bibliofields:
                classvalue = nodes.make_id(normedname)
                if classvalue:
                    field['classes'].append(classvalue)
            docinfo.append(field)
    nodelist = []
    if len(docinfo) != 0:
        nodelist.append(docinfo)
    for name in ('dedication', 'abstract'):
        if topics[name]:
            nodelist.append(topics[name])
    return nodelist
def extract_bibliographic(self, field_list):
    docinfo = nodes.docinfo()
    bibliofields = self.language.bibliographic_fields
    labels = self.language.labels
    topics = {'dedication': None, 'abstract': None}
    for field in field_list:
        try:
            name = field[0][0].astext()
            normedname = nodes.fully_normalize_name(name)
            if not (len(field) == 2 and normedname in bibliofields
                    and self.check_empty_biblio_field(field, name)):
                raise TransformError
            canonical = bibliofields[normedname]
            biblioclass = self.biblio_nodes[canonical]
            if issubclass(biblioclass, nodes.TextElement):
                if not self.check_compound_biblio_field(field, name):
                    raise TransformError
                utils.clean_rcs_keywords(
                    field[1][0], self.rcs_keyword_substitutions)
                docinfo.append(biblioclass('', '', *field[1][0]))
            elif issubclass(biblioclass, nodes.authors):
                self.extract_authors(field, name, docinfo)
            elif issubclass(biblioclass, nodes.topic):
                if topics[canonical]:
                    field[-1] += self.document.reporter.warning(
                        'There can only be one "%s" field.' % name,
                        base_node=field)
                    raise TransformError
                title = nodes.title(name, labels[canonical])
                topics[canonical] = biblioclass(
                    '', title, classes=[canonical], *field[1].children)
            else:
                docinfo.append(biblioclass('', *field[1].children))
        except TransformError:
            if len(field[-1]) == 1 \
                   and isinstance(field[-1][0], nodes.paragraph):
                utils.clean_rcs_keywords(
                    field[-1][0], self.rcs_keyword_substitutions)
            docinfo.append(field)
    nodelist = []
    if len(docinfo) != 0:
        nodelist.append(docinfo)
    for name in ('dedication', 'abstract'):
        if topics[name]:
            nodelist.append(topics[name])
    return nodelist
def make_node(self, lang='en'):
    if lang not in texts.keys():
        lang = 'en'
    arg_map = texts[lang]["arg_map"]
    task_title = texts[lang]["task_title"]
    module_title = texts[lang]["module_title"]
    module = ""
    module_args = []
    # first, search module
    for arg, m in self.args.items():
        if arg not in arg_map.keys():
            module = arg
            module_args.append(m)
    item = nodes.admonition()
    title = nodes.title(text=self.name)
    item.append(title)
    for m in module_args:
        if isinstance(m, str):
            item.append(nodes.paragraph(text=m))
        else:
            mlist = []
            for k, v in m.items():
                mlist.append("%s=%s" % (k, v))
            item.append(nodes.paragraph(text=" ".join(mlist)))
    field_list = nodes.field_list()
    field_list.append(self.make_arg(module_title, module))
    # second, create node tree
    for arg, txt in arg_map.items():
        if not txt:
            # skip name etc...
            continue
        if arg not in self.args:
            continue
        value = self.args[arg]    # value of that task arg
        if isinstance(value, list):
            bl = nodes.bullet_list()
            for v in value:
                body = nodes.emphasis(text=v)
                bl.append(nodes.list_item('', body))
            name = nodes.field_name(text=txt)
            body = nodes.field_body()
            body.append(bl)
            field = nodes.field()
            field += [name, body]
            field_list.append(field)
        else:
            field_list.append(self.make_arg(txt, value))
    item.append(field_list)
    return item