我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用string.rstrip()。
def __PrivateFile(self, aLocalView):
    """Return a list with all private and checked-out files in the
    <aLocalView> view.

    Runs ``cleartool lsprivate -tag <view>`` and collects every reported
    line; for entries tagged ``[checkedout]`` the tag is removed so only
    the file path is kept.
    """
    myClearCaseCommand = 'cleartool lsprivate -tag ' + aLocalView
    myPrivateFileList = []
    # NOTE(review): popen2.popen2 returns (child_stdout, child_stdin) --
    # the names below are swapped relative to that, but the first element
    # (the output stream) is what gets iterated, so behavior is correct.
    (mystdIn, myStdOut) = popen2.popen2(myClearCaseCommand)
    for myLine in mystdIn:
        myFilter = '[checkedout]'
        # was string.rstrip(string.lstrip(...)) -- strip() does both
        myLine = myLine.strip()
        if myLine.endswith(myFilter):
            # drop the trailing "[checkedout]" tag, keep the path only
            myLine = myLine[:-len(myFilter)].strip()
        myPrivateFileList.append(myLine)
    return myPrivateFileList
def getsites(lang):
    """Scrape Google web-search result URLs for the module-global query.

    Pages through results 10 at a time up to ``arg_page_end``, appending
    each extracted site URL to the module-global ``sites`` list; status
    messages are sent over the module-global IRC socket ``s``.
    """
    try:
        page_counter=0
        while page_counter < int(arg_page_end):
            # keep the IRC connection alive while scraping
            # NOTE(review): ``line`` is a module-global here (presumably the
            # last IRC message split into words) -- confirm against caller.
            s.send("PONG %s\r\n" % line[1])
            time.sleep(3)
            results_web = 'http://www.google.com/search?q='+str(query)+'&hl='+str(lang)+'&lr=&ie=UTF-8&start='+repr(page_counter)+'&sa=N'
            request_web = urllib2.Request(results_web)
            # randomize the User-Agent to make blocking less likely
            request_web.add_header('User-Agent',random.choice(agents))
            opener_web = urllib2.build_opener()
            text = opener_web.open(request_web).read()
            if re.search("403 Forbidden", text):
                s.send("PRIVMSG %s :%s\r\n" % (CHAN, "[-] Received Captcha... Damn that sucks!"))
                break
            # pull the <cite> entries (displayed result URLs) from the page
            names = re.findall(('<cite>+[\w\d\?\/\.\=\s\-]+=+[\d]+[\w\d\?\/\.\=\s\-]+</cite>'),text.replace("<b>","").replace("</b>",""))
            for name in names:
                # strip the markup and the trailing " - NNNk - " size note,
                # then trim any leftover trailing spaces/hyphens
                name = re.sub(" - \d+k - </cite>","",name.replace("<cite>","")).replace("</cite>","")
                name = name.rstrip(" -")
                sites.append(name)
            page_counter +=10
    except IOError:
        s.send("PRIVMSG %s :%s\r\n" % (CHAN, "[-] Can't connect to Google Web!"))
def visitNode_a_listing(self, node):
    # Emit a LaTeX verbatim listing of the file referenced by the node's
    # 'href' attribute (resolved against self.currDir), then a caption
    # built from the node's text.
    fileName = os.path.join(self.currDir, node.getAttribute('href'))
    self.writer('\\begin{verbatim}\n')
    # strip trailing whitespace from every source line
    lines = map(string.rstrip, open(fileName).readlines())
    # drop the first 'skipLines' lines (attribute defaults to 0)
    lines = lines[int(node.getAttribute('skipLines', 0)):]
    self.writer(text.removeLeadingTrailingBlanks('\n'.join(lines)))
    self.writer('\\end{verbatim}')

    # Write a caption for this source listing
    fileName = os.path.basename(fileName)
    caption = domhelpers.getNodeText(node)
    if caption == fileName:
        caption = 'Source listing'
    self.writer('\parbox[b]{\linewidth}{\\begin{center}%s --- '
                '\\begin{em}%s\\end{em}\\end{center}}' % (latexEscape(caption), latexEscape(fileName)))
def load_from_triage_string(self, strTriage):
    """Parse a triage rule string of the form ``<frame>=<followup>``.

    ``<frame>`` is either ``module!routine`` or a single name with an
    optional leading/trailing ``*`` wildcard.  Populates strFrame,
    strFollowup, strModule, strRoutine and the bExact* flags on self.

    Raises IndexError if the string contains no '='.
    """
    # replaced deprecated string.split/string.strip functions with
    # the equivalent str methods (works on both Python 2 and 3)
    splitOnEq = strTriage.split("=")
    self.strFrame = splitOnEq[0]
    self.strFollowup = splitOnEq[1].strip()
    splitOnBang = splitOnEq[0].split("!")
    self.strModule = "*"
    self.strRoutine = "*"
    if len(splitOnBang) > 1:
        # explicit "module!routine" form
        self.strModule = splitOnBang[0]
        self.strRoutine = splitOnBang[1]
    elif self.strFrame.endswith("*"):
        # "name*": wildcard routine, module is the prefix
        self.strModule = self.strFrame.rstrip("*")
    elif self.strFrame.startswith("*"):
        # "*name": wildcard module, routine is the suffix
        self.strRoutine = self.strFrame.lstrip("*")
    else:
        self.strModule = self.strFrame
    # "exact" means no wildcard remains in the component
    self.bExactModule = "*" not in self.strModule
    self.bExactRoutine = "*" not in self.strRoutine
    self.bExactFrame = self.bExactModule and self.bExactRoutine
def analyze(self, dictProps, dictArgs):
    # Heap-corruption analysis pass: when the triage properties already
    # carry FOLLOW_UP == 'heap_corruption', locate the stack-walker frame
    # and record the corrupt thread and faulting frame PC/symbol in
    # dictProps.  dictArgs is accepted but unused here.
    # NOTE(review): nesting below is reconstructed from a collapsed source;
    # the frame-PC lines are assumed to run whenever a stack-walker frame
    # exists (not only when tid != 0) -- confirm against upstream.
    _dbg_write('RUNNING HEAP CORRUPTION ANALYZE')
    if 'FOLLOW_UP' in dictProps and dictProps['FOLLOW_UP'] == 'heap_corruption':
        #check if there is a stack walker frame on the faulting stack
        _dbg_write('HEAP CORRUPTION IS PRESENT')
        stackWalkFrame = g_dbg.get_first_frame('Thread::StackWalkFramesEx')
        if stackWalkFrame is not None:
            tid = g_dbg.eval_uint(stackWalkFrame,'this->m_OSThreadId')
            if not tid == 0:
                thread = g_dbg.target.GetProcess().GetThreadByID(tid)
                dictProps['CORRUPT_ROOT_THREAD'] = str(thread)
            pc = self._find_walker_pc_as_uint()
            # hex() of a Python 2 long carries a trailing 'L'; strip it
            pcHex = string.rstrip(hex(pc), 'L')
            dictProps['CORRUPT_ROOT_FRAME_PC'] = pcHex
            sos = SosInterpreter()
            dictProps['CORRUPT_ROOT_FRAME'] = sos.get_symbol(pcHex)
def _count_grids(self):
    """Scan the index (hierarchy) file to count grids and star particles.

    Sets self.num_grids and self.num_stars, captures one sample grid
    file name and grid id, and hands them to _guess_dataset_type.
    Stops scanning as soon as a grid with a file name has been seen.
    """
    self.num_grids = None
    test_grid = test_grid_id = None
    self.num_stars = 0
    for line in rlines(open(self.index_filename, "rb")):
        # startswith accepts a tuple -- replaces the three or-ed calls
        if line.startswith(("BaryonFileName", "ParticleFileName",
                            "FileName ")):
            # was .strip().rstrip(): the trailing rstrip was redundant
            test_grid = line.split("=")[-1].strip()
        if line.startswith("NumberOfStarParticles"):
            self.num_stars = int(line.split("=")[-1])
        if line.startswith("Grid "):
            # first "Grid" line seen fixes num_grids; later ones only
            # update the sample grid id
            if self.num_grids is None:
                self.num_grids = int(line.split("=")[-1])
            test_grid_id = int(line.split("=")[-1])
            if test_grid is not None:
                break
    self._guess_dataset_type(self.ds.dimensionality, test_grid, test_grid_id)
def look_for_pythondoc(self, type, token, start, end, line):
    """(Token handler) Scan for a PythonDoc comment ("##") or a bare
    def/class subject; returns the next token-handler state.
    """
    # was string.rstrip(token): str method works on Python 2 and 3
    if type == tokenize.COMMENT and token.rstrip() == "##":
        # found a comment: set things up for comment processing
        self.comment_start = start
        self.comment = []
        return self.process_comment_body
    else:
        # deal with "bare" subjects
        if token == "def" or token == "class":
            self.subject_indent = self.indent
            self.subject_parens = 0
            self.subject_start = self.comment_start = None
            self.subject = []
            return self.process_subject(type, token, start, end, line)
        return self.look_for_pythondoc

##
# (Token handler) Processes a comment body.  This handler adds
# comment lines to the current comment.
def getdoc(object):
    """Get the doc string or comments for an object.

    Falls back to preceding comment lines when there is no docstring;
    returns '' when neither exists.
    """
    result = inspect.getdoc(object) or inspect.getcomments(object)
    # was the bare py2 string-module rstrip(); drop a leading blank line,
    # then trailing whitespace
    return re.sub('^ *\n', '', result.rstrip()) if result else ''
def splitdoc(doc):
    """Split a doc string into a synopsis line (if any) and the rest.

    The first line counts as the synopsis only when it stands alone or
    is followed by a blank line; otherwise everything is "the rest".
    """
    # was split(strip(doc), '\n') with the py2 string-module functions
    lines = doc.strip().split('\n')
    if len(lines) == 1:
        return lines[0], ''
    elif len(lines) >= 2 and not lines[1].rstrip():
        # blank second line: first line is the synopsis, rest is the body
        return lines[0], '\n'.join(lines[2:])
    return '', '\n'.join(lines)
def getdocloc(self, object):
    """Return the location of module docs or None"""
    try:
        modfile = inspect.getabsfile(object)
    except TypeError:
        modfile = '(built-in)'
    docloc = os.environ.get("PYTHONDOCS", "http://docs.python.org/library")
    basedir = os.path.join(sys.exec_prefix, "lib",
                           "python" + sys.version[0:3])
    # modules documented online: a fixed set of C builtins, plus anything
    # living under the stdlib directory (but not site-packages)
    known = ('errno', 'exceptions', 'gc', 'imp', 'marshal', 'posix',
             'signal', 'sys', 'thread', 'zipimport')
    if not isinstance(object, type(os)):
        return None
    if object.__name__ in ('xml.etree', 'test.pydoc_mod'):
        return None
    in_stdlib = (modfile.startswith(basedir) and
                 not modfile.startswith(os.path.join(basedir, 'site-packages')))
    if object.__name__ not in known and not in_stdlib:
        return None
    if docloc.startswith("http://"):
        return "%s/%s" % (docloc.rstrip("/"), object.__name__)
    return os.path.join(docloc, object.__name__ + ".html")
def indent(self, text, prefix='    '):
    """Indent text by prepending a given prefix to each line.

    Trailing whitespace on the last line is stripped, so indenting a
    newline-terminated string does not leave a dangling prefix.
    Returns '' for empty/None input.
    """
    if not text:
        return ''
    # was split/map/join from the py2 string module
    lines = [prefix + line for line in text.split('\n')]
    # str.split always yields at least one element, so lines is non-empty
    lines[-1] = lines[-1].rstrip()
    return '\n'.join(lines)
def section(self, title, contents):
    """Format a section with a given heading.

    The bolded title is followed by the indented contents (trailing
    whitespace stripped) and a blank separator line.
    """
    # was the bare py2 string-module rstrip()
    return self.bold(title) + '\n' + self.indent(contents).rstrip() + '\n\n'
def htmlIndent(snippetLine):
    # HTML-escape a source line (after stripping trailing whitespace) and
    # substitute spaces and tabs so indentation survives in HTML output.
    # NOTE(review): the replacement literals below look garbled by HTML
    # rendering of the original source -- they were presumably '&nbsp;'
    # entity strings (one per space, several per tab); as written the
    # space replacement is a no-op.  Confirm against the upstream file.
    ret = string.replace(string.replace(html.escape(string.rstrip(snippetLine)), ' ', ' '), '\t', '    ')
    return ret
def addPyListings(document, dir):
    """
    Insert Python source listings into the given document from files in
    the given directory based on C{py-listing} nodes.

    Any node in C{document} with a C{class} attribute set to
    C{py-listing} will have source lines taken from the file named in
    that node's C{href} attribute (searched for in C{dir}) inserted in
    place of that node.

    If a node has a C{skipLines} attribute, its value will be parsed as
    an integer and that many lines will be skipped at the beginning of the
    source file.

    @type document: A DOM Node or Document
    @param document: The document within which to make listing replacements.

    @type dir: C{str}
    @param dir: The directory in which to find source files containing the
    referenced Python listings.

    @return: C{None}
    """
    for node in domhelpers.findElementsWithAttribute(document, "class", "py-listing"):
        filename = node.getAttribute("href")
        outfile = cStringIO.StringIO()
        # strip trailing whitespace from every source line
        lines = map(string.rstrip, open(os.path.join(dir, filename)).readlines())
        # drop the first 'skipLines' lines (attribute defaults to 0)
        data = '\n'.join(lines[int(node.getAttribute('skipLines', 0)):])
        data = cStringIO.StringIO(text.removeLeadingTrailingBlanks(data))
        # colorize the Python source into HTML
        htmlizer.filter(data, outfile, writer=htmlizer.SmallerHTMLWriter)
        val = outfile.getvalue()
        _replaceWithListing(node, val, filename, "py-listing")
def getdoc(object):
    """Get the doc string or comments for an object.

    Falls back to preceding comment lines when there is no docstring;
    the result is passed through the project-local _encode helper.
    Returns '' when there is no documentation at all.
    """
    result = inspect.getdoc(object) or inspect.getcomments(object)
    result = _encode(result)
    # was the bare py2 string-module rstrip(); drop a leading blank line,
    # then trailing whitespace
    return re.sub('^ *\n', '', result.rstrip()) if result else ''
def FromSBFrame(sbFrame):
    # Build a DbgFrame from an LLDB SBFrame: capture the load address,
    # module and routine names, falling back to managed-frame lookup when
    # the frame has no native symbol.
    # NOTE(review): looks like a @staticmethod defined inside class
    # DbgFrame -- the name-mangled DbgFrame.__tryget_managed_frame_info
    # call only resolves within that class body; confirm in the full file.
    frame = DbgFrame()
    frame.sbFrame = sbFrame
    # hex() of a Python 2 long carries a trailing 'L'; strip it
    strIp = string.rstrip(hex(sbFrame.addr.GetLoadAddress(g_dbg.target)), 'L')
    strModule = sbFrame.module.file.basename
    strRoutine = sbFrame.symbol.name
    if (strModule is None or strModule == '') and (strRoutine is None or strRoutine == ''):
        # no native symbol info: try to resolve a managed (JIT) frame
        tplFrame = DbgFrame.__tryget_managed_frame_info(strIp)
        strModule = tplFrame[0]
        strRoutine = tplFrame[1]
    frame.__populate_frame_strs(strIp, strModule, strRoutine)
    return frame
def load_rules_from_file(self, strPath, lstRules):
    """Load stack-triage rules from the file at strPath, appending a
    StackTriageRule for each valid rule line to lstRules.

    Rule lines have the form "frame=followup"; blank lines and lines
    starting with ';' are treated as comments and skipped.
    """
    with open(strPath) as f:
        # read all lines from the file, without their trailing newline
        rawlines = [line.rstrip('\n') for line in f]
    # keep only rule-shaped lines; the original also tested
    # line[0] != '\n', which is always true after rstrip('\n'), and used
    # the Python-2-only '<>' operator
    ruleLines = [line for line in rawlines
                 if line and not line.startswith(';') and '=' in line]
    # create a rule for each rule line
    lstRules.extend(StackTriageRule(line) for line in ruleLines)
def get_parameter(self, parameter, type=None):
    """Get a parameter not in the parameterDict.

    Looks the parameter up in self.parameters first, otherwise scans the
    parameter file for a "name = value(s)" line, optionally converting
    each value with the callable passed as ``type``.  Matched values are
    cached in self.parameters (and "<X>Units" entries are mirrored into
    self.conversion_factors).  Returns "" when not found.
    """
    if parameter in self.parameters:
        return self.parameters[parameter]
    # 'with' closes the file; the original leaked the handle
    with open(self.parameter_filename) as pf:
        for line in pf:
            if line.find("#") >= 1:  # Keep the commented lines
                line = line[:line.find("#")]
            # was .strip().rstrip(): the trailing rstrip was redundant
            line = line.strip()
            if len(line) < 2:
                continue
            try:
                # was map(string.strip, map(string.rstrip, ...)) -- py2-only
                param, vals = [s.strip() for s in line.split("=")]
            except ValueError:
                mylog.error("ValueError: '%s'", line)
                # fix: skip the malformed line instead of falling through
                # with stale/unbound param and vals
                continue
            if parameter == param:
                if type is None:
                    t = vals.split()
                else:
                    # list() so len()/indexing work on Python 3 too
                    t = [type(v) for v in vals.split()]
                if len(t) == 1:
                    self.parameters[param] = t[0]
                else:
                    self.parameters[param] = t
                # mirror "<X>Units" parameters into conversion factors
                if param.endswith("Units") and not param.startswith("Temperature"):
                    dataType = param[:-5]
                    self.conversion_factors[dataType] = self.parameters[param]
                return self.parameters[parameter]
    return ""
def getdocloc(self, object, basedir=os.path.join(sys.exec_prefix, "lib", "python"+sys.version[0:3])):
    """Return the location of module docs or None"""
    try:
        modfile = inspect.getabsfile(object)
    except TypeError:
        modfile = '(built-in)'
    docloc = os.environ.get("PYTHONDOCS", "https://docs.python.org/library")
    basedir = os.path.normcase(basedir)
    # modules documented online: a fixed set of C builtins, plus anything
    # living under the stdlib directory (but not site-packages)
    stdlib_names = ('errno', 'exceptions', 'gc', 'imp', 'marshal', 'posix',
                    'signal', 'sys', 'thread', 'zipimport')
    if not isinstance(object, type(os)):
        return None
    if object.__name__ in ('xml.etree', 'test.pydoc_mod'):
        return None
    under_basedir = (modfile.startswith(basedir) and
                     not modfile.startswith(os.path.join(basedir, 'site-packages')))
    if object.__name__ not in stdlib_names and not under_basedir:
        return None
    if docloc.startswith(("http://", "https://")):
        return "%s/%s" % (docloc.rstrip("/"), object.__name__.lower())
    return os.path.join(docloc, object.__name__.lower() + ".html")
def __init__(self, tokenize, grammar, table='YappyTab', no_table=1, tabletype=LALRtable, noconflicts=1, expect=0, **args):
    """@param tokenize: same as for L{Lexer}
    @param grammar: if a string C{parse_grammar} is called
    @param table: and no_table, tabletype same as for L{LRparser}
    @param args: dictionary where:
        - key C{tmpdir} is the directory where the parse table used by
          the Yappy Grammar is stored;
        - key C{usrdir} is the directory where the user tables are stored
        - key C{nosemrules} if 1 semantic actions are not applied"""
    self.lex = Lexer(tokenize)
    operators = None
    # pick up an optional operator-precedence table defined by the lexer
    if self.lex.__dict__.has_key("operators"):
        operators = self.lex.operators
    # a grammar given as a string is parsed into rule objects first
    if type(grammar) is StringType:
        grammar = self.parse_grammar(grammar, {'locals': locals()}, args)
    # NOTE(review): this strips whitespace only (string.rstrip with no
    # chars argument); the analogous tmpdir handling elsewhere strips a
    # trailing '/' -- confirm whether rstrip(args['usrdir'], '/') was
    # intended here.
    if args.has_key('usrdir') and os.path.isdir(args['usrdir']):
        table = string.rstrip(args['usrdir']) + '/' + table
    if os.path.dirname(table) == "" or os.path.exists(os.path.dirname(table)):
        LRparser.__init__(self, grammar, table, no_table, tabletype, operators, noconflicts, expect, **args)
    else:
        sys.stderr.write("Directory %s do not exist\n" % table)
        sys.exit()
    # report any LR conflicts left after table construction
    if (self.Log.noconflicts):
        if ((self.Log.conflicts.has_key('sr') and len(self.Log.conflicts['sr']) != self.Log.expect)):
            print "LR conflicts: number %s value %s" % (len(self.Log.conflicts['sr']), self.Log.conflicts)
            print """If it is Ok, set expect to the number of conflicts and build table again"""
        elif self.Log.conflicts.has_key('rr'):
            print "LR conflicts rr : number %s value %s" % (len(self.Log.conflicts['rr']), self.Log.conflicts['rr'])
def __init__(self, no_table=1, table='yappypar.tab', tabletype=LR1table, **args):
    # Grammar for Yappy's own rule-description language: a sequence of
    # "ID -> RHS | RHS ... ;" rules with optional {{...}} semantic actions.
    grammar = grules([
        ("G -> RULE G", self.GRule),
        ("G -> []", EmptySemRule),
        ("RULE -> ID rulesym MULTI ruleend", self.RULERule),
        ("MULTI -> RHS rulesep MULTI", self.MULTIRule),
        ("MULTI -> RHS", self.MULTIRule),
        ("RHS -> []", EmptySemRule),  #RHS->OPSEM not allowed; epsilon-rule
        ("RHS -> RH OPSEM", self.RHSRule),
        ("RH -> ID RH", self.RHRule),
        ("RH -> ID", self.RHRule),
        ("OPSEM -> []", self.OPSEMRule),
        # ("OPSEM -> semsym ID csemsym",self.OPSEMRule),#OPSEM->OP not allowed
        # ("OPSEM -> semsym ID OP csemsym",self.OPSEMRule),
        ("OPSEM -> IDS", self.OPSEMRule1),
        ("OPSEM -> IDS OP", self.OPSEMRule1),
        ("OP -> opsym OPV", self.OPRule),
        ("OPV -> ID ID ", self.OPVRule)
    ])
    # Token definitions: {{...}} semantic-action bodies (inner text kept,
    # braces stripped), rule punctuation, and a catch-all ID token.
    tokenize = [
        ("\{\{.*\}\}", lambda x: ("IDS", string.strip(x[2:-2]))),
        ("\s+", ""),
        ("->", lambda x: ("rulesym", x)),
        ("\|", lambda x: ("rulesep", x)),
        (";", lambda x: ("ruleend", x)),
        # ("}}",lambda x: ("csemsym",x)),
        # ("{{",lambda x: ("semsym",x)),
        ("//", lambda x: ("opsym", x)),
        (".*", lambda x: ("ID", x))]
    # a user-supplied tmpdir (trailing '/' stripped) becomes the usrdir
    # where the generated parse table is cached
    if args.has_key('tmpdir'):
        args1 = {'usrdir': string.rstrip(args['tmpdir'], '/')}
    else:
        args1 = {}
    Yappy.__init__(self, tokenize, grammar, table, no_table, **args1)
def look_for_encoding(self, type, token, start, end, line):
    """(Token handler) Look for a PEP 263 coding directive in the first
    two lines; on a "##" comment, or past line 2, hand off to the
    PythonDoc scanner.  Returns the next token-handler state.
    """
    if type == tokenize.COMMENT:
        # was string.rstrip(token): str method works on Python 2 and 3
        if token.rstrip() == "##":
            return self.look_for_pythondoc(type, token, start, end, line)
        # raw string so \s and \w are regex escapes, not string escapes
        m = re.search(r"coding[:=]\s*([-_.\w]+)", token)
        if m:
            self.encoding = m.group(1)
            return self.look_for_pythondoc
    # coding directives are only valid on the first two lines
    if start[0] > 2:
        return self.look_for_pythondoc
    return self.look_for_encoding

##
# (Token handler) Scans for PythonDoc comments.
def __init__(self, data = 0):
    # Parse a NetBIOS resource record carrying name/address entries.
    # @param data: raw record bytes; 0/empty builds an empty record.
    NBResourceRecord.__init__(self, data)
    self.addr_entries = [ ]
    if data:
        self._data = self.get_rdata()
        _qn_length, qn_name, qn_scope = decode_name(data)
        # NetBIOS names are space-padded; the last byte of the decoded
        # name is the name-type code, the rest (padding stripped) is the
        # name itself, re-joined with its scope suffix.
        self._netbios_name = string.rstrip(qn_name[:-1]) + qn_scope
        self._name_type = ord(qn_name[-1])
        # NOTE(review): unpack() returns a 1-tuple here, so _nb_flags is
        # a tuple rather than an int -- confirm callers expect that.
        self._nb_flags = unpack('!H', self._data[:2])
        offset = 2
        # remaining rdata is a sequence of 4-byte IPv4 addresses
        while offset<len(self._data):
            self.addr_entries.append('%d.%d.%d.%d' % unpack('4B', (self._data[offset:offset+4])))
            offset += 4