我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用re.M。
def findHeadings(self, lines, struc):
    """Locate heading lines and group *lines* into per-heading sections.

    Parameters
    ----------
    lines : list of str
        Text lines to scan.
    struc : sequence
        Structure descriptor; its last element is the heading level
        interpolated into ``self.textparser._h_re_base``.

    Returns
    -------
    list of TextContainer
        One container per heading, spanning [heading line, next heading).
    """
    linenum = len(lines)
    level = struc[-1]
    # Verbose (re.X) pattern; re.M makes ^/$ anchor at every line.
    _h_re = re.compile(self.textparser._h_re_base % level, re.X | re.M)
    # range() instead of py2-only xrange(): same results, also runs on py3.
    hidx = [ii for ii in range(linenum) if _h_re.match(lines[ii])]
    hidx.append(linenum)
    # Consecutive heading indices delimit each section.
    groups = [[hidx[ii], hidx[ii + 1]] for ii in range(len(hidx) - 1)]
    result = []
    for span in groups:
        # --------Use heading line as container name--------
        result.append(TextContainer(lines[span[0]], struc, span))
    return result
# Decide the destination directory (and optional hard-link directory) for
# self.input_video, driven by module-level config (MOVE_FILES, FOREIGN,
# HARD_LINK, LOCALE, LANG_PATTERN and the *_TARGET constants).
# NOTE(review): re.M in the LANG_PATTERN search is a no-op on a single
# filename string (re.I does the real work) — confirm before relying on it.
# Logic kept byte-identical; the collapsed conditional nesting is intricate.
def add_targets(self): self.target_dir = os.path.dirname(self.input_video) self.hard_link = '' if MOVE_FILES: #If we care about foreign languages execute this part if FOREIGN: audiostreams = self.get_audio_streams() #if we want to create hard links and there is both english and locale audio stream in the file or in the name if HARD_LINK and ((LOCALE in audiostreams and 'eng' in audiostreams) or (re.search('.{}.'.format(LOCALE),self.input_video,re.I) and re.search('.eng.',self.input_video,re.I))): self.target_dir = TVSHOW_TARGET if self.is_show else MOVIE_TARGET self.hard_link = LANG_TVSHOW_TARGET if self.is_show else LANG_MOVIE_TARGET else: #If the the input is matches LOCALE put it in the lang folders if re.search(LANG_PATTERN,self.input_video,re.I | re.M): self.target_dir = LANG_TVSHOW_TARGET if self.is_show else LANG_MOVIE_TARGET #Else put them in the main folder else: self.target_dir = TVSHOW_TARGET if self.is_show else MOVIE_TARGET #if we don't give a shit about multiple languages simply determine if tvshow or movie else: self.target_dir = TVSHOW_TARGET if self.is_show else MOVIE_TARGET
# CGI-style page renderer: prints an HTML table of album download links.
# Depends on module globals (header, ask, color, url, html, Artist, Album,
# difficulties) and helpers (artist_song, Image, album, file_size).
# NOTE(review): the parameter `list_pr` shadows the function name, and
# list_pr.index(i) inside the loop is O(n^2) and wrong for duplicate URLs —
# flagged only; output strings kept byte-identical.
def list_pr(list_pr,trackname): print (header) print ('<table>') print('<tr><td>%s %s%s</font></br></br></td></tr>'%(ask,color,url)+'<tr><th>%s %s</br>%s %s</br></br></th></tr>'%(Artist,artist_song(html)[1],Album,artist_song(html)[0])+'<tr><th><img src="%s" /></th></tr>'%Image(html)[1]+'</table>') if ((album(url)[0]).isdigit() == True): print('<table><tr><td>'+'</br><a href="%(1)s">Download track %(2)s</a> - %(4)s (%(3)s) %(5)s at: %(1)s'%{'1':list_pr[int(album(url)[0])],'2':(int(album(url)[0]))+1,'3':file_size(list_pr[int(album(url)[0])])[1],'4':trackname[int(album(url)[0])],'5':color}+'</font></br></br></br>All album tracks are:'+'</td></tr>') for i in list_pr: #print ('<tr><td>'+i[0]+'<tr><td>'+i[1]+'</td></th>') print ('<tr><td>'+'<a href="%s">Download track %s</a> - %s (%s) %s at: '%(i,list_pr.index(i)+1,trackname[list_pr.index(i)],file_size(i)[1],color)+i+'</font></td></tr>') else: print('<table></br>') for i in list_pr: #print ('<tr><td>'+i[0]+'<tr><td>'+i[1]+'</td></th>') print ('<tr><td>'+'<a href="%s">Download track %s</a> - %s (%s) %s at: '%(i,list_pr.index(i)+1,trackname[list_pr.index(i)],file_size(i)[1],color)+i+'</font></td></tr>') print('</table>') #print(datetime.now().strftime('</br></br></br>%A, %d %b %Y, %I:%M:%S %p')) print(difficulties) print ("<p><b><a href='/RJ'>Try again</a></b></p>") print ("</body></html>");
# Variant of the album-page renderer above without track names: prints an
# HTML table of download links.  Depends on module globals (header, url,
# html) and helpers (artist_song, Image, album, file_size, list_dl).
# NOTE(review): list_pr.index(i) in the loop mis-numbers duplicate URLs;
# flagged only — output kept byte-identical.
def list_pr(list_pr): print (header) print ('<table>') print('<tr><td>You asked for %s</br></br></td></tr>'%url+'<tr><th>Artist: %s</br>Album: %s</br></br></th></tr>'%(artist_song(html)[1],artist_song(html)[0])+'<tr><th><img src="%s" /></th></tr>'%Image(html)[1]+'</table>') if ((album(url)[0]).isdigit() == True): print('<table><tr><td>'+'</br><a href="%(1)s">Download track %(2)s</a> (%(3)s) at: %(1)s'%{'1':list_dl(album(url))[int(album(url)[0])],'2':(int(album(url)[0]))+1,'3':file_size(list_dl(album(url))[int(album(url)[0])])}+'</br></br></br>Other album tracks are:'+'</td></tr>') for i in list_pr: #print ('<tr><td>'+i[0]+'<tr><td>'+i[1]+'</td></th>') print ('<tr><td>'+'<a href="%s">Download track %s</a> (%s) at: '%(i,list_pr.index(i)+1,file_size(i))+i+'</td></tr>') else: print('<table></br>') for i in list_pr: #print ('<tr><td>'+i[0]+'<tr><td>'+i[1]+'</td></th>') print ('<tr><td>'+'<a href="%s">Download track %s</a> (%s) at: '%(i,list_pr.index(i)+1,file_size(i))+i+'</td></tr>') print('</table>') #print(datetime.now().strftime('</br></br></br>%A, %d %b %Y, %I:%M:%S %p')) print ("<p><b><a href='/RJ'>Try again</a></b></p>") print ("</body></html>");
# Renders an HTML page of video download links (480p/720p/1080) for a
# single track.  Filters video(url) results by file_size() string length
# (presumably: long size strings mean real files — verify against helper).
# Depends on module globals url/html and helpers video, file_size,
# artist_song, Image.  The `dl` parameter is unused in the visible body.
def vid_pr(dl): print (header) print ('<table>') j=0 k=0 AA=[] AB=['Download 480p','Download 720p','Download 1080'] while j<len(video(url)): if len(file_size(video(url)[j]))>6: AA.append(video(url)[j]) j+=1 print ('<tr><td>'+'You asked for %s</br></br></td></tr>'%url+'<tr><th>Artist: %s</br>Track: %s</br></br></th></tr>'%(artist_song(html)[1],artist_song(html)[0])+'<tr><th><img src="%s" /></th></tr></table>'%Image(html)[0]) print('<table><tr><td></br>') while k<len(AA): print('<tr><td>'+'%s %s %s'%('<a href="%s"><b>%s</b></a>'%(AA[k],AB[k]),' (%s)'%file_size(AA[k]),'at: %s'%AA[k])+'</br></td></tr>') k+=1 #print(datetime.now().strftime('</br></br></br>%A, %d %b %Y, %I:%M:%S %p')) print('</td></tr></table>') print ("<p><b><a href='/RJ'>Try again</a></b></p>") print ("</body></html>");
def entab(temp, tab_width=4, all=0):
    """Convert runs of *tab_width* spaces in *temp* to tabs.

    If *all* is true, every run of ``tab_width`` spaces anywhere in the
    text becomes a tab; otherwise (the default) only leading indentation
    is converted.  Returns the converted string.

    Note: ``all`` shadows the builtin but is kept for interface
    compatibility with existing callers.
    """
    # BUGFIX: the py2 backtick-repr syntax (`tab_width`) is a SyntaxError
    # on Python 3; build the repetition count with %d instead (works on both).
    if all:
        temp = re.sub(r" {%d}" % tab_width, r"\t", temp)
    else:
        # re.M: anchor ^ at each line start, not just the string start.
        patt = re.compile(r"^ {%d}" % tab_width, re.M)
        temp, count = patt.subn(r"\t", temp)
        i = 1
        # Only loops a few times, at most six or seven on heavily
        # indented code: pass i converts the space run after i tabs.
        while count > 0:
            subpatt = re.compile(r"^\t{%d} {%d}" % (i, tab_width), re.M)
            temp, count = subpatt.subn("\t" * (i + 1), temp)
            i += 1
    return temp
def scan_aux(self, node):
    """
    A recursive regex-based scanner that finds included auxiliary files:
    returns *node* plus every node reachable through ``\\@input{...}``.
    """
    found_nodes = [node]
    aux_pat = re.compile(r'\\@input{(?P<file>[^{}]*)}', re.M)

    def walk(cur):
        # Scan the file text for \@input{...} and recurse into each hit.
        for hit in aux_pat.finditer(cur.read()):
            child = cur.parent.find_or_declare(hit.group('file'))
            if child and child not in found_nodes:
                Logs.debug('tex: found aux node ' + child.abspath())
                found_nodes.append(child)
                walk(child)

    walk(node)
    return found_nodes
def parse_rst_node(node, nodes, names, seen):
    """Recursively collect include/image/figure references from a reST node.

    Resolved file nodes are appended to *nodes* (recursing into
    ``include`` directives); unresolved paths go to *names*.  *seen*
    guards against visiting the same node twice.
    """
    # TODO add extensibility, to handle custom rst include tags...
    if node in seen:
        return
    seen.append(node)
    code = node.read()
    # BUGFIX: the reST directive marker ".." must be escaped — the
    # unescaped dots matched ANY two characters, so non-directive lines
    # (e.g. "xx include:: f") could slip through.
    re_rst = re.compile(
        r'^\s*\.\. ((?P<subst>\|\S+\|) )?(?P<type>include|image|figure):: (?P<file>.*)$',
        re.M)
    for match in re_rst.finditer(code):
        ipath = match.group('file')
        itype = match.group('type')
        Logs.debug("rst: visiting %s: %s" % (itype, ipath))
        found = node.parent.find_resource(ipath)
        if found:
            nodes.append(found)
            if itype == 'include':
                parse_rst_node(found, nodes, names, seen)
        else:
            names.append(ipath)
def get_nag_version(conf, fc):
    """Get the NAG compiler version"""
    # The version appears on a line like "NAG Fortran Compiler Release 6.2"
    # in either stdout or stderr of `fc -V`; re.M anchors ^ per line.
    version_re = re.compile(
        r"^NAG Fortran Compiler *Release *(?P<major>\d*)\.(?P<minor>\d*)",
        re.M).search
    out, err = fc_config.getoutput(conf, fc + ['-V'], stdin=False)
    match = version_re(out) if out else None
    if match is None:
        match = version_re(err)
    if not match:
        conf.fatal('Could not determine the NAG version.')
    k = match.groupdict()
    conf.env['FC_VERSION'] = (k['major'], k['minor'])
def subst_func(tsk):
    "Substitutes variables in a .in file"
    var_re = re.compile(r'@(\w+)@', re.M)
    code = tsk.inputs[0].read()
    # Escape literal % first so the final %-formatting cannot misfire
    # on percent signs present in the input file.
    code = code.replace('%', '%%')
    template = var_re.sub(r'%(\1)s', code)
    env = tsk.env
    di = getattr(tsk, 'dict', {}) or getattr(tsk.generator, 'dict', {})
    if not di:
        # No explicit mapping given: pull each @VAR@ from the build
        # environment, trying the exact name first, then its uppercase.
        for name in var_re.findall(code):
            di[name] = env.get_flat(name) or env.get_flat(name.upper())
    tsk.outputs[0].write(template % di)
def get_nag_version(conf, fc):
    """Get the NAG compiler version"""
    # NOTE(review): this variant invokes the compiler with '-v' while the
    # sibling implementation uses '-V' — confirm which flag the targeted
    # NAG release expects before unifying them.
    version_re = re.compile(
        r"^NAG Fortran Compiler *Release *(?P<major>\d*)\.(?P<minor>\d*)",
        re.M).search
    out, err = fc_config.getoutput(conf, fc + ['-v'], stdin=False)
    match = version_re(out) if out else None
    if match is None:
        match = version_re(err)
    if not match:
        conf.fatal('Could not determine the NAG version.')
    k = match.groupdict()
    conf.env['FC_VERSION'] = (k['major'], k['minor'])
def find_version(*file_paths):
    """
    see https://github.com/pypa/sampleproject/blob/master/setup.py
    """
    target = os.path.join(here, *file_paths)
    with open(target, 'r') as fh:
        contents = fh.read()
    # The version line must have the form  __version__ = 'ver'
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if match:
        return match.group(1)
    raise RuntimeError("Unable to find version string. "
                       "Should be at the first line of __init__.py.")
def update_consts(filename, constname, content):
    """Replace the ``constname = (...)`` block inside *filename* with a
    freshly formatted block built from *content*."""
    with open(filename) as fh:
        data = fh.read()
    # Match the whole multi-line tuple assignment: from "NAME = (" through
    # the closing ")" on its own (possibly indented) line.  re.M makes
    # ^/$ per-line; re.S lets ".*?" span the newlines in between.
    block_re = re.compile(r'^%s\s*=\s*\($.*?^\s*\)$' % constname, re.M | re.S)
    found = block_re.search(data)
    if not found:
        raise ValueError('Could not find existing definition for %s'
                         % (constname,))
    new_block = format_lines(constname, content)
    data = data[:found.start()] + new_block + data[found.end():]
    with open(filename, 'w') as fh:
        fh.write(data)
def search(self, regexp, path=None):
    """Anchored multi-line regex search over this tree's text.

    Searches the joined text of *path*'s children (or ``str(self)`` when
    no path is given).  Returns ``(unnamed_groups, named_groups)`` when
    the match captured groups, the matched string otherwise, or None.
    """
    pattern = re.compile(r'^%s' % regexp, re.M)
    if path:
        parent = self.get(path)
        if not parent or not parent.children:
            return
        data = '\n'.join(child.text for child in parent.children)
    else:
        data = str(self)
    found = pattern.search(data)
    if not found:
        return
    if not found.groups():
        return found.group()
    named = found.groupdict()
    # Unnamed groups are all captured groups minus those captured by name.
    unnamed = list(set(found.groups()).difference(named.values()))
    return (unnamed, named)
def run(self):
    """Verify the target is vulnerable, then pull
    ``username ... password ... user-type`` triples out of the leaked
    script content and print them as a credentials table."""
    if not self.check():
        print_error("Exploit failed - target seems to be not vulnerable")
        return
    print_success("Target appears to be vulnerable")
    if self.script_content and len(self.script_content):
        print_status("Parsing the script ...")
        creds = []
        for raw_line in self.script_content.split("\n"):
            # re.M is redundant with re.match on a single stripped line,
            # but is kept for parity with the original flag set.
            hit = re.match(r'username (.*) password (.*) user-type (.*)',
                           raw_line.strip(), re.I | re.M)
            if hit:
                creds.append((hit.group(1), hit.group(2), hit.group(3)))
        print_table(('Username', 'Hash', 'User type'), *creds)
def add_head(text):
    """Add head html from template: load the shared template head, rewrite
    its relative asset paths to absolute template locations, strip the
    demo block, and prepend the result to *text*."""
    head = open(PATH_TO_TEMPLATE_HTML).read()
    head = head.replace('{{ url_index }}', PATH_TO_HTML + '/' + 'index.html')
    # Point relative asset references at the template directory.
    rewrites = (
        ('href="img/', 'href="' + PATH_TO_TEMPLATE + '/img/'),
        ('="lib/', '="' + PATH_TO_TEMPLATE + '/lib/'),
        ('="css/', '="' + PATH_TO_TEMPLATE + '/css/'),
        ('="js/', '="' + PATH_TO_TEMPLATE + '/js/'),
    )
    for old, new in rewrites:
        head = head.replace(old, new)
    # Remove demo content; DOTALL lets ".*" span the multi-line block.
    head = re.sub(r'<!-- start of demo -->.*<!-- end of demo -->', r'',
                  head, flags=re.M | re.DOTALL)
    return head + text
def unhighlight(text):
    """Unwrap pygments "highlight" divs whose content contains no <span>
    (i.e. plain notes), leaving genuinely highlighted code untouched."""
    wrapper = '<div class="highlight"><pre><span></span>'
    snippets = re.findall(wrapper + '(?P<text>.+?)</pre></div>',
                          text, re.M | re.S)
    for snippet in snippets:
        if not snippet.strip():
            continue
        if '<span' in snippet:
            # Real highlighted code: leave it exactly as-is.
            continue
        # Plain note: locate the full wrapped block and drop the div
        # wrapper, keeping the bare <pre> element.
        whole = re.findall(wrapper + re.escape(snippet) + '</pre></div>',
                           text, re.M | re.S)
        if whole:
            wrapped = whole[0]
            unwrapped = wrapped.replace('<div class="highlight">', '') \
                               .replace('</pre></div>', '</pre>')
            text = text.replace(wrapped, unwrapped)
    return text
# Process Markdown <pre><code> blocks: find runs of lines indented by a
# tab or self.tab_width spaces and hand each run to self._code_block_sub.
# NOTE(review): this verbose (re.X) pattern relies on its ORIGINAL line
# breaks — under re.X each "#" comments out the rest of its line — so the
# code is kept byte-identical rather than restyled.
def _do_code_blocks(self, text): """Process Markdown `<pre><code>` blocks.""" code_block_re = re.compile(r''' (?:\n\n|\A\n?) ( # $1 = the code block -- one or more lines, starting with a space/tab (?: (?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces .*\n+ )+ ) ((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc # Lookahead to make sure this block isn't already in a code block. # Needed when syntax highlighting is being used. (?![^<]*\</code\>) ''' % (self.tab_width, self.tab_width), re.M | re.X) return code_block_re.sub(self._code_block_sub, text)
# Walk root_dir and (re)cythonize every changed .pyx file; sources that
# mention libcpp are emitted as C++ (.cpp), others as C (.c).
# NOTE(review): the collapsed one-line form makes the loop level at which
# save_hashes() runs ambiguous ("once per module" per its comment), so the
# code is kept byte-identical rather than reformatted.
def check_and_cythonize(root_dir): print(root_dir) hashes = load_hashes(HASH_FILE) for cur_dir, dirs, files in os.walk(root_dir): for filename in files: if filename.endswith('.pyx'): gen_file_ext = '.c' # Cython files with libcpp imports should be compiled to cpp with open(os.path.join(cur_dir, filename), 'rb') as f: data = f.read() m = re.search(b"libcpp", data, re.I | re.M) if m: gen_file_ext = ".cpp" cython_file = filename gen_file = filename.replace('.pyx', gen_file_ext) cythonize_if_unchanged(cur_dir, cython_file, gen_file, hashes) # Save hashes once per module. This prevents cythonizing prev. # files again when debugging broken code in a single file save_hashes(hashes, HASH_FILE)
def find_process_files(root_dir):
    """Walk *root_dir* and regenerate templated/Cython sources that changed.

    A ``.pyx`` with a sibling ``.pyx.in`` template is skipped (it is
    generated).  Sources carrying a "# distutils: language = c++" header
    are emitted as .cxx, the rest as .c.  The hash DB is saved once at
    the end.
    """
    hash_db = load_hashes(HASH_FILE)
    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            # A sibling "<name>.in" template means this .pyx is generated.
            in_file = os.path.join(cur_dir, filename + ".in")
            if filename.endswith('.pyx') and os.path.isfile(in_file):
                continue
            for fromext, function in rules.items():
                if not filename.endswith(fromext):
                    continue
                with open(os.path.join(cur_dir, filename), 'rb') as fh:
                    data = fh.read()
                cpp_header = re.search(
                    br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$",
                    data, re.I | re.M)
                toext = ".cxx" if cpp_header else ".c"
                tofile = filename[:-len(fromext)] + toext
                process(cur_dir, filename, tofile, function, hash_db)
    save_hashes(hash_db, HASH_FILE)
def test(self):
    '''
    Trinity need to run as non root user
    '''
    args = self.params.get('runarg', default=' ')
    cmd = 'su - trinity -c " %s %s %s"' % (
        os.path.join(self.srcdir, 'trinity'), args, '-N 1000000')
    process.system(cmd, shell=True)
    dmesg = process.system_output('dmesg')
    # Look for fuzzer fallout in the kernel log.
    # NOTE(review): system_output() may return bytes on Python 3, which
    # would make these str-pattern searches raise TypeError — confirm.
    if re.search(r'unhandled', dmesg, re.M | re.I):
        self.log.info("Testcase failure as segfault")
    if re.search(r'Call Trace:', dmesg, re.M | re.I):
        self.log.info("some call traces seen please check")
def get_version():
    """Obtain the packge version from a python file e.g. pkg/__init__.py

    See <https://packaging.python.org/en/latest/single_source_version.html>.
    """
    file_dir = os.path.realpath(os.path.dirname(__file__))
    version_path = os.path.join(file_dir, '..', 'behold', 'version.py')
    with open(version_path) as fh:
        txt = fh.read()
    match = re.search(r"""^__version__ = ['"]([^'"]*)['"]""", txt, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
def DocToHelp(doc):
    """Takes a __doc__ string and reformats it as help."""
    # Get rid of starting and ending white space. Using lstrip() or even
    # strip() could drop more than maximum of first line and right space
    # of last line.
    doc = doc.strip()

    # Get rid of all empty lines
    whitespace_only_line = re.compile('^[ \t]+$', re.M)
    doc = whitespace_only_line.sub('', doc)

    # Cut out common space at line beginnings
    doc = CutCommonSpacePrefix(doc)

    # Just like this module's comment, comments tend to be aligned somehow.
    # In other words they all start with the same amount of white space
    # 1) keep double new lines
    # 2) keep ws after new lines if not empty line
    # 3) all other new lines shall be changed to a space
    # Solution: Match new lines between non white space and replace with space.
    # BUGFIX: re.M was previously passed positionally as re.sub's *count*
    # argument (re.M == 8), silently limiting the rewrap to the first 8
    # newlines.  The pattern has no ^/$ anchors, so no flag is needed.
    doc = re.sub(r'(?<=\S)\n(?=\S)', ' ', doc)
    return doc
def is_module_loaded(module):
    """Checks if a kernel module is already loaded.

    :param module: kernel module name to look for in ``lsmod`` output
    :returns: True when a line of lsmod starts with *module*
    """
    # re.escape keeps module names containing regex metacharacters from
    # being misread as patterns; re.M anchors ^ at every lsmod line.
    matches = re.findall('^%s[ ]+' % re.escape(module), lsmod(), re.M)
    return len(matches) > 0
def is_enabled():
    """
    Check if `ufw` is enabled

    :returns: True if ufw is enabled
    """
    # Force English output so the status line is predictable to match.
    env = {'LANG': 'en_US', 'PATH': os.environ['PATH']}
    output = subprocess.check_output(['ufw', 'status'],
                                     universal_newlines=True,
                                     env=env)
    hits = re.findall(r'^Status: active\n', output, re.M)
    return len(hits) >= 1
def enable(soft_fail=False):
    """
    Enable ufw

    :param soft_fail: If set to True silently disables IPv6 support in ufw,
                      otherwise a UFWIPv6Error exception is raised when IP6
                      support is broken.
    :returns: True if ufw is successfully enabled
    """
    if is_enabled():
        return True

    if not is_ipv6_ok(soft_fail):
        disable_ipv6()

    env = {'LANG': 'en_US', 'PATH': os.environ['PATH']}
    output = subprocess.check_output(['ufw', 'enable'],
                                     universal_newlines=True,
                                     env=env)
    hookenv.log(output, level='DEBUG')

    enabled = re.findall('^Firewall is active and enabled on system startup\n',
                         output, re.M)
    if not enabled:
        hookenv.log("ufw couldn't be enabled", level='WARN')
        return False
    hookenv.log("ufw enabled", level='INFO')
    return True
def disable():
    """
    Disable ufw

    :returns: True if ufw is successfully disabled
    """
    if not is_enabled():
        return True

    env = {'LANG': 'en_US', 'PATH': os.environ['PATH']}
    output = subprocess.check_output(['ufw', 'disable'],
                                     universal_newlines=True,
                                     env=env)
    hookenv.log(output, level='DEBUG')

    disabled = re.findall(r'^Firewall stopped and disabled on system startup\n',
                          output, re.M)
    if not disabled:
        hookenv.log("ufw couldn't be disabled", level='WARN')
        return False
    hookenv.log("ufw disabled", level='INFO')
    return True
def default_policy(policy='deny', direction='incoming'):
    """
    Changes the default policy for traffic `direction`

    :param policy: allow, deny or reject
    :param direction: traffic direction, possible values: incoming,
                      outgoing, routed
    """
    if policy not in ['allow', 'deny', 'reject']:
        raise UFWError(('Unknown policy %s, valid values: '
                        'allow, deny, reject') % policy)
    if direction not in ['incoming', 'outgoing', 'routed']:
        raise UFWError(('Unknown direction %s, valid values: '
                        'incoming, outgoing, routed') % direction)

    env = {'LANG': 'en_US', 'PATH': os.environ['PATH']}
    output = subprocess.check_output(['ufw', 'default', policy, direction],
                                     universal_newlines=True,
                                     env=env)
    hookenv.log(output, level='DEBUG')

    changed = re.findall("^Default %s policy changed to '%s'\n"
                         % (direction, policy), output, re.M)
    if not changed:
        hookenv.log("ufw couldn't change the default policy to %s for %s"
                    % (policy, direction), level='WARN')
        return False
    hookenv.log("ufw default policy for %s changed to %s"
                % (direction, policy), level='INFO')
    return True
def defSyntax(self):
    '''Define re patterns according to syntax.'''
    # ------------------REGEX patterns------------------
    # BUGFIX: re.L (LOCALE) was dropped from the image patterns: it is a
    # no-op here (no \w/\b/\s locale-dependent classes) and raises
    # ValueError when combined with a str pattern on Python 3.6+.
    if self.syntax == 'markdown':
        self._img_re = re.compile('^(.*)!\\[(.+?)\\]\\((.+?)\\)', re.M)
        # %s is filled with a repetition count, e.g. '1,6'.
        self._h_re_base = r'''
            (^(.+)[ \t]*\n(=+|-+)[ \t]*\n+)
            |
            (^(\#{%s})      # \1 = string of #'s
            [ \t]*
            (.+?)           # \2 = Header text
            [ \t]*
            (?<!\\)         # ensure not an escaped trailing '#'
            \#*             # optional closing #'s (not counted)
            \n+
            )
            '''
        self._all_h_re = re.compile(self._h_re_base % '1,6', re.X | re.M)
    elif self.syntax == 'zim':
        self._img_re = re.compile('^(.*)\\{\\{(.+?)\\}\\}(.*)$', re.M)
        self._h_re_base = r'''
            ^(\={%s})       # \1 = string of ='s
            [ \t]*
            (.+?)           # \2 = Header text
            [ \t]*
            \1
            \n+
            '''
        self._all_h_re = re.compile(self._h_re_base % '1,6', re.X | re.M)
    else:
        raise Exception("Unknown syntax %s" % self.syntax)

    return
# Create an Evernote notebook named *title* (truncated, leading markdown
# '#' heading markers stripped) unless one with that name already exists.
# Returns 0 on success/already-exists, exits with error otherwise.
# Kept byte-identical: Python-2-only unicode() handling and live geeknote
# service calls make a behavior-preserving rewrite unsafe to verify.
def createNoteBook(title,geeknote=None,verbose=True): #-------------------Trunc title------------------- title=title.strip() title=truncStr(title,MAX_NOTEBOOK_TITLE_LEN) #-------Make sure title doesnt start with #------- tp=textparse.TextParser('markdown') _h_re=re.compile(tp._h_re_base %'1,', re.X | re.M) m=_h_re.match(title) if m: title=m.group(6) #---------------------Connect--------------------- if geeknote is None: geeknote=GeekNoteConnector() geeknote.connectToEvertone() #-----------------Check if exists----------------- notebooks=geeknote.getEvernote().findNotebooks() out.preloader.stop() if not isinstance(title,unicode): title=unicode(title,'utf8') notebooks=[unicode(ii.name,'utf8') for ii in notebooks] if title in notebooks: out.successMessage('Notebook already exists.') return 0 else: out.preloader.setMessage("Creating notebook...") result = geeknote.getEvernote().createNotebook(name=title) if result: out.successMessage("Notebook has been successfully created.") return 0 else: out.failureMessage("Error while the process " "of creating the notebook.") return tools.exitErr()
# Create an Evernote note: truncates title/notebook, strips leading
# markdown '#' heading markers, caps the tag count, optionally creates
# the target notebook first, then saves the note via geeknote.
# Kept byte-identical: live service calls and intricate truncation /
# heading-group indexing make a behavior-preserving rewrite unsafe.
def createNote(title,content,tags,notebook,geeknote=None,\ skipnotebook=False): #-------------------Trunc texts------------------- notebook=notebook.strip() notebook=truncStr(notebook,MAX_NOTEBOOK_TITLE_LEN) title=title.strip() title=truncStr(title,MAX_NOTE_TITLE_LEN) #-------Make sure title doesnt start with #------- tp=textparse.TextParser('markdown') _h_re=re.compile(tp._h_re_base %'1,', re.X | re.M) m=_h_re.match(title) if m: title=m.group(6) m=_h_re.match(notebook) if m: notebook=m.group(6) if tags is not None and len(tags.split(','))>=MAX_NOTE_TAGS: tags=u','.join(tags.split(',')[:MAX_NOTE_TAGS]) #---------------------Connect--------------------- if geeknote is None: geeknote=GeekNoteConnector() geeknote.connectToEvertone() #-----------------Create notebook----------------- if not skipnotebook: result=createNoteBook(notebook,geeknote) if skipnotebook or result==0: #----------------------Write---------------------- inputdata=geeknote._parseInput(title,content,tags,notebook,None) out.preloader.setMessage('Creating note...') result=bool(geeknote.getEvernote().createNote(**inputdata)) if result: out.successMessage("Note has been successfully saved.") else: out.failureMessage("Error while saving the note.")
def _dict(self): """ Construct an ordered dict from the naming elements of the path """ d = OrderedDict() for json_fragment in re.findall('\((.*?)\)', self, re.M): json_ordered_dict = json.loads(re.sub("'", '"', json_fragment), object_pairs_hook=OrderedDict) d.update(json_ordered_dict) return d
def get_version(*file_paths):
    """Retrieves the version from tigerleaflet/__init__.py

    :raises RuntimeError: when no ``__version__ = '...'`` line is found.
    """
    filename = os.path.join(os.path.dirname(__file__), *file_paths)
    # FIX: use a context manager so the file handle is closed promptly
    # (the previous open().read() leaked it until GC).
    with open(filename) as fh:
        version_file = fh.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError('Unable to find version string.')
def find_meta(meta):
    """
    Extract __*meta*__ from META_FILE.
    """
    pattern = r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta)
    found = re.search(pattern, META_FILE, re.M)
    if found:
        return found.group(1)
    raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
def handle_command(self):
    """Extract the value following ``-name`` from ``self.cmd_str``.

    :returns: ``{"name": <captured>}`` on a regex hit (the capture may be
              None when only the ``|\\s.*`` alternative matched, as
              before), ``{}`` otherwise — including when cmd_str is
              missing or not a string.
    """
    try:
        found = re.search(r'.* -name (.*)$|\s.*', self.cmd_str, re.M | re.I)
        if found:
            return {"name": found.group(1)}
        return {}
    except (AttributeError, TypeError):
        # FIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) to the two failure modes that
        # actually occur: missing attribute or non-string cmd_str.
        return {}
def get_version(*file_paths):
    """Retrieves the version from magic_cards/__init__.py

    :raises RuntimeError: when no ``__version__ = '...'`` line is found.
    """
    filename = os.path.join(os.path.dirname(__file__), *file_paths)
    # FIX: context manager closes the handle the bare open().read() leaked.
    with open(filename) as fh:
        version_file = fh.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError('Unable to find version string.')
def get_version(*file_paths):
    """
    Extract the version string from the file at the given relative path
    fragments.

    :raises RuntimeError: when no ``__version__ = '...'`` line is found.
    """
    filename = os.path.join(os.path.dirname(__file__), *file_paths)
    # FIX: context manager closes the handle the bare open().read() leaked.
    with open(filename) as fh:  # pylint: disable=open-builtin
        version_file = fh.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError('Unable to find version string.')
def get_version(*file_paths):
    """
    Extract the version string from the file at the given relative path
    fragments.

    :raises RuntimeError: when no ``__version__ = '...'`` line is found.
    """
    filename = os.path.join(os.path.dirname(__file__), *file_paths)
    # FIX: context manager closes the handle the bare open().read() leaked.
    with open(filename) as fh:
        version_file = fh.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError('Unable to find version string.')
def find_version(*file_paths):
    """Return the ``__version__`` string found in the file located by the
    given path fragments (loaded via the module-level ``read`` helper)."""
    contents = read(*file_paths)
    found = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if not found:
        raise RuntimeError("Unable to find version string.")
    return found.group(1)
def read_version():
    """Read ``__version__`` out of pyplanet/__init__.py next to this file."""
    init_path = os.path.join(os.path.dirname(__file__),
                             'pyplanet', '__init__.py')
    with open(init_path) as handler:
        source = handler.read()
    # Raises AttributeError if the version line is missing, as before.
    return re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                     source, re.M).group(1)