我们从Python开源项目中，提取了以下50个代码示例，用于说明如何使用re.IGNORECASE。
def searchExploit(exploit_list, soft_name, soft_version):
    """ Search affected packages in exploit_list """
    result = []
    version_search = versionVartions(soft_version, args.level)
    # FIX: iterate over a snapshot -- the original removed entries from
    # exploit_list while iterating it, which silently skipped the element
    # that followed every removal.
    for exploit in list(exploit_list):
        if exploit[5] in valid_platforms and (args.dos or exploit[6]!='dos' or args.type == 'dos'): # Platform and DoS
            if args.filter == None or args.filter.lower() in exploit[2].lower(): # Filter
                if args.type == None or args.type == exploit[6]: # Type
                    # Match the software name near the start of the title (or after a '/').
                    query = "(^(\w*\s){0,%s}|/\s?)%s(\s|\s.*\s|\/).* -" % (args.level, soft_name.replace('+', '\+'))
                    if re.search(query, exploit[2],re.IGNORECASE):
                        affected_versions = extractVersions(exploit[2])
                        for affected_version in affected_versions:
                            if args.level == 5 or LooseVersion(version_search) <= LooseVersion(affected_version):
                                if args.duplicates == False:
                                    exploit_list.remove(exploit) # Duplicates
                                printOutput(exploit, soft_name, soft_version)
                                result.append([exploit, soft_name, soft_version])
                                break
    return result
def __fmt_key(self, key):
    """Formats the hash key for more consistent hits; hence fetching the
    'Message-ID' key should still be fetched even if the user indexes
    with 'message-id'.
    """
    def _fmt(_k):
        # Capitalise the token (group 2) while keeping the separator
        # (group 1) untouched.
        return _k.group(1) + _k.group(2).upper()
    if not isinstance(key, basestring):  # Python 2 string check
        # Handle invalid key entries types
        key = str(key)
    key = re.sub(
        # Flip -id to ID (short for Identifier)
        # Flip -crc to CRC (short for Cyclic Redundancy Check)
        r'([_-])((id|crc)([^a-z0-9]|$))',
        _fmt,
        # First title-case every word, then fix the ID/CRC acronyms.
        re.sub(r'(^|\s|[_-])(\S)', _fmt, key.strip().lower()),
        flags=re.IGNORECASE,
    )
    if key in VALID_HEADER_ENTRIES or key.startswith(UNKNOWN_PREFIX):
        return key
    # Unknown headers are namespaced so they can still be stored.
    return UNKNOWN_PREFIX + key
def scan_thread(keyword, catalog_json):
    """Return the numbers of threads whose title or OP body contains *keyword*."""
    # Whole-word, case-insensitive match on the raw keyword.
    pattern = r'\b{0}\b'.format(keyword)
    hits = []
    for page in catalog_json:
        for thread in page["threads"]:
            # 'sub' is the thread title, 'com' the OP's post body; a thread
            # matching on both is recorded twice, as before.
            for field in ('sub', 'com'):
                if field in thread and re.search(pattern, str(thread[field]), re.IGNORECASE):
                    hits.append(thread["no"])
    return hits
def parse150(resp):
    '''Parse the '150' response for a RETR request.
    Returns the expected transfer size or None; size is not guaranteed to
    be present in the 150 message.
    '''
    if resp[:3] != '150':
        raise error_reply, resp  # NOTE: Python 2 raise syntax
    global _150_re
    if _150_re is None:
        # Compile lazily on first use and cache at module level.
        import re
        _150_re = re.compile("150 .* \((\d+) bytes\)", re.IGNORECASE)
    m = _150_re.match(resp)
    if not m:
        # Size is optional in the 150 reply.
        return None
    s = m.group(1)
    try:
        return int(s)
    except (OverflowError, ValueError):
        return long(s)  # Python 2 long fallback for huge sizes
def pickline(file, key, casefold = 1):
    """Return the value of the first ``key:`` header line in *file*.

    Continuation lines (lines starting with whitespace) are appended to
    the value.  Returns None when the file cannot be opened or the key
    is not present.  *casefold* (truthy by default) makes the key match
    case-insensitive.
    """
    try:
        f = open(file, 'r')
    except IOError:
        return None
    # FIX: the original leaked the file handle; close it deterministically.
    with f:
        pat = re.escape(key) + ':'
        prog = re.compile(pat, casefold and re.IGNORECASE)
        while 1:
            line = f.readline()
            if not line:
                break
            if prog.match(line):
                text = line[len(key)+1:]
                # Gather indented continuation lines.
                while 1:
                    line = f.readline()
                    if not line or not line[0].isspace():
                        break
                    text = text + line
                return text.strip()
    return None
def sources(self, url, hostDict, hostprDict):
    """Build the list of playable CDN sources for the given stream id."""
    sources = []
    if not url:
        return sources
    try:
        # Ask the site config endpoint for the streamer base URL.
        conf = client.request(urlparse.urljoin(self.base_link, self.conf_link), XHR=True)
        streamer = json.loads(conf).get('streamer')
        # Fetch the HLS master playlist and pull (height, link) pairs.
        playlist = client.request(streamer + '%s.mp4/master.m3u8' % url, XHR=True)
        matches = re.findall('RESOLUTION\s*=\s*\d+x(\d+).*?\n(http.*?)(?:\n|$)', playlist, re.IGNORECASE)
        for height, link in matches:
            sources.append({'source': 'CDN',
                            'quality': source_utils.label_to_quality(height),
                            'language': 'de',
                            'url': link,
                            'direct': True,
                            'debridonly': False})
        return sources
    except:
        # Best-effort scraper: swallow errors and return what we have.
        return sources
def checkTextodefinitivoarray(type,array,aprobdef):
    """Return True when *array* contains a line marking a definitive text
    for this amendment flow type, else None."""
    # Guard clause: nothing to check for flows without a finish text.
    if not AmendmentFlow.hasfinishTextorEnmienda(type):
        return None
    if aprobdef:
        # Look for a "definitive approval" marker.
        for element in array:
            if re.search("aprobaci(.+?)n(.*?)definitiva", element, re.IGNORECASE):
                return True
        return None
    # Otherwise match any of the flow-specific finish texts.
    finish_texts = AmendmentFlow.getFinishText(type)
    for element in array:
        if any(re.search(texa, element, re.IGNORECASE) for texa in finish_texts):
            return True
    return None
def funMatch(args, **options):
    """Template function ``match``: regex-search ``args[0]`` for the
    pattern ``args[1]``.

    An optional third argument 'i' makes the search case-insensitive.
    Returns the string "true" or "false".

    Raises:
        ParseError: on a wrong argument count or an unsupported flag.
    """
    # Clearer than the original LBYL trick of [2, 3].index(len(args)).
    if len(args) not in (2, 3):
        raise ParseError("match expects either two or three arguments")
    flags = 0
    if len(args) == 3:
        if args[2] != 'i':
            raise ParseError('match only supports the ignore case flag "i"')
        flags = re.IGNORECASE
    return "true" if re.search(args[1], args[0], flags) else "false"
def str_flags_to_int(str_flags):
    """Translate a string of regex flag characters ("imlsux" style, as
    used by BSON/JavaScript regexes) into the ``re`` module bitmask."""
    char_to_flag = (
        ("i", re.IGNORECASE),
        ("l", re.LOCALE),
        ("m", re.MULTILINE),
        ("s", re.DOTALL),
        ("u", re.UNICODE),
        ("x", re.VERBOSE),
    )
    flags = 0
    for char, value in char_to_flag:
        if char in str_flags:
            flags |= value
    return flags
def __init__(self, pattern, flags=0):
    """BSON regular expression data.

    This class is useful to store and retrieve regular expressions that
    are incompatible with Python's regular expression dialect.

    :Parameters:
      - `pattern`: string
      - `flags`: (optional) an integer bitmask, or a string of flag
        characters like "im" for IGNORECASE and MULTILINE
    """
    # text_type/bytes: Python 2/3 compatibility aliases from this module.
    if not isinstance(pattern, (text_type, bytes)):
        raise TypeError("pattern must be a string, not %s" % type(pattern))
    self.pattern = pattern
    if isinstance(flags, string_type):
        # Accept "imsux"-style flag strings and convert to the re bitmask.
        self.flags = str_flags_to_int(flags)
    elif isinstance(flags, int):
        self.flags = flags
    else:
        raise TypeError(
            "flags must be a string or int, not %s" % type(flags))
def _encode_regex(name, value, dummy0, dummy1):
    """Encode a python regex or bson.regex.Regex as a BSON regex element."""
    flags = value.flags
    prefix = b"\x0B" + name + _make_c_string_check(value.pattern)
    if flags == 0:
        # Python 2 common case: no flags at all.
        return prefix + b"\x00"
    if flags == re.UNICODE:
        # Python 3 common case: only the implicit unicode flag.
        return prefix + b"u\x00"
    # BSON flag characters must appear in this fixed order.
    sflags = b"".join(
        char
        for bit, char in (
            (re.IGNORECASE, b"i"),
            (re.LOCALE, b"l"),
            (re.MULTILINE, b"m"),
            (re.DOTALL, b"s"),
            (re.UNICODE, b"u"),
            (re.VERBOSE, b"x"),
        )
        if flags & bit
    )
    return prefix + sflags + b"\x00"
def _adjust_header(type_, orig_header): """ Make sure 'pythonw' is used for gui and and 'python' is used for console (regardless of what sys.executable is). """ pattern = 'pythonw.exe' repl = 'python.exe' if type_ == 'gui': pattern, repl = repl, pattern pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE) new_header = pattern_ob.sub(string=orig_header, repl=repl) clean_header = new_header[2:-1].strip('"') if sys.platform == 'win32' and not os.path.exists(clean_header): # the adjusted version doesn't exist, so return the original return orig_header return new_header
def parse_str(self, gen_str):
    """Attempts to extract the information from a generic interface."""
    # A full grammar for generics is impractical; just split the string
    # into "name : type" (the type part may include defaults).
    match = re.search(re.compile(r'\s?(?P<name>.*?)\s?(?::)\s?(?P<type>.*)', re.IGNORECASE), gen_str)
    if match is None:
        print('vhdl-mode: Could not parse generic string.')
        self.success = False
        return
    self.name = match.group('name')
    # Sometimes the type has a trailing space. Eliminating it.
    self.type = re.sub(r'\s*$', '', match.group('type'))
    self.success = True
def __init__(self, pattern, flags=0):
    """BSON regular expression data.

    This class is useful to store and retrieve regular expressions that
    are incompatible with Python's regular expression dialect.

    :Parameters:
      - `pattern`: string
      - `flags`: (optional) an integer bitmask, or a string of flag
        characters like "im" for IGNORECASE and MULTILINE
    """
    # string_types: Python 2/3 compatibility alias from this module.
    if not isinstance(pattern, string_types):
        raise TypeError("pattern must be a string, not %s" % type(pattern))
    self.pattern = pattern
    if isinstance(flags, string_types):
        # Accept "imsux"-style flag strings and convert to the re bitmask.
        self.flags = str_flags_to_int(flags)
    elif isinstance(flags, int):
        self.flags = flags
    else:
        raise TypeError(
            "flags must be a string or int, not %s" % type(flags))
def get_year(self):
    # Extract the fiscal year from the parsed XBRL instance document and
    # map it onto the taxonomy year used elsewhere.  Returns True on
    # success (setting self.xbrl_year), False otherwise.
    # NOTE(review): self.ins_sp appears to be a BeautifulSoup tree
    # (.find(name=...)/.get_text()) -- confirm against the class setup.
    try:
        yre = '(dei:DocumentFiscalYearFocus$)'
        year = self.ins_sp.find(name=re.compile(yre, re.IGNORECASE | re.MULTILINE)).get_text()
    except AttributeError:
        # Tag missing: fall back to the period end date, keeping just
        # the leading 4-digit year.
        try:
            yre = '(dei:DocumentPeriodEndDate$)'
            year = self.ins_sp.find(name=re.compile(yre, re.IGNORECASE | re.MULTILINE)).get_text()
            year = year[:4]
        except AttributeError:
            return False
    try:
        year = int(year)
        # Years with a directly matching taxonomy; 2010 and 2015 reuse
        # the previous year's taxonomy below.
        sure_years = [2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011, 2012, 2013, 2014, 2016]
        if year in sure_years:
            self.xbrl_year = str(year)
        if year == 2010:
            self.xbrl_year = '2009'
        if year == 2015:
            self.xbrl_year = '2014'
        return True
    except:
        # Non-numeric year text.
        return False
def searchMessageBodies(self, term=None):
    # Return the 1-based indices of messages whose body matches any of
    # the given search terms (case-insensitive regex).  Requires an open
    # server connection (self.srv) and a non-empty term list.
    if (not self.srv): return
    if (not term): return
    self.getMessages()
    matched = []
    i = 1
    for (server_msg, body, octets) in self.msg_list:
        body = '\n'.join(body)
        for search_term in term:
            if re.search(search_term, body, re.IGNORECASE):
                print "MATCHED ON [%s]" % (search_term)  # Python 2 print statement
                # Record each message index at most once.
                if not i in matched:
                    matched.append(i)
        i=i+1
    return matched
def searchMessageSubjects(self, term=None):
    # Return the 1-based indices of messages whose Subject header matches
    # any of the given search terms (case-insensitive regex).  Requires
    # an open server connection (self.srv) and a non-empty term list.
    if (not self.srv): return
    if (not term): return
    self.getMessages()
    matched = []
    i = 1
    for (server_msg, body, octets) in self.msg_list:
        # Parse the raw lines into an email message to read its subject.
        msg = email.message_from_string('\n'.join(body))
        for search_term in term:
            if re.search(search_term, msg['subject'], re.IGNORECASE):
                print "MATCHED ON [%s]" % (search_term)  # Python 2 print statement
                # Record each message index at most once.
                if not i in matched:
                    matched.append(i)
        i=i+1
    return matched
def search (self,pattern,caseSens=True,debug=0):
    "Try to match *pattern* against the last line read (self.lastline)."
    # Stores the compiled regex and findall results on the instance and
    # returns True when at least one match was found.
    if not caseSens:
        self.regexp=re.compile(pattern,re.IGNORECASE)
    else:
        self.regexp=re.compile(pattern)
    self.matches=self.regexp.findall(self.lastline)
    # Normalise every entry to a tuple so callers can index groups
    # uniformly even for single-group patterns.
    j=0
    for i in self.matches:
        if not type(i)==type(()):
            self.matches[j]=tuple([self.matches[j]])
        j+=1
    # DEBUG for matching
    if (debug==1):
        print "[",self.lastline,"-",pattern,"]"  # Python 2 print statements
        print len(self.matches)
        print self.matches
    if len(self.matches)==0:
        return False
    else:
        return True
def parse(self, data):
    """Parse a raw client request: request line, Host line, then headers."""
    for index, line in enumerate(data.splitlines()):
        if index == 0:
            # First line is the request line itself.
            self.parseRequest(line)
            continue
        if index == 1:
            # Second line should be the Host header.
            host_match = re.match("host: (.*)$", line, re.IGNORECASE)
            if host_match:
                self.host = host_match.group(1)
            continue
        # Remaining lines are "Key: Value" headers; blanks are skipped.
        if not line:
            continue
        parts = line.split(":", 1)
        if len(parts) == 1:
            raise SyntaxError("Unable to parse client header: %r" % parts[0])
        key, value = parts
        self.headers.append( (key, value) )
def compilePatterns(self):
    # Yield (text, score, match_callable) triples for all configured
    # regexes, literal patterns and whole words.
    for text, score, match in self.regexs:
        self.debug("Add regex pattern: %r" % text)
        yield (text, score, match)
    for text, score in self.patterns.iteritems():  # Python 2 dict iteration
        # Literal substring: escape it so regex metacharacters are inert.
        regex = r'%s' % re.escape(text.lower())
        self.debug("Create pattern regex: %r" % regex)
        match = re.compile(regex, re.IGNORECASE).search
        yield (text, score, match)
    for text, score in self.words.iteritems():
        # Whole word: require a non-word char (or string edge) each side.
        regex = r'(?:^|\W)%s(?:$|\W)' % re.escape(text.lower())
        self.debug("Create word regex: %r" % regex)
        match = re.compile(regex, re.IGNORECASE).search
        yield (text, score, match)
def analyse_text(text):
    """
    Check for initial comment and patterns that distinguish Rexx from
    other C-like languages.
    """
    if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE):
        # MVS-style "/* rexx */" header: certainly a Rexx script.
        return 1.0
    if text.startswith('/*'):
        # A C-style comment header could still be C++, C# or Java; weigh
        # the body for Rexx-specific constructs instead.
        lowered = text.lower()
        score = 0.01 + sum(
            weight
            for pattern, weight in RexxLexer.PATTERNS_AND_WEIGHTS
            if pattern.search(lowered)
        )
        return min(score, 1.0)
def keep_redirecting(r, my_pub):
    # Return a URL to follow manually, or None to stop redirecting.
    # don't read r.content unless we have to, because it will cause us to
    # download the whole thing instead of just the headers

    # 10.5762/kais.2016.17.5.316
    if ("content-length" in r.headers):
        # manually follow javascript if that's all that's in the payload
        file_size = int(r.headers["content-length"])
        if file_size < 500:
            matches = re.findall(ur"<script>location.href='(.*)'</script>", r.content, re.IGNORECASE)  # Python 2 ur-string
            if matches:
                redirect_url = matches[0]
                if redirect_url.startswith(u"/"):
                    # Relative target: resolve against the response URL.
                    redirect_url = get_link_target(redirect_url, r.url)
                return redirect_url

    # 10.1097/00003643-201406001-00238
    if my_pub and my_pub.is_same_publisher("Ovid Technologies (Wolters Kluwer Health)"):
        # Ovid pages embed an accession number; build the linkback URL from it.
        matches = re.findall(ur"OvidAN = '(.*?)';", r.content, re.IGNORECASE)
        if matches:
            an_number = matches[0]
            redirect_url = "http://content.wkhealth.com/linkback/openurl?an={}".format(an_number)
            return redirect_url

    return None
def _maybe_update_line_package(self, line, package): original_line = line pattern = r'\b{package}(?:\[\w*\])?=={old_version}\b'.format( package=re.escape(package['name']), old_version=re.escape(str(package['current_version']))) if re.search(pattern, line, flags=re.IGNORECASE): line = line.replace( '=={}'.format(package['current_version']), '=={}'.format(package['latest_version']) ) if line != original_line: self.upgraded_packages.append(package) if self.dry_run: # pragma: nocover print('[Dry Run]: skipping requirements replacement:', original_line.replace('\n', ''), ' / ', line.replace('\n', '')) return original_line return line
def _do_match(self, text): if self.use_regex: try: flags = re.UNICODE if not self.case_sensitive: flags |= re.IGNORECASE return bool(re.findall(self.pattern, text, flags=flags)) except Exception as ex: logger.warning('Regular expression match failed', exc_info=True) raise self.BadPatternException(str(ex)) else: if self.case_sensitive: pattern = self.pattern else: pattern = self.pattern.lower() text = text.lower() return pattern in text
def find_templates():
    """
    Load python modules from templates directory and get templates list

    :return: list of tuples (pairs):
        [(compiled regex, lambda regex_match: return message_data)]
    """
    templates = []
    # FIX: the original used str.rstrip('__init__.py') / rstrip('.py'),
    # which strips a *character set* from the end, mangling any path or
    # module name that merely ends in one of those characters
    # (e.g. 'copy.py'.rstrip('.py') -> 'co').
    source_file = inspect.getsourcefile(lambda: 0)
    templates_directory = os.path.join(os.path.dirname(source_file), 'templates')
    for template_file in os.listdir(templates_directory):
        if template_file.startswith('.') or not template_file.endswith('.py'):
            continue
        module_name = template_file[:-len('.py')]
        # Hack for dev development and disutils
        try:
            template_module = importlib.import_module('templates.{}'.format(module_name))
        except ImportError:
            template_module = importlib.import_module('ross.templates.{}'.format(module_name))
        # Iterate throw items in template.
        # If there are variable ends with 'templates',
        # extend templates list with it.
        for (name, content) in template_module.__dict__.items():
            if name.endswith('templates'):
                for (regex_text, data_func) in content:
                    templates.append((re.compile(regex_text, re.IGNORECASE), data_func))
    return templates
def valid_url(self, url):
    """Return True if *url* looks like a well-formed http/https/ftp URL."""
    # Django-style validator: scheme, host (domain, localhost or IPv4),
    # optional port, optional path/query.
    url_re = re.compile(
        r'^(?:http|ftp)s?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # noqa
        r'localhost|'
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'(?::\d+)?'
        r'(?:/?|[/?]\S+)$',
        re.IGNORECASE)
    return url_re.match(url) is not None
def printable_usage(doc):
    """Extract and return the "usage:" section of *doc* (up to the first
    blank line)."""
    # in python < 2.7 you can't pass flags=re.IGNORECASE, hence the
    # character-class spelling of the case-insensitive marker.
    parts = re.split(r'([Uu][Ss][Aa][Gg][Ee]:)', doc)
    if len(parts) < 3:
        raise DocoptLanguageError('"usage:" (case-insensitive) not found.')
    if len(parts) > 3:
        raise DocoptLanguageError('More than one "usage:" (case-insensitive).')
    section = ''.join(parts[1:])
    return re.split(r'\n\s*\n', section)[0].strip()
def _search_for_query(self, query): if query in self._search_pattern_cache: return self._search_pattern_cache[query] # Build pattern: include all characters pattern = [] for c in query: # pattern.append('[^{0}]*{0}'.format(re.escape(c))) pattern.append('.*?{0}'.format(re.escape(c))) pattern = ''.join(pattern) search = re.compile(pattern, re.IGNORECASE).search self._search_pattern_cache[query] = search return search
def get_enlisted_regex_matches(formatted_comment):
    """
    Gets a regex match for enlisted AFSCs.
    Note: Enlisted matching is NOT case sensitive.
    :param formatted_comment: string not including any quoted text
    :return: regex matches
    """
    # finditer yields one match object per AFSC occurrence.
    return re.finditer(ENLISTED_AFSC_REGEX, formatted_comment, re.IGNORECASE)
def checkFactorDB(n): """See if the modulus is already factored on factordb.com, and if so get the factors""" # Factordb gives id's of numbers, which act as links for full number # follow the id's and get the actual numbers r = requests.get('http://www.factordb.com/index.php?query=%s' % str(n)) regex = re.compile("index\.php\?id\=([0-9]+)", re.IGNORECASE) ids = regex.findall(r.text) # These give you ID's to the actual number p_id = ids[1] q_id = ids[2] # follow ID's regex = re.compile("value=\"([0-9]+)\"", re.IGNORECASE) r_1 = requests.get('http://www.factordb.com/index.php?id=%s' % p_id) r_2 = requests.get('http://www.factordb.com/index.php?id=%s' % q_id) # Get numbers p = int(regex.findall(r_1.text)[0]) print(p) ans = 1 n = int(n) p = int(p) print(n) while n % p == 0: ans *= p n /= p print(ans,n, ans*n) return (ans, n)
def handle_401(self, r, **kwargs):
    """
    Takes the given response and tries digest-auth, if needed.

    :rtype: requests.Response
    """
    if self._thread_local.pos is not None:
        # Rewind the file position indicator of the body to where
        # it was to resend the request.
        r.request.body.seek(self._thread_local.pos)
    s_auth = r.headers.get('www-authenticate', '')
    # Only retry once (num_401_calls < 2) and only for Digest challenges.
    if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:
        self._thread_local.num_401_calls += 1
        # Strip the leading "digest " scheme token before parsing the
        # challenge parameters into a dict.
        pat = re.compile(r'digest ', flags=re.IGNORECASE)
        self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))
        # Consume content and release the original connection
        # to allow our new request to reuse the same one.
        r.content
        r.close()
        prep = r.request.copy()
        extract_cookies_to_jar(prep._cookies, r.request, r.raw)
        prep.prepare_cookies(prep._cookies)
        # Re-send the request with the computed Digest Authorization header.
        prep.headers['Authorization'] = self.build_digest_header(
            prep.method, prep.url)
        _r = r.connection.send(prep, **kwargs)
        _r.history.append(r)
        _r.request = prep
        return _r
    # Not a digest challenge (or already retried): reset and give up.
    self._thread_local.num_401_calls = 1
    return r
def _adjust_header(cls, type_, orig_header): """ Make sure 'pythonw' is used for gui and and 'python' is used for console (regardless of what sys.executable is). """ pattern = 'pythonw.exe' repl = 'python.exe' if type_ == 'gui': pattern, repl = repl, pattern pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE) new_header = pattern_ob.sub(string=orig_header, repl=repl) return new_header if cls._use_header(new_header) else orig_header
def _get_device_counters(self, device): r = re.compile('/ctr[0-9]+$', flags=re.IGNORECASE) co_phys_chan_names = [c.name for c in device.co_physical_chans] return list(filter(r.search, co_phys_chan_names))
def _get_device_pfi_lines(self, device): r = re.compile('/PFI[0-9]+$', flags=re.IGNORECASE) return list(filter(r.search, device.terminals))
def checkMatch(self, fieldValue, acListEntry):
    """Return a match object when *acListEntry* starts with *fieldValue*
    (case-insensitive), else None."""
    prefix_re = re.compile(re.escape(fieldValue) + '.*', re.IGNORECASE)
    return prefix_re.match(acListEntry)

# function to get all matches as a list
def setup(bot=None): global url_finder # TODO figure out why this is needed, and get rid of it, because really? if not bot: return bot.config.define_section('url', UrlSection) if bot.config.url.exclude: regexes = [re.compile(s) for s in bot.config.url.exclude] else: regexes = [] # We're keeping these in their own list, rather than putting then in the # callbacks list because 1, it's easier to deal with modules that are still # using this list, and not the newer callbacks list and 2, having a lambda # just to pass is kinda ugly. if not bot.memory.contains('url_exclude'): bot.memory['url_exclude'] = regexes else: exclude = bot.memory['url_exclude'] if regexes: exclude.extend(regexes) bot.memory['url_exclude'] = exclude # Ensure that url_callbacks and last_seen_url are in memory if not bot.memory.contains('url_callbacks'): bot.memory['url_callbacks'] = tools.SopelMemory() if not bot.memory.contains('last_seen_url'): bot.memory['last_seen_url'] = tools.SopelMemory() url_finder = re.compile(r'(?u)(%s?(?:http|https|ftp)(?:://\S+))' % (bot.config.url.exclusion_char), re.IGNORECASE)
def is_color(string):
    """Checks if the given string is a valid color.

    Arguments:
    string -- the string to check
    """
    # Either a known colour name or a 3/6-digit hex triplet (optional '#').
    if string in NAMED_COLORS:
        return True
    return re.match(r"^#?[0-9a-f]{3}([0-9a-f]{3})?$", string, re.IGNORECASE) is not None
def update_handler(d):
    # Telegram update handler: extract the message id from the stringified
    # update, scan the text for coin hashtags, and when one matches start
    # the trading subprocess and forward the message to self.
    # NOTE(review): indentation reconstructed from a collapsed one-line
    # source -- confirm branch nesting against the original file.
    global flag
    global variable
    # On this example, we just show the update object itself
    d = str(d)
    #testChannel
    # Pull the numeric message id out of the stringified update.
    re1 = '( id: )(?:[0-9][0-9]+)(,)'
    rg = re.compile(re1,re.IGNORECASE|re.DOTALL)
    m = rg.search(d)
    if m:
        word1=m.group(0)
        word2=word1.replace(' id: ', '')
        word3=word2.replace(',', '')
        word4=word3
        idd = int(word4)
        peer1 = InputPeerSelf()
        #INPUT YOUR KEYWORDS BELOW
        word_list = ["#DCR", "#LTC", "#NAUT", "#NXT", "#XCP", "#GRC", "#REP", "#PPC", "#RIC", "#STRAT", "#GAME", "#BTM", "#CLAM", "#ARDR", "#BLK", "#OMNI", "#SJCX", "#FLDC", "#BCH", "#POT", "#VRC", "#ETH", "#PINK", "#NOTE", "#BTS", "#AMP", "#NAV", "#BELA", "#ETC", "#FLO", "#VIA", "#XBC", "#XPM", "#DASH", "#XVC", "#GNO", "#NMC", "#RADS", "#VTC", "#XEM", "#FCT", "#XRP", "#NXC", "#STEEM", "#SBD", "#BURST", "#XMR", "#DGB", "#LBC", "#BCY", "#PASC", "#LSK", "#EXP", "#MAID", "#BTCD", "#SYS", "#GNT", "#HUC", "#EMC2", "#NEOS", "#ZEC", "#STR"]
        # Match any hashtag from the list as a standalone word.
        regex_string = "(?<=\W)(%s)(?=\W)" % "|".join(word_list)
        finder = re.compile(regex_string)
        string_to_be_searched = d
        results = finder.findall(" %s " % string_to_be_searched)
        result_set = set(results)
        print(idd)
        for word in word_list:
            if word in result_set:
                try:
                    # Build the trading pair symbol, e.g. "#LTC" -> "LTC-BTC".
                    var = word
                    var1 = var.replace('#', '')
                    btc = '-BTC'
                    variable = var1 + btc
                    if (os.path.isfile(pid)):
                        # A lock file signals a run already in progress.
                        print('Waiting on current process to finish... If you experience errors, delete process.run')
                    else:
                        sell = 'notready'
                        m = multiprocessing.Process(target = runitt , args = ())
                        m.start()
                        # Forward the triggering message to ourselves.
                        # NOTE(review): placed inside the else-branch; confirm
                        # whether the forward should also run while busy.
                        client(ForwardMessageRequest(peer=peer1, id=(idd), random_id=(generate_random_long())))
                except Exception as e:
                    print(e)
def __init__(cls, name, bases, dct):
    # Metaclass initializer for string.Template subclasses: compile the
    # substitution pattern once per class.
    super(_TemplateMetaclass, cls).__init__(name, bases, dct)
    if 'pattern' in dct:
        # The subclass supplied an explicit pattern; use it verbatim.
        pattern = cls.pattern
    else:
        # Build the default pattern from the class's delimiter/idpattern.
        pattern = _TemplateMetaclass.pattern % {
            'delim' : _re.escape(cls.delimiter),
            'id' : cls.idpattern,
        }
    # VERBOSE because the template pattern is written with comments/whitespace.
    cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)