The following 50 code examples, extracted from open-source Python projects, illustrate how to use logger.info().

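Before the individual examples, a minimal sketch of the pattern they all rely on may help: obtain a logging.Logger and emit informational messages through its info() method. The logger name and format string below are illustrative assumptions, not taken from any of the projects that follow.

import logging

# Illustrative setup only; each project below configures its own logger.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(name)s %(levelname)s %(message)s')
logger = logging.getLogger('example')  # hypothetical logger name

logger.info('plain message')
# Lazy %-style formatting: arguments are interpolated only if the record is emitted.
logger.info('user=%s attempts=%d', 'guest', 3)
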
def prepare_zip():
    from pkg_resources import resource_filename as resource
    from config import config
    from json import dumps
    logger.info('creating/updating gimel.zip')
    with ZipFile('gimel.zip', 'w', ZIP_DEFLATED) as zipf:
        info = ZipInfo('config.json')
        info.external_attr = 0o664 << 16
        zipf.writestr(info, dumps(config))
        zipf.write(resource('gimel', 'config.py'), 'config.py')
        zipf.write(resource('gimel', 'gimel.py'), 'gimel.py')
        zipf.write(resource('gimel', 'logger.py'), 'logger.py')
        for root, dirs, files in os.walk(resource('gimel', 'vendor')):
            for file in files:
                real_file = os.path.join(root, file)
                relative_file = os.path.relpath(real_file,
                                                resource('gimel', ''))
                zipf.write(real_file, relative_file)

def role():
    new_role = False
    try:
        logger.info('finding role')
        iam('get_role', RoleName='gimel')
    except ClientError:
        logger.info('role not found. creating')
        iam('create_role', RoleName='gimel',
            AssumeRolePolicyDocument=ASSUMED_ROLE_POLICY)
        new_role = True

    role_arn = iam('get_role', RoleName='gimel', query='Role.Arn')
    logger.debug('role_arn={}'.format(role_arn))

    logger.info('updating role policy')
    iam('put_role_policy', RoleName='gimel', PolicyName='gimel',
        PolicyDocument=POLICY)

    if new_role:
        from time import sleep
        logger.info('waiting for role policy propagation')
        sleep(5)

    return role_arn

def _function_alias(name, version, alias=LIVE):
    try:
        logger.info('creating function alias {0} for {1}:{2}'.format(
            alias, name, version))
        arn = aws_lambda('create_alias',
                         FunctionName=name,
                         FunctionVersion=version,
                         Name=alias,
                         query='AliasArn')
    except ClientError:
        logger.info('alias {0} exists. updating {0} -> {1}:{2}'.format(
            alias, name, version))
        arn = aws_lambda('update_alias',
                         FunctionName=name,
                         FunctionVersion=version,
                         Name=alias,
                         query='AliasArn')
    return arn

def preflight_checks():
    logger.info('checking aws credentials and region')
    if region() is None:
        logger.error('Region is not set up. please run aws configure')
        return False
    try:
        check_aws_credentials()
    except AttributeError:
        logger.error('AWS credentials not found. please run aws configure')
        return False
    logger.info('testing redis')
    try:
        from gimel import _redis
        _redis().ping()
    except redis.exceptions.ConnectionError:
        logger.error('Redis ping failed. Please run gimel configure')
        return False
    return True

def get_channel_local_path(channel_name):
    # TODO: (3.2) The XML should be written to userdata, so that two local files are read:
    # the one in userdata and the one next to the .py (shipped with the plugin).
    # The newer of the two is the current version; if no file exists, version 0 is assumed.
    if channel_name != "channelselector":
        local_channel_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".py")
        local_version_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".xml")
        local_compiled_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".pyo")
    else:
        local_channel_path = os.path.join(config.get_runtime_path(), channel_name + ".py")
        local_version_path = os.path.join(config.get_runtime_path(), channel_name + ".xml")
        local_compiled_path = os.path.join(config.get_runtime_path(), channel_name + ".pyo")

    logger.info("tvalacarta.core.updater local_channel_path=" + local_channel_path)
    logger.info("tvalacarta.core.updater local_version_path=" + local_version_path)
    logger.info("tvalacarta.core.updater local_compiled_path=" + local_compiled_path)

    return local_channel_path, local_version_path, local_compiled_path

def downloadpageWithoutCookies(url):
    logger.info("tvalacarta.core.scrapertools Descargando " + url)
    inicio = time.clock()
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; es-ES; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14')
    req.add_header('X-Requested-With', 'XMLHttpRequest')
    try:
        response = urllib2.urlopen(req)
    except:
        req = urllib2.Request(url.replace(" ", "%20"))
        req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; es-ES; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14')
        response = urllib2.urlopen(req)
    data = response.read()
    response.close()
    fin = time.clock()
    logger.info("tvalacarta.core.scrapertools Descargado en %d segundos " % (fin - inicio + 1))
    return data

def connect(server_name, user, password):
    import smb, nmb

    logger.info("[samba.py] Crea netbios...")
    netbios = nmb.NetBIOS()

    logger.info("[samba.py] Averigua IP...")
    nbhost = netbios.gethostbyname(server_name)
    server_ip = nbhost[0].get_ip()
    logger.info("[samba.py] server_ip=" + server_ip)

    logger.info("[samba.py] Crea smb...")
    remote = smb.SMB(server_name, server_ip)
    logger.info("ok")

    if remote.is_login_required():
        logger.info("[samba.py] Login...")
        if user == "":
            logger.info("[samba.py] User vacio, se asume 'guest'")
            user = "guest"
        remote.login(user, password)
    else:
        logger.info("[samba.py] Login no requerido")

    return remote

def get_files(url):
    logger.info("[samba.py] get_files")

    # Split the URL into its components
    server_name, share_name, path, user, password = parse_url(url)

    # Connect to the remote server
    remote = connect(server_name, user, password)

    ficheros = []
    for f in remote.list_path(share_name, path + '*'):
        name = f.get_longname()
        #logger.info("[samba.py] name="+name)
        if name == '.' or name == '..':
            continue
        if f.is_directory():
            continue
        ficheros.append(name)

    return ficheros

def already_suscribed(item):
    logger.info("suscription.already_suscribed item=" + item.tostring())

    current_suscriptions = _read_suscription_file()

    # Check if suscription already on file
    existe = False
    for suscription_item in current_suscriptions:
        logger.info("suscription.already_suscribed suscription_item=" + suscription_item.tostring())
        if suscription_item.url == item.url:
            existe = True
            break

    logger.info("suscription.already_suscribed -> " + repr(existe))
    return existe

# ------------------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------------------

# Read suscriptions from file

def unpack(source):
    """Unpacks P.A.C.K.E.R. packed js code."""
    payload, symtab, radix, count = _filterargs(source)
    logger.info("payload=" + repr(payload))
    logger.info("symtab=" + repr(symtab))
    logger.info("radix=" + repr(radix))
    logger.info("count=" + repr(count))

    if count != len(symtab):
        raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')

    try:
        unbase = Unbaser(radix)
    except TypeError:
        raise UnpackingError('Unknown p.a.c.k.e.r. encoding.')

    def lookup(match):
        """Look up symbols in the synthetic symtab."""
        word = match.group(0)
        return symtab[unbase(word)] or word

    source = re.sub(r'\b\w+\b', lookup, payload)
    return _replacestrings(source)

def get_channel_local_path(channel_name):
    if channel_name != "channelselector":
        local_channel_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".py")
        local_version_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".xml")
        local_compiled_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".pyo")
    else:
        local_channel_path = os.path.join(config.get_runtime_path(), channel_name + ".py")
        local_version_path = os.path.join(config.get_runtime_path(), channel_name + ".xml")
        local_compiled_path = os.path.join(config.get_runtime_path(), channel_name + ".pyo")

    logger.info("local_channel_path=" + local_channel_path)
    logger.info("local_version_path=" + local_version_path)
    logger.info("local_compiled_path=" + local_compiled_path)

    return local_channel_path, local_version_path, local_compiled_path

def get_channel_local_path(channel_name):
    # TODO: (3.2) The XML should be written to userdata, so that two local files are read:
    # the one in userdata and the one next to the .py (shipped with the plugin).
    # The newer of the two is the current version; if no file exists, version 0 is assumed.
    if channel_name != "channelselector":
        local_channel_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".py")
        local_version_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".xml")
        local_compiled_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".pyo")
    else:
        local_channel_path = os.path.join(config.get_runtime_path(), channel_name + ".py")
        local_version_path = os.path.join(config.get_runtime_path(), channel_name + ".xml")
        local_compiled_path = os.path.join(config.get_runtime_path(), channel_name + ".pyo")

    logger.info("streamondemand-pureita.core.updater local_channel_path=" + local_channel_path)
    logger.info("streamondemand-pureita.core.updater local_version_path=" + local_version_path)
    logger.info("streamondemand-pureita.core.updater local_compiled_path=" + local_compiled_path)

    return local_channel_path, local_version_path, local_compiled_path

def downloadpageWithoutCookies(url):
    logger.info("streamondemand-pureita.core.scrapertools Descargando " + url)
    inicio = time.clock()
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; es-ES; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14')
    req.add_header('X-Requested-With', 'XMLHttpRequest')
    try:
        response = urllib2.urlopen(req)
    except:
        req = urllib2.Request(url.replace(" ", "%20"))
        req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; es-ES; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14')
        response = urllib2.urlopen(req)
    data = response.read()
    response.close()
    fin = time.clock()
    logger.info("streamondemand-pureita.core.scrapertools Descargado en %d segundos " % (fin - inicio + 1))
    return data

def download(url, package_name):
    logger.info("pyload_client.download url=" + url + ", package_name=" + package_name)

    session = login(config.get_setting("pyload_user"), config.get_setting("pyload_password"))

    package_id = find_package_id(package_name)
    if package_id is None:
        api_url = urlparse.urljoin(config.get_setting("pyload"), "/api/addPackage")
        logger.info("pyload_client.download api_url=" + api_url)
        data = scrapertools.cache_page(api_url, post=urllib.urlencode({"name": "'" + package_name + "'", "links": str([url])}))
        logger.info("pyload_client.download data=" + data)
    else:
        api_url = urlparse.urljoin(config.get_setting("pyload"), "/api/addFiles")
        logger.info("pyload_client.download api_url=" + api_url)
        data = scrapertools.cache_page(api_url, post=urllib.urlencode({"pid": str(package_id), "links": str([url])}))
        logger.info("pyload_client.download data=" + data)

    return

def _run_record_validation(self, schema_name, table_name, tablemeta,
                           columnmeta, validation_rules,
                           skip_record_validation):
    log.info(_("Record validation: start"))
    if skip_record_validation:
        log.info(_("Record validation: skipping"))
        return
    if not validation_rules:
        log.info(_("Record validation: no validation rule"))
        return

    validation = self.run_record_validation(schema_name, table_name,
                                            validation_rules)
    assert isinstance(validation, dict)
    for col in tablemeta.column_names:
        if validation and col in validation:
            columnmeta[col].validation = validation[col]
    log.info(_("Record validation: end"))

def run_postscan_validation(self, schema_name, table_name, tablemeta,
                            columnmeta, table_data, validation_rules):
    if not validation_rules:
        return table_data

    v = DbProfilerValidator.DbProfilerValidator(table_data['schema_name'],
                                                table_data['table_name'],
                                                self, validation_rules)

    log.info(_("Column statistics validation: start"))
    validated1, failed1 = v.validate_table(table_data)
    log.info(_("Column statistics validation: end (%d)") % validated1)

    log.info(_("SQL validation: start"))
    validated2, failed2 = v.validate_sql(self.dbdriver)
    log.info(_("SQL validation: end (%d)") % validated2)

    v.update_table_data(table_data)
    return table_data

def get(self):
    jsondata = u""
    try:
        data_all = []
        cursor = self._conn.cursor()
        for r in cursor.execute("SELECT * FROM repo"):
            data_all.append(json.loads(unicode(r[4])))
        log.info(_("Retrieved all data from the repository `%s'.") % self.filename)
    except Exception as e:
        log.error(_("Could not retrieve from the repository `%s'") % self.filename,
                  detail=unicode(e))
        return None
    return data_all

def get_datamap_source_tables(self, database_name, schema_name, table_name):
    """Get source table names from the data mapping info.

    Args:
        database_name (str):
        schema_name (str):
        table_name (str):

    Returns:
        list: a list of source table names.
    """
    assert database_name and schema_name and table_name

    tables = []
    for d in self.get_datamap_items(database_name, schema_name, table_name):
        if d['source_table_name'] not in tables:
            tables.append(d['source_table_name'])
    return tables

def verify(self):
    repo = DbProfilerRepository.DbProfilerRepository(self.repofile)
    repo.open()

    log.info(_("Verifying the validation results."))
    table_list = repo.get_table_list()
    valid = 0
    invalid = 0
    for t in table_list:
        table = repo.get_table(t[0], t[1], t[2])
        v, i = verify_table(table)
        valid += v
        invalid += i

    if invalid == 0:
        log.info(_("No invalid results: %d/%d") % (invalid, valid + invalid))
    else:
        log.info(_("Invalid results: %d/%d") % (invalid, valid + invalid))
    repo.close()

    return invalid > 0

def export_json(repo, tables=[], output_path='./json'):
    json_data = []
    try:
        f = open(output_path + "/EXPORT.JSON", "a")
        for tab in tables:
            database_name = tab[0]
            schema_name = tab[1]
            table_name = tab[2]
            data = repo.get_table(database_name, schema_name, table_name)
            json_data.append(data)
        f.write(json.dumps(json_data, indent=2).encode('utf-8'))
        f.close()
        log.info(_("Generated JSON file."))
    except IOError as e:
        log.error(_("Could not generate JSON file."))
        sys.exit(1)
    return True

def preflight():
    logger.info('running preflight checks')
    preflight_checks()

def deploy(preflight):
    if preflight:
        logger.info('running preflight checks')
        if not preflight_checks():
            return
    logger.info('deploying')
    run()
    js_code_snippet()

def configure():
    from config import config, config_filename, generate_config
    if not config:
        logger.info('generating new config {}'.format(config_filename))
        generate_config(config_filename)
    click.edit(filename=config_filename)

def rollback_lambda(name, alias=LIVE):
    all_versions = _versions(name)
    live_version = _get_version(name, alias)
    try:
        live_index = all_versions.index(live_version)
        if live_index < 1:
            raise RuntimeError('Cannot find previous version')
        prev_version = all_versions[live_index - 1]
        logger.info('rolling back to version {}'.format(prev_version))
        _function_alias(name, prev_version)
    except RuntimeError as error:
        logger.error('Unable to rollback. {}'.format(repr(error)))

def deploy_api(api_id):
    logger.info('deploying API')
    return apigateway('create_deployment', restApiId=api_id,
                      description='gimel deployment',
                      stageName='prod',
                      stageDescription='gimel production',
                      cacheClusterEnabled=False,
                      query='id')

def create_update_lambda(role_arn, wiring):
    name, handler, memory, timeout = (wiring[k] for k in
                                      ('FunctionName', 'Handler',
                                       'MemorySize', 'Timeout'))
    try:
        logger.info('finding lambda function')
        function_arn = aws_lambda('get_function',
                                  FunctionName=name,
                                  query='Configuration.FunctionArn')
    except ClientError:
        function_arn = None
    if not function_arn:
        logger.info('creating new lambda function {}'.format(name))
        with open('gimel.zip', 'rb') as zf:
            function_arn, version = aws_lambda('create_function',
                                               FunctionName=name,
                                               Runtime='python2.7',
                                               Role=role_arn,
                                               Handler=handler,
                                               MemorySize=memory,
                                               Timeout=timeout,
                                               Publish=True,
                                               Code={'ZipFile': zf.read()},
                                               query='[FunctionArn, Version]')
    else:
        logger.info('updating lambda function {}'.format(name))
        with open('gimel.zip', 'rb') as zf:
            function_arn, version = aws_lambda('update_function_code',
                                               FunctionName=name,
                                               Publish=True,
                                               ZipFile=zf.read(),
                                               query='[FunctionArn, Version]')
    function_arn = _function_alias(name, version)
    _cleanup_old_versions(name)
    logger.debug('function_arn={} ; version={}'.format(function_arn, version))
    return function_arn

def create_update_api(role_arn, function_arn, wiring):
    logger.info('creating or updating api /{}'.format(wiring['pathPart']))
    api_id = get_create_api()
    resource_id = resource(api_id, wiring['pathPart'])
    uri = function_uri(function_arn, region())
    api_method(api_id, resource_id, role_arn, uri, wiring)
    cors(api_id, resource_id)

def _load_config(config_filename):
    try:
        with open(config_filename) as config_file:
            logger.info('Using config {}'.format(config_filename))
            return config_file.name, json.load(config_file)
    except IOError:
        logger.debug('trying to load {} (not found)'.format(config_filename))
        return config_filename, {}

def pull_repo(self, force=False):
    """Clone the repo to the specified dir.

    Delete the repo if it currently exists, unless reusing it.
    """
    try:
        helpers.create_path(self.paths['absolute_path'], True)

        if force:
            self.delete_repo()

        if not os.path.exists(self.paths['repo_path']):
            logger.info("Starting Repo Cloning", track=self.track)
            output, rc = helpers.run(
                "git clone -b %s %s" % (self.branch, self.url),
                self.paths['absolute_path'], self.dryrun)
            if rc > 0:
                self.delete_repo()
                logger.error("Pulling_repo", error=output,
                             path=self.paths['repo_path'])
                return -1
            return 1
        else:
            return 0
    except Exception as e:
        logger.errorout("Pulling_repo", err_msg=e.message,
                        error="Error pulling repo",
                        path=self.paths['repo_path'])

def delete_repo(self):
    """Deletes the repo."""
    logger.info('delete', path=self.paths['repo_path'], track=self.track)
    helpers.delete_path(self.paths['repo_path'])

def run_command(self, ecommand, host):
    """Run single stored command"""
    command = ecommand['command']

    # Check to see if host can run command
    if not command.can_host_use(host):
        if not command.suppress_limit_to_hosts_warnings:
            logger.warn("Invalid host for command",
                        command=command.name,
                        hostname=host.hostname,
                        module=COMMAND_MODULE_INIT,
                        allowed_hosts=command.limit_to_hosts)
        return False

    # Call root is already applied in get_cmd
    ssh_run = SshRun(host.hostname, host.ssh_hostname, "",
                     helpers.get_function_name(), False)

    logger.info("SSH Started", state=0, hostname=host.hostname,
                command=command.name, module=COMMAND_MODULE_CUSTOM)

    results = ssh_run.run_single(self.get_cmd(ecommand))
    ssh_run.close_ssh_channel()

    _log_rc(results, "SSH Finished", state=1, auth=command.use_auth,
            app_binary=command.use_app_binary, hostname=host.hostname,
            command=command.name, cmd=self.get_cmd_clean(ecommand),
            output=results, module=COMMAND_MODULE_CUSTOM)

    return True

def loads(*args, **kwargs):
    try:
        #logger.info("tvalacarta.core.jsontools loads Probando json incluido en el interprete")
        import json
        return to_utf8(json.loads(*args, **kwargs))
    except ImportError:
        pass
    except:
        logger.info(traceback.format_exc())

    try:
        logger.info("tvalacarta.core.jsontools loads Probando simplejson incluido en el interprete")
        import simplejson as json
        return to_utf8(json.loads(*args, **kwargs))
    except ImportError:
        pass
    except:
        logger.info(traceback.format_exc())

    try:
        logger.info("tvalacarta.core.jsontools loads Probando simplejson en el directorio lib")
        from lib import simplejson as json
        return to_utf8(json.loads(*args, **kwargs))
    except ImportError:
        pass
    except:
        logger.info(traceback.format_exc())

def dumps(*args, **kwargs):
    try:
        #logger.info("tvalacarta.core.jsontools loads Probando json incluido en el interprete")
        import json
        return json.dumps(*args, **kwargs)
    except ImportError:
        pass
    except:
        logger.info(traceback.format_exc())

    try:
        logger.info("tvalacarta.core.jsontools loads Probando simplejson incluido en el interprete")
        import simplejson as json
        return json.dumps(*args, **kwargs)
    except ImportError:
        pass
    except:
        logger.info(traceback.format_exc())

    try:
        logger.info("tvalacarta.core.jsontools loads Probando simplejson en el directorio lib")
        from lib import simplejson as json
        return json.dumps(*args, **kwargs)
    except ImportError:
        pass
    except:
        logger.info(traceback.format_exc())

def load_json(data):
    #logger.info("core.jsontools.load_json Probando simplejson en directorio lib")
    try:
        #logger.info("tvalacarta.core.jsontools.load_json Probando simplejson en directorio lib")
        from lib import simplejson
        json_data = simplejson.loads(data, object_hook=to_utf8)
        logger.info("tvalacarta.core.jsontools.load_json -> " + repr(json_data))
        return json_data
    except:
        logger.info(traceback.format_exc())

    try:
        logger.info("tvalacarta.core.jsontools.load_json Probando simplejson incluido en el interprete")
        import simplejson
        json_data = simplejson.loads(data, object_hook=to_utf8)
        logger.info("tvalacarta.core.jsontools.load_json -> " + repr(json_data))
        return json_data
    except:
        logger.info(traceback.format_exc())

    try:
        logger.info("tvalacarta.core.jsontools.load_json Probando json incluido en el interprete")
        import json
        json_data = json.loads(data, object_hook=to_utf8)
        logger.info("tvalacarta.core.jsontools.load_json -> " + repr(json_data))
        return json_data
    except:
        logger.info(traceback.format_exc())

    try:
        logger.info("tvalacarta.core.jsontools.load_json Probando JSON de Plex")
        json_data = JSON.ObjectFromString(data, encoding="utf-8")
        logger.info("tvalacarta.core.jsontools.load_json -> " + repr(json_data))
        return json_data
    except:
        logger.info(traceback.format_exc())

    logger.info("tvalacarta.core.jsontools.load_json No se ha encontrado un parser de JSON valido")
    logger.info("tvalacarta.core.jsontools.load_json -> (nada)")
    return ""

def xmlTojson(path_xml):
    '''Reads an XML file and returns a JSON dictionary.

    Parameters:
    path_xml (str) -- Full path to the XML file to read.

    Returns:
    If the path_xml argument does not point to a valid XML file, returns an
    empty dictionary. Otherwise returns a dictionary built from the fields
    of the XML file.
    '''
    import os
    ret = {}
    try:
        if os.path.exists(path_xml):
            infile = open(path_xml, "rb")
            data = infile.read()
            infile.close()
            ret = Xml2Json(data).result
    except:
        import traceback
        logger.info("tvalacarta.core.jsontools xmlTojson ERROR al leer el fichero y/o crear el json")
        logger.info("tvalacarta.core.jsontools " + traceback.format_exc())
    return ret

def downloadtitle(url, title):
    logger.info("tvalacarta.core.downloadtools downloadtitle: title=" + title + " url=" + url)
    fullpath = getfilefromtitle(url, title)
    return downloadfile(url, fullpath)

def GetTitleFromFile(title):
    # Log what is about to be processed
    logger.info("tvalacarta.core.downloadtools GetTitleFromFile: titulo=" + title)
    #logger.info("tvalacarta.core.downloadtools downloadtitle: title="+urllib.quote_plus( title ))

    plataforma = config.get_system_platform()
    logger.info("tvalacarta.core.downloadtools GetTitleFromFile: plataforma=" + plataforma)

    #nombrefichero = xbmc.makeLegalFilename(title + url[-4:])
    if plataforma == "xbox":
        nombrefichero = title[:38] + title[-4:]
        nombrefichero = limpia_nombre_excepto_1(nombrefichero)
    else:
        nombrefichero = title

    return nombrefichero

def extract(self, file, dir):
    logger.info("file=%s" % file)
    logger.info("dir=%s" % dir)

    if not dir.endswith(':') and not os.path.exists(dir):
        os.mkdir(dir)

    zf = zipfile.ZipFile(file)
    self._createstructure(file, dir)
    num_files = len(zf.namelist())

    for name in zf.namelist():
        logger.info("name=%s" % name)
        if not name.endswith('/'):
            logger.info("no es un directorio")
            try:
                (path, filename) = os.path.split(os.path.join(dir, name))
                logger.info("path=%s" % path)
                logger.info("name=%s" % name)
                os.makedirs(path)
            except:
                pass
            outfilename = os.path.join(dir, name)
            logger.info("outfilename=%s" % outfilename)
            try:
                outfile = open(outfilename, 'wb')
                outfile.write(zf.read(name))
            except:
                logger.info("Error en fichero " + name)

def update(item):
    # Download the ZIP
    logger.info("tvalacarta.core.updater update")
    remotefilename = REMOTE_FILE + item.version + ".zip"
    localfilename = LOCAL_FILE + item.version + ".zip"
    logger.info("tvalacarta.core.updater remotefilename=%s" % remotefilename)
    logger.info("tvalacarta.core.updater localfilename=%s" % localfilename)

    logger.info("tvalacarta.core.updater descarga fichero...")
    inicio = time.clock()
    #urllib.urlretrieve(remotefilename,localfilename)
    from core import downloadtools
    downloadtools.downloadfile(remotefilename, localfilename, continuar=False)
    fin = time.clock()
    logger.info("tvalacarta.core.updater Descargado en %d segundos " % (fin - inicio + 1))

    # Unzip it
    logger.info("tvalacarta.core.updater descomprime fichero...")
    import ziptools
    unzipper = ziptools.ziptools()
    destpathname = DESTINATION_FOLDER
    logger.info("tvalacarta.core.updater destpathname=%s" % destpathname)
    unzipper.extract(localfilename, destpathname)

    # Delete the downloaded zip
    logger.info("tvalacarta.core.updater borra fichero...")
    os.remove(localfilename)
    logger.info("tvalacarta.core.updater ...fichero borrado")

def get_channel_remote_url(channel_name):
    _remote_channel_url_ = "https://raw.githubusercontent.com/tvalacarta/tvalacarta/master/python/main-classic/"
    if channel_name != "channelselector":
        _remote_channel_url_ += "channels/"

    remote_channel_url = _remote_channel_url_ + channel_name + ".py"
    remote_version_url = _remote_channel_url_ + channel_name + ".xml"

    logger.info("tvalacarta.core.updater remote_channel_url=" + remote_channel_url)
    logger.info("tvalacarta.core.updater remote_version_url=" + remote_version_url)

    return remote_channel_url, remote_version_url

def cachePage2(url, headers):
    logger.info("Descargando " + url)
    inicio = time.clock()
    req = urllib2.Request(url)
    for header in headers:
        logger.info(header[0] + ":" + header[1])
        req.add_header(header[0], header[1])
    try:
        response = urllib2.urlopen(req)
    except:
        req = urllib2.Request(url.replace(" ", "%20"))
        for header in headers:
            logger.info(header[0] + ":" + header[1])
            req.add_header(header[0], header[1])
        response = urllib2.urlopen(req)
    data = response.read()
    response.close()
    fin = time.clock()
    logger.info("Descargado en %d segundos " % (fin - inicio + 1))
    '''
    outfile = open(localFileName,"w")
    outfile.write(data)
    outfile.flush()
    outfile.close()
    logger.info("Grabado a " + localFileName)
    '''
    return data

def cachePagePost(url, post):
    logger.info("Descargando " + url)
    inicio = time.clock()
    req = urllib2.Request(url, post)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
    try:
        response = urllib2.urlopen(req)
    except:
        req = urllib2.Request(url.replace(" ", "%20"), post)
        req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
        response = urllib2.urlopen(req)
    data = response.read()
    response.close()
    fin = time.clock()
    logger.info("Descargado en %d segundos " % (fin - inicio + 1))
    '''
    outfile = open(localFileName,"w")
    outfile.write(data)
    outfile.flush()
    outfile.close()
    logger.info("Grabado a " + localFileName)
    '''
    return data

def set_ok(self, cookie, request):
    #logger.info("set_ok Cookie "+repr(cookie)+" request "+repr(request))
    #cookie.discard = False
    devuelve = cookielib.DefaultCookiePolicy.set_ok(self, cookie, request)
    #logger.info("set_ok "+repr(devuelve))
    return devuelve

def return_ok(self, cookie, request):
    #logger.info("return_ok Cookie "+repr(cookie)+" request "+repr(request))
    #cookie.discard = False
    devuelve = cookielib.DefaultCookiePolicy.return_ok(self, cookie, request)
    #logger.info("return_ok "+repr(devuelve))
    return devuelve

def domain_return_ok(self, domain, request):
    #logger.info("domain_return_ok domain "+repr(domain)+" request "+repr(request))
    devuelve = cookielib.DefaultCookiePolicy.domain_return_ok(self, domain, request)
    #logger.info("domain_return_ok "+repr(devuelve))
    return devuelve

def printMatches(matches):
    i = 0
    for match in matches:
        logger.info("[scrapertools.py] %d %s" % (i, match))
        i = i + 1

def get_season_and_episode(title):
    logger.info("get_season_and_episode('" + title + "')")

    # Character class [xX] matches "1x05" or "1X05"; inside [] a "|" would be a literal character
    patron = r"(\d+)[xX](\d+)"
    matches = re.compile(patron).findall(title)
    logger.info(str(matches))
    filename = matches[0][0] + "x" + matches[0][1]

    logger.info("get_season_and_episode('" + title + "') -> " + filename)
    return filename

def update(total, errores=0, nuevos=0, serie="No indicada"):
    """Shows an update summary, then asks whether to refresh the library and does so.

    nuevos: number of episodes added. Shown in the summary of the confirmation dialog.
    total: total number of episodes in the library. Shown in the summary of the confirmation dialog.
    errores: number of episodes that could not be added (usually because of
        invalid characters in the file name or permission problems).
    """
    if nuevos == 1:
        texto = 'Se ha añadido 1 episodio a la Biblioteca (%d en total)' % (total,)
    else:
        texto = 'Se han añadido %d episodios a la Biblioteca (%d en total)' % (nuevos, total)

    advertencia = xbmcgui.Dialog()

    # Ask for confirmation before updating the library
    if nuevos > 0:
        if errores == 0:
            actualizar = advertencia.yesno('pelisalacarta', texto, '¿Deseas que actualice ahora la Biblioteca?')
        else:
            # If there were errors, show an extra line in the update prompt
            if errores == 1:
                texto2 = '(No se pudo añadir 1 episodio)'
            else:
                texto2 = '(No se pudieron añadir ' + str(errores) + ' episodios)'
            actualizar = advertencia.yesno('pelisalacarta', texto, texto2, '¿Deseas que actualice ahora la Biblioteca?')
    else:
        # No new episodes -> do not update
        if errores == 0:
            texto2 = ""
        elif errores == 1:
            texto2 = '(No se pudo añadir 1 episodio)'
        else:
            texto2 = '(No se pudieron añadir ' + str(errores) + ' episodios)'
        advertencia.ok('pelisalacarta', texto, texto2)
        actualizar = False

    if actualizar:
        xbmc.executebuiltin('UpdateLibrary(video)')

    logger.info('[Library update] Serie: "%s". Total: %d, Erroneos: %d, Nuevos: %d' % (serie, total, errores, nuevos))

def fixStrm(file):
    logger.info("[library.py] fixStrm file: " + file)

    url = LeeStrm(file)
    if len(url) == 0:
        return False

    args = url.split('?', 1)
    url2 = '%s?%s' % (sys.argv[0], args[1])
    logger.info("[library.py] fixStrm new url: " + url2)

    return SaveStrm(file, url2)