The following 47 code examples, extracted from open-source Python projects, illustrate how to use pycurl.error().
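Most of the examples below follow the same basic pattern: call perform() inside a try block and catch pycurl.error, whose args tuple carries the libcurl error code and message. The snippet below is a minimal sketch of that pattern (the fetch helper and its URL argument are illustrative only, not taken from any of the projects shown here):

import pycurl
from io import BytesIO

def fetch(url):
    # Perform a simple GET and return the body bytes, or None on a curl-level failure.
    buf = BytesIO()
    c = pycurl.Curl()
    c.setopt(pycurl.URL, url)
    c.setopt(pycurl.WRITEDATA, buf)
    try:
        c.perform()
    except pycurl.error as e:
        code, message = e.args  # libcurl error code and human-readable message
        print("curl failed with code %d: %s" % (code, message))
        return None
    finally:
        c.close()
    return buf.getvalue()

For example, fetch("https://example.com") returns the response body on success and None if libcurl reports an error such as a timeout or DNS failure.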
def _handle_events(self, fd, events):
    """Called by IOLoop when there is activity on one of our
    file descriptors.
    """
    action = 0
    if events & ioloop.IOLoop.READ:
        action |= pycurl.CSELECT_IN
    if events & ioloop.IOLoop.WRITE:
        action |= pycurl.CSELECT_OUT
    while True:
        try:
            ret, num_handles = self._multi.socket_action(fd, action)
        except pycurl.error as e:
            ret = e.args[0]
        if ret != pycurl.E_CALL_MULTI_PERFORM:
            break
    self._finish_pending_requests()
def ipv6_asn(ifname):
    try:
        c = pycurl.Curl()
        body = BytesIO()
        c.setopt(c.URL,
                 "https://stat.ripe.net/data/prefix-overview/data.json?resource={}"
                 .format(ipv6_address_public(ifname)))
        c.setopt(c.INTERFACE, ifname)
        c.setopt(c.WRITEDATA, body)
        c.perform()
        asns = json.loads((body.getvalue()).decode('utf-8'))['data']['asns']
        if len(asns) == 1:
            return asns[0]['asn']
        else:
            return None
    except pycurl.error:
        return None
def perform(self):
    self.__performHead = ""
    self.__performBody = ""
    self.__headersSent = ""
    try:
        conn = Request.to_pycurl_object(pycurl.Curl(), self)
        conn.perform()
        self.response_from_conn_object(conn, self.__performHead, self.__performBody)
    except pycurl.error, error:
        errno, errstr = error
        raise ReqRespException(ReqRespException.FATAL, errstr)
    finally:
        conn.close()

######### This set of functions is not needed for normal use of the class
def fetch_many_async(urls, callback=None, errback=None, **kwargs):
    """
    Retrieve a list of URLs asynchronously.

    @param callback: Optionally, a function that will be fired one time for
        each successful URL, and will be passed its content and the URL
        itself.
    @param errback: Optionally, a function that will be fired one time for
        each failing URL, and will be passed the failure and the URL itself.
    @return: A C{DeferredList} whose callback chain will be fired as soon as
        all downloads have terminated. If an error occurs, the errback chain
        of the C{DeferredList} will be fired immediately.
    """
    results = []
    for url in urls:
        result = fetch_async(url, **kwargs)
        if callback:
            result.addCallback(callback, url)
        if errback:
            result.addErrback(errback, url)
        results.append(result)
    return DeferredList(results, fireOnOneErrback=True, consumeErrors=True)
def test_fetch_to_files_with_non_existing_directory(self):
    """
    The deferred list returned by L{fetch_to_files} results in a failure
    if the destination directory doesn't exist.
    """
    url_results = {"http://im/right": b"right"}
    directory = "i/dont/exist/"
    curl = CurlManyStub(url_results)

    result = fetch_to_files(url_results.keys(), directory, curl=curl)

    def check_error(failure):
        error = str(failure.value.subFailure.value)
        self.assertEqual(error, ("[Errno 2] No such file or directory: "
                                 "'i/dont/exist/right'"))
        self.assertFalse(os.path.exists(os.path.join(directory, "right")))

    result.addErrback(check_error)
    return result
def send(self, p_retry=0):
    l_retry = p_retry
    while l_retry >= 0:
        self.cleanup()
        try:
            self.m_handle.perform()
            self.read_response()
        except pycurl.error as l_error:
            l_code = l_error.args[0]
            if l_code == 28:
                logger.warning(__name__, "timeout on request '%s' : %s",
                               self.m_request.m_url, l_error.args[1])
                self.m_response.m_error = l_error.args[1]
                return False
            else:
                self.m_response.m_error = "curl error : %s" % self._error_from_core(l_code)
        if not self.response().has_error():
            return True
        logger.info(__name__, "error on request '%s' (%d retries left) : %s",
                    self.m_request.m_url, l_retry, self.response().m_error)
        l_retry -= 1
    logger.error(__name__, "error on request '%s' : %s",
                 self.m_request.m_url, self.response().m_error)
    return False
def _attach_stream(self, event):
    """ Attach stream to circuit. """
    try:
        self._controller.attach_stream(event.id, self._cid)
    except (OperationFailed, InvalidRequest), error:
        error = str(error)
        # If circuit is already closed, close stream too.
        if error in (('Unknown circuit "%s"' % self._cid),
                     "Can't attach stream to non-open origin circuit"):
            self._controller.close_stream(event.id)
        # Ignore the rare cases (~5*10^-7) where a stream has already been
        # closed almost directly after its NEW-event has been received.
        elif error == 'Unknown stream "%s"' % event.id:
            sys.stderr.write('Stream %s has already been closed.\n' % event.id)
        else:
            raise
def fetch(self, request, **kwargs):
    """Executes an HTTPRequest, returning an HTTPResponse.

    If an error occurs during the fetch, we raise an HTTPError.
    """
    if not isinstance(request, HTTPRequest):
        request = HTTPRequest(url=request, **kwargs)
    buffer = cStringIO.StringIO()
    headers = httputil.HTTPHeaders()
    try:
        _curl_setup_request(self._curl, request, buffer, headers)
        self._curl.perform()
        code = self._curl.getinfo(pycurl.HTTP_CODE)
        effective_url = self._curl.getinfo(pycurl.EFFECTIVE_URL)
        buffer.seek(0)
        response = HTTPResponse(
            request=request, code=code, headers=headers,
            buffer=buffer, effective_url=effective_url)
        if code < 200 or code >= 300:
            raise HTTPError(code, response=response)
        return response
    except pycurl.error, e:
        buffer.close()
        raise CurlError(*e)
def _handle_events(self, fd, events):
    """Called by IOLoop when there is activity on one of our
    file descriptors.
    """
    action = 0
    if events & ioloop.IOLoop.READ:
        action |= pycurl.CSELECT_IN
    if events & ioloop.IOLoop.WRITE:
        action |= pycurl.CSELECT_OUT
    while True:
        try:
            ret, num_handles = self._socket_action(fd, action)
        except pycurl.error as e:
            ret = e.args[0]
        if ret != pycurl.E_CALL_MULTI_PERFORM:
            break
    self._finish_pending_requests()
def transfer(ipaddr, username, password, commandfile):
    # transfers commandfile to camera
    storage = StringIO()
    c = pycurl.Curl()
    c.setopt(c.URL, 'http://' + ipaddr + '/admin/remoteconfig')
    c.setopt(c.POST, 1)
    c.setopt(c.CONNECTTIMEOUT, 5)
    c.setopt(c.TIMEOUT, TIMEOUT)
    filesize = os.path.getsize(commandfile)
    f = open(commandfile, 'rb')
    c.setopt(c.FAILONERROR, True)
    c.setopt(pycurl.POSTFIELDSIZE, filesize)
    c.setopt(pycurl.READFUNCTION, FileReader(f).read_callback)
    c.setopt(c.WRITEFUNCTION, storage.write)
    c.setopt(pycurl.HTTPHEADER, ["application/x-www-form-urlencoded"])
    c.setopt(c.VERBOSE, VERBOSE)
    c.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
    c.setopt(pycurl.USERPWD, username + ':' + password)
    try:
        c.perform()
    except pycurl.error, error:
        errno, errstr = error
        print 'An error occurred: ', errstr
        return False, ''
    c.close()
    content = storage.getvalue()
    f.close()
    return True, content

# ***************************************************************
# ***              Main program                               ***
# ***************************************************************
def user_init(self, config_file):
    """
    Try to load default values from config file
    Defaults should be in the form x = y
    """
    try:
        self._read_config("/etc/" + Config.config)
        if self._read_config(config_file):
            return True
        self._read_config(Config.topdir + "/" + Config.config)
        if self.topdir != self.homedir:
            self._read_config(Config.homedir + "/" + Config.config)
    except ValueError as error:
        Msg().err("Error:", error)
        sys.exit(1)
    self._override_config()
    self._verify_config()
def restore(self):
    """Restore container files after FileBind"""
    error = False
    if not os.path.isdir(self.container_orig_dir):
        return True
    for f_name in os.listdir(self.container_orig_dir):
        orig_file = self.container_orig_dir + "/" + f_name
        if not os.path.isfile(orig_file):
            continue
        cont_file = os.path.basename(f_name).replace('#', '/')
        cont_file = self.container_root + "/" + cont_file
        if os.path.islink(cont_file):
            FileUtil(cont_file).remove()
        elif os.path.exists(cont_file):
            continue
        if not FileUtil(orig_file).rename(cont_file):
            Msg().err("Error: restoring binded file:", cont_file)
            error = True
    if not error:
        FileUtil(self.container_orig_dir).remove()
    FileUtil(self.container_bind_dir).remove()
def _verify_layer_file(self, structure, layer_id):
    """Verify layer file in repository"""
    layer_f = structure["layers"][layer_id]["layer_f"]
    if not (os.path.exists(layer_f) and os.path.islink(layer_f)):
        Msg().err("Error: layer data file symbolic link not found", layer_id)
        return False
    if not os.path.exists(self.cur_tagdir + "/" + os.readlink(layer_f)):
        Msg().err("Error: layer data file not found")
        return False
    if not FileUtil(layer_f).verify_tar():
        Msg().err("Error: layer file not ok:", layer_f)
        return False
    match = re.search("/sha256:(\\S+)$", layer_f)
    if match:
        layer_f_chksum = ChkSUM().sha256(layer_f)
        if layer_f_chksum != match.group(1):
            Msg().err("Error: layer file chksum error:", layer_f)
            return False
    return True
def do_load(self, cmdp):
    """
    load: load a container image saved by docker with 'docker save'
    load --input=<docker-saved-container-file>
    load -i <docker-saved-container-file>
    load < <docker-saved-container-file>
    """
    imagefile = cmdp.get("--input=")
    if not imagefile:
        imagefile = cmdp.get("-i=")
        if imagefile is False:
            imagefile = "-"
    if cmdp.missing_options():  # syntax error
        return False
    if not imagefile:
        Msg().err("Error: must specify filename of docker exported image")
        return False
    repos = self.dockerlocalfileapi.load(imagefile)
    if not repos:
        Msg().err("Error: loading failed")
        return False
    else:
        for repo_item in repos:
            Msg().out(repo_item)
        return True
def do_create(self, cmdp):
    """
    create: extract image layers and create a container
    create [options] <repo/image:tag>
    --name=xxxx    :set or change the name of the container
    """
    imagespec = cmdp.get("P1")
    name = cmdp.get("--name=")
    if cmdp.missing_options():  # syntax error
        return False
    container_id = self._create(imagespec)
    if container_id:
        Msg().out(container_id)
        if name and not self.localrepo.set_container_name(container_id, name):
            Msg().err("Error: invalid container name may already exist "
                      "or wrong format")
            return False
        return True
    return False
def do_rmi(self, cmdp):
    """
    rmi: delete an image in the local repository
    rmi [options] <repo/image:tag>
    -f    :force removal
    """
    force = cmdp.get("-f")
    imagespec = str(cmdp.get("P1"))
    (imagerepo, tag) = self._check_imagespec(imagespec)
    if cmdp.missing_options():  # syntax error
        return False
    if not imagerepo:
        return False
    else:
        if self.localrepo.isprotected_imagerepo(imagerepo, tag):
            Msg().err("Error: image repository is protected")
            return False
        Msg().out("Info: deleting image:", imagespec, l=Msg.INF)
        if not self.localrepo.del_imagerepo(imagerepo, tag, force):
            Msg().err("Error: deleting image")
            return False
        return True
def do_protect(self, cmdp):
    """
    protect: protect a container or image against deletion
    protect <container-id or repo/image:tag>
    """
    arg = cmdp.get("P1")
    if cmdp.missing_options():  # syntax error
        return False
    if self.localrepo.get_container_id(arg):
        if not self.localrepo.protect_container(arg):
            Msg().err("Error: protect container failed")
            return False
        return True
    else:
        (imagerepo, tag) = self._check_imagespec(arg)
        if imagerepo:
            if self.localrepo.protect_imagerepo(imagerepo, tag):
                return True
        Msg().err("Error: protect image failed")
        return False
def do_unprotect(self, cmdp):
    """
    unprotect: remove delete protection
    unprotect <container-id or repo/image:tag>
    """
    arg = cmdp.get("P1")
    if cmdp.missing_options():  # syntax error
        return False
    if self.localrepo.get_container_id(arg):
        if not self.localrepo.unprotect_container(arg):
            Msg().err("Error: unprotect container failed")
            return False
        return True
    else:
        (imagerepo, tag) = self._check_imagespec(arg)
        if imagerepo:
            if self.localrepo.unprotect_imagerepo(imagerepo, tag):
                return True
        Msg().err("Error: unprotect image failed")
        return False
def do_name(self, cmdp):
    """
    name: give a name alias to a container
    name <container-id> <container-name>
    """
    container_id = cmdp.get("P1")
    name = cmdp.get("P2")
    if cmdp.missing_options():  # syntax error
        return False
    if not (self.localrepo.get_container_id(container_id) and name):
        Msg().err("Error: invalid container id or name")
        return False
    if not self.localrepo.set_container_name(container_id, name):
        Msg().err("Error: invalid container name")
        return False
    return True
def do_verify(self, cmdp):
    """
    verify: verify an image
    verify <repo/image:tag>
    """
    (imagerepo, tag) = self._check_imagespec(cmdp.get("P1"))
    if (not imagerepo) or cmdp.missing_options():  # syntax error
        return False
    else:
        Msg().out("Info: verifying: %s:%s" % (imagerepo, tag), l=Msg.INF)
        if not self.localrepo.cd_imagerepo(imagerepo, tag):
            Msg().err("Error: selecting image and tag")
            return False
        elif self.localrepo.verify_image():
            Msg().out("Info: image Ok", l=Msg.INF)
            return True
        Msg().err("Error: image verification failure")
        return False
def declare_options(self, opts_string, opt_where="CMD_OPT"):
    """Declare in advance options that are part of the command line
    """
    pos = 0
    opt_list = self._argv_split[opt_where]
    while pos < len(opt_list):
        for opt_name in opts_string.strip().split():
            if opt_name.endswith("="):
                if opt_list[pos].startswith(opt_name):
                    self._argv_consumed_options[opt_where].append(pos)
                elif opt_list[pos] == opt_name[:-1]:
                    self._argv_consumed_options[opt_where].append(pos)
                    if pos + 1 == len(opt_list):
                        break   # error -x without argument at end of line
                    if (pos < len(opt_list) and
                            not opt_list[pos+1].startswith("-")):
                        self._argv_consumed_options[opt_where].\
                            append(pos + 1)
            elif opt_list[pos] == opt_name:
                self._argv_consumed_options[opt_where].append(pos)
        pos += 1
def load(self):
    parser = ConfigParser()
    settings = {}
    try:
        found = parser.read(AppConfiguration.CONFIG_FNAME)
        if found and self.section_name in parser.sections():
            settings = parser[self.section_name]
    except Exception:
        msg = 'Error loading app settings from {}. Error details logged to file.'.format(
            AppConfiguration.CONFIG_FNAME)
        logger.error(msg)
        logexception()
        raise AppException(msg)
    self._load_settings(settings)
    self.save()
def onDownloadComplete(self, url, data):
    self.requested = False
    if not data:
        # print('Request Failed: {}'. format(self.result.item.name))
        return
    if self.image:
        return
    # CONVERT DATA TO GIF IMAGE
    try:
        img = PIL.Image.open(data)
        self.image = PIL.ImageTk.PhotoImage(img)
        self.updateOverlayImage(img)
        if url not in self.CACHE:
            self.CACHE[url] = data
        # notify ui
        self.ui_queue.put((MsgType.Object, self))
    except OSError as e:
        item = self.item
        with open('tmp\\{}.err.png'.format(item.name.strip()), mode='wb') as f:
            f.write(data.getvalue())
        logger.error('Image conversion failed: {}, Length: {}\t{}'.format(
            item.name, len(data.getvalue()), url))
def end_all_async_unsafe(self):
    if not Config.RECORDING_ACTIVATED:
        return
    for rtmp_name in self._recording_rtmps:
        curl = pycurl.Curl()
        try:
            self._set_def_curl_opts(curl)
            curl.setopt(pycurl.URL, self._end_url(rtmp_name))
            curl.setopt(pycurl.WRITEDATA, self._end_buffer)
            curl.perform()
        except pycurl.error as e:
            console.warning(
                'Pycurl error in end_all() for racer <{0}>: Tried to curl <{1}>. Error {2}.'.format(
                    rtmp_name, self._end_url(rtmp_name), e))
        finally:
            curl.close()
    self._recording_rtmps.clear()
def _end_record_nolock(self, rtmp_name):
    rtmp_name = rtmp_name.lower()
    if rtmp_name not in self._recording_rtmps:
        return
    curl = pycurl.Curl()
    try:
        self._set_def_curl_opts(curl)
        curl.setopt(pycurl.URL, self._end_url(rtmp_name))
        curl.setopt(pycurl.WRITEDATA, self._end_buffer)
        curl.perform()
        self._recording_rtmps = [r for r in self._recording_rtmps if r != rtmp_name]
    except pycurl.error as e:
        console.warning(
            'Pycurl error in end_record({0}): Tried to curl <{1}>. Error {2}.'.format(
                rtmp_name, self._end_url(rtmp_name), e))
    finally:
        curl.close()
def curl_to_buf(url, proto, c=None, buf=None):
    if ((c and buf is None) or (buf and c is None)):
        _out.die('bad arguments!')
    if c is None:
        buf = BytesIO()
        c = curl_common_init(buf)
    c.setopt(c.URL, url)
    try:
        c.perform()
    except pycurl.error:
        check_curl_error(c, buf, proto, True)
    check_curl_error(c, buf, proto)
    c.close()
    return buf
def error(error_code):
    if error_code == 1:
        logging.info("sign ??????")
        return 1
    elif error_code == 2:
        logging.info("????????")
        return 2
    elif error_code == 3:
        logging.info("?????????? 100 ? ?????????????????????????")
        return 3
    elif error_code == 4:
        logging.info("???? ?????????")
        return 4
    elif error_code == 5:
        logging.info("??????????? MD5?")
        return 5
    elif error_code == 6:
        logging.info("??????? ??????????")
        return 6
    elif error_code == 7:
        logging.info("???????????????~")
        return 7
    else:
        logging.info("????")
        return 100
def reset(self):
    """Reset the state of the transport engine.  Do this before
    performing another type of request."""
    for c in self.__chandles:
        if c not in self.__freehandles:
            try:
                self.__mhandle.remove_handle(c)
            except pycurl.error:
                # If cleanup is interrupted, it's
                # possible that a handle was removed but
                # not placed in freelist.  In that case,
                # finish cleanup and append to
                # freehandles.
                pass
            self.__teardown_handle(c)
    self.__active_handles = 0
    self.__freehandles = self.__chandles[:]
    self.__req_q = deque()
    self.__failures = []
    self.__success = []
    self.__orphans = set()
def _handle_timeout(self):
    """Called by IOLoop when the requested timeout has passed."""
    with stack_context.NullContext():
        self._timeout = None
        while True:
            try:
                ret, num_handles = self._multi.socket_action(
                    pycurl.SOCKET_TIMEOUT, 0)
            except pycurl.error as e:
                ret = e.args[0]
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
        self._finish_pending_requests()

    # In theory, we shouldn't have to do this because curl will
    # call _set_timeout whenever the timeout changes.  However,
    # sometimes after _handle_timeout we will need to reschedule
    # immediately even though nothing has changed from curl's
    # perspective.  This is because when socket_action is
    # called with SOCKET_TIMEOUT, libcurl decides internally which
    # timeouts need to be processed by using a monotonic clock
    # (where available) while tornado uses python's time.time()
    # to decide when timeouts have occurred.  When those clocks
    # disagree on elapsed time (as they will whenever there is an
    # NTP adjustment), tornado might call _handle_timeout before
    # libcurl is ready.  After each timeout, resync the scheduled
    # timeout with libcurl's current state.
    new_timeout = self._multi.timeout()
    if new_timeout >= 0:
        self._set_timeout(new_timeout)
def _handle_force_timeout(self):
    """Called by IOLoop periodically to ask libcurl to process any
    events it may have forgotten about.
    """
    with stack_context.NullContext():
        while True:
            try:
                ret, num_handles = self._multi.socket_all()
            except pycurl.error as e:
                ret = e.args[0]
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
        self._finish_pending_requests()
def _process_queue(self):
    with stack_context.NullContext():
        while True:
            started = 0
            while self._free_list and self._requests:
                started += 1
                curl = self._free_list.pop()
                (request, callback) = self._requests.popleft()
                curl.info = {
                    "headers": httputil.HTTPHeaders(),
                    "buffer": BytesIO(),
                    "request": request,
                    "callback": callback,
                    "curl_start_time": time.time(),
                }
                try:
                    self._curl_setup_request(
                        curl, request, curl.info["buffer"],
                        curl.info["headers"])
                except Exception as e:
                    # If there was an error in setup, pass it on
                    # to the callback. Note that allowing the
                    # error to escape here will appear to work
                    # most of the time since we are still in the
                    # caller's original stack frame, but when
                    # _process_queue() is called from
                    # _finish_pending_requests the exceptions have
                    # nowhere to go.
                    callback(HTTPResponse(
                        request=request,
                        code=599,
                        error=e))
                else:
                    self._multi.add_handle(curl)

            if not started:
                break
def _finish(self, curl, curl_error=None, curl_message=None):
    info = curl.info
    curl.info = None
    self._multi.remove_handle(curl)
    self._free_list.append(curl)
    buffer = info["buffer"]
    if curl_error:
        error = CurlError(curl_error, curl_message)
        code = error.code
        effective_url = None
        buffer.close()
        buffer = None
    else:
        error = None
        code = curl.getinfo(pycurl.HTTP_CODE)
        effective_url = curl.getinfo(pycurl.EFFECTIVE_URL)
        buffer.seek(0)
    # the various curl timings are documented at
    # http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
    time_info = dict(
        queue=info["curl_start_time"] - info["request"].start_time,
        namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
        connect=curl.getinfo(pycurl.CONNECT_TIME),
        pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME),
        starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME),
        total=curl.getinfo(pycurl.TOTAL_TIME),
        redirect=curl.getinfo(pycurl.REDIRECT_TIME),
    )
    try:
        info["callback"](HTTPResponse(
            request=info["request"], code=code, headers=info["headers"],
            buffer=buffer, effective_url=effective_url, error=error,
            reason=info['headers'].get("X-Http-Reason", None),
            request_time=time.time() - info["curl_start_time"],
            time_info=time_info))
    except Exception:
        self.handle_callback_exception(info["callback"])