Python requests.packages.urllib3.util.retry module: Retry() example source code

The following 43 code examples, extracted from open-source Python projects, illustrate how to use requests.packages.urllib3.util.retry.Retry().

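Nearly every example below follows the same pattern: build a Retry policy, wrap it in an HTTPAdapter, and mount that adapter on a Session for one or more URL prefixes. Here is a minimal sketch of the shared pattern, with illustrative values (note that the method_whitelist argument seen in several of the older examples was renamed to allowed_methods in urllib3 1.26 and removed in 2.0):

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

session = requests.Session()
retry = Retry(
    total=3,                           # overall retry budget per request
    backoff_factor=0.3,                # sleep roughly backoff_factor * (2 ** (retry - 1))
    status_forcelist=(500, 502, 504),  # also retry on these response codes
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)

response = session.get('https://example.com', timeout=5)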
Project: apimatic-cli    Author: apimatic    | Project source | File source
def __init__(self, timeout=60, cache=False, max_retries=None, retry_interval=None):
        """The constructor.

        Args:
            timeout (float): The default global timeout in seconds.
            cache (bool): Whether to wrap the session in CacheControl.
            max_retries (int): Total number of retry attempts per request.
            retry_interval (float): Backoff factor applied between retries.

        """
        self.timeout = timeout
        self.session = requests.session()

        if max_retries and retry_interval:
            retries = Retry(total=max_retries, backoff_factor=retry_interval)
            self.session.mount('http://', HTTPAdapter(max_retries=retries))
            self.session.mount('https://', HTTPAdapter(max_retries=retries))

        if cache:
            self.session = CacheControl(self.session)
Project: resultsdb-updater    Author: release-engineering    | Project source | File source
def retry_session():
    # This will give the total wait time in minutes:
    # >>> sum([min((0.3 * (2 ** (i - 1))), 120) / 60 for i in range(24)])
    # >>> 30.5575
    # This works by taking the minimum of the computed backoff time and the
    # maximum backoff time, which defaults to 120 seconds. The backoff time
    # increases after every failed attempt.
    session = requests.Session()
    retry = Retry(
        total=24,
        read=5,
        connect=24,
        backoff_factor=0.3,
        status_forcelist=(500, 502, 504),
        method_whitelist=('GET', 'POST'),
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session
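The arithmetic in the comment above can be reproduced directly: the early sleeps grow geometrically, and once 0.3 * (2 ** (i - 1)) exceeds urllib3's default cap (Retry.BACKOFF_MAX, 120 seconds) every remaining sleep is clamped, which is what the min(..., 120) term models:

# Reproduces the ~30.56 minute total-wait estimate quoted above.
total_minutes = sum(min(0.3 * (2 ** (i - 1)), 120) / 60 for i in range(24))
print(round(total_minutes, 4))  # 30.5575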
Project: lightning-integration    Author: cdecker    | Project source | File source
def requests_retry_session(
    retries=3,
    backoff_factor=0.3,
    status_forcelist=(500, 502, 504),
    session=None,
):
    session = session or requests.Session()
    retry = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session
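Typical usage of the helper above (the URL and timeout are illustrative):

session = requests_retry_session(retries=5, backoff_factor=0.5)
response = session.get('https://example.com', timeout=10)
print(response.status_code)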
Project: clusterfuzz-tools    Author: google    | Project source | File source
def get_http():
  """Get the http object."""
  ensure_dir(CLUSTERFUZZ_TESTCASES_DIR)
  http = requests_cache.CachedSession(
      cache_name=os.path.join(CLUSTERFUZZ_TESTCASES_DIR, 'http_cache'),
      backend='sqlite',
      allowable_methods=('GET', 'POST'),
      allowable_codes=[200],
      expire_after=HTTP_CACHE_TTL)
  http.mount(
      'https://',
      adapters.HTTPAdapter(
          # backoff_factor is 0.5. Therefore, the max wait time is 16s.
          retry.Retry(
              total=5, backoff_factor=0.5,
              status_forcelist=[500, 502, 503, 504]))
  )
  return http
Project: python-percy-client    Author: percy    | Project source | File source
def _requests_retry_session(
        self,
        retries=3,
        backoff_factor=0.3,
        method_whitelist=['HEAD', 'GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'],
        status_forcelist=(500, 502, 503, 504, 520, 524),
        session=None,
    ):
        session = session or requests.Session()
        retry = Retry(
            total=retries,
            read=retries,
            connect=retries,
            status=retries,
            method_whitelist=method_whitelist,
            backoff_factor=backoff_factor,
            status_forcelist=status_forcelist,
        )
        adapter = HTTPAdapter(max_retries=retry)
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        return session
Project: ebay    Author: fgscivittaro    | Project source | File source
def get_soup(url, num_retries=10):
    """
    Takes in a url and returns the parsed BeautifulSoup code for that url with
    handling capabilities if the request 'bounces'.
    """

    s = requests.Session()

    retries = Retry(
        total=num_retries,
        backoff_factor=0.1,
        status_forcelist=[500, 502, 503, 504]
    )

    s.mount('http://', HTTPAdapter(max_retries=retries))

    return BeautifulSoup(s.get(url).text, 'html.parser')
Project: open-mic    Author: cosmir    | Project source | File source
def demo(base_url):
    """Login through a third-party OAuth handler and print some stats.

    Parameters
    ----------
    base_url : str
        Base URL of the CMS server.
    """
    session = requests.Session()
    adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=0.02))
    session.mount('{}://'.format(urlparse(base_url).scheme), adapter)

    wb = webbrowser.get()
    login_url = os.path.join(base_url, "login?complete=no")
    session.get(login_url)
    wb.open(login_url)

    auth_url = input("Enter the URL returned after authentication:")
    response = session.get(auth_url.replace("complete=no", 'complete=yes'))
    assert response.status_code == 200

    print(session.get(os.path.join(base_url, 'me')).content)
Project: clickhouse-cli    Author: hatarist    | Project source | File source
def __init__(self, url, user, password, database, settings=None, stacktrace=False, timeout=10.0,
                 timeout_retry=0, timeout_retry_delay=0.0):
        self.url = url
        self.user = user
        self.password = password or ''
        self.database = database
        self.settings = settings or {}
        self.cli_settings = {}
        self.stacktrace = stacktrace
        self.timeout = timeout
        self.session = requests.Session()

        retries = Retry(
            connect=timeout_retry,
            # method_whitelist={'GET', 'POST'},  # enabling retries for POST may be a bad idea
            backoff_factor=timeout_retry_delay
        )
        self.session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries))
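The commented-out method_whitelist hints at why retrying POST is risky: POST is not idempotent, so a retried request that actually reached the server can execute twice. urllib3's default whitelist covers only idempotent methods (HEAD, GET, PUT, DELETE, OPTIONS, TRACE), which is why the examples that retry POST have to opt in explicitly.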
Project: openmailbox_downloader    Author: appleorange1    | Project source | File source
def list_folders(csrftoken, sessionid):
    print("Getting list of folders")
    # Create a session object from requests library
    s = requests.Session()
    retries = Retry(total=10, backoff_factor=1,
                    status_forcelist=[500, 502, 504])
    s.mount('https://', HTTPAdapter(max_retries=retries))
    s.headers.update({'Cookie': 'csrftoken={0};'
                      'sessionid={1}'.format(csrftoken, sessionid)})
    mdatareq = 'https://app.openmailbox.org/requests/webmail?action=folderlist'
    print(mdatareq)

    metadata = json.loads(s.get(mdatareq).text)
    print(metadata)

    print('\nFolder names:')
    for line in metadata['folders']:
        print(line['name'])
Project: github-bugzilla-pr-linker    Author: mozilla    | Project source | File source
def requests_retry_session(
    retries=3,
    backoff_factor=0.3,
    status_forcelist=(502, 504),
    session=None,
):
    session = session or requests.Session()
    retry = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session
Project: Hockey-Scraper    Author: HarryShomer    | Project source | File source
def get_url(url):
    """
    Fetch the given url, retrying failed requests.

    :param url: given url

    :return: response object, or None on HTTP or connection errors
    """
    session = requests.Session()
    retries = Retry(total=10, backoff_factor=.1)
    session.mount('http://', HTTPAdapter(max_retries=retries))

    try:
        response = session.get(url, timeout=5)
        response.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
        return None

    return response
Project: tvmaze-plex-agent    Author: srob650    | Project source | File source
def _endpoint_premium_get(self, url):
        s = requests.Session()
        retries = Retry(total=5,
                        backoff_factor=0.1,
                        status_forcelist=[429])
        s.mount('http://', HTTPAdapter(max_retries=retries))
        try:
            r = s.get(url, auth=(self.username, self.api_key))
        except requests.exceptions.ConnectionError as e:
            raise ConnectionError(repr(e))

        s.close()

        if r.status_code in [404, 422]:
            return None

        if r.status_code == 400:
            raise BadRequest('Bad Request for url {}'.format(url))

        results = r.json()
        if results:
            return results
        else:
            return None
Project: tvmaze-plex-agent    Author: srob650    | Project source | File source
def _endpoint_premium_delete(self, url):
        s = requests.Session()
        retries = Retry(total=5,
                        backoff_factor=0.1,
                        status_forcelist=[429])
        s.mount('http://', HTTPAdapter(max_retries=retries))
        try:
            r = s.delete(url, auth=(self.username, self.api_key))
        except requests.exceptions.ConnectionError as e:
            raise ConnectionError(repr(e))

        s.close()

        if r.status_code == 400:
            raise BadRequest('Bad Request for url {}'.format(url))

        if r.status_code == 200:
            return True

        if r.status_code == 404:
            return None
Project: tvmaze-plex-agent    Author: srob650    | Project source | File source
def _endpoint_premium_put(self, url, payload=None):
        s = requests.Session()
        retries = Retry(total=5,
                        backoff_factor=0.1,
                        status_forcelist=[429])
        s.mount('http://', HTTPAdapter(max_retries=retries))
        try:
            r = s.put(url, data=payload, auth=(self.username, self.api_key))
        except requests.exceptions.ConnectionError as e:
            raise ConnectionError(repr(e))

        s.close()

        if r.status_code == 400:
            raise BadRequest('Bad Request for url {}'.format(url))

        if r.status_code == 200:
            return True

        if r.status_code in [404, 422]:
            return None

    # Get Show object
Project: open-wob-api    Author: openstate    | Project source | File source
def setup_http_session(self):
        if self.http_session:
            self.http_session.close()

        self.http_session = Session()
        self.http_session.headers['User-Agent'] = USER_AGENT

        http_retry = Retry(total=5, status_forcelist=[500, 503],
                           backoff_factor=.5)
        http_adapter = HTTPAdapter(max_retries=http_retry)
        self.http_session.mount('http://', http_adapter)

        http_retry = Retry(total=5, status_forcelist=[500, 503],
                           backoff_factor=.5)
        http_adapter = HTTPAdapter(max_retries=http_retry)
        self.http_session.mount('https://', http_adapter)
Project: open-wob-api    Author: openstate    | Project source | File source
def http_session(self):
        """Returns a :class:`requests.Session` object. A new session is
        created if it doesn't already exist."""
        http_session = getattr(self, '_http_session', None)
        if not http_session:
            requests.packages.urllib3.disable_warnings()
            session = requests.Session()
            session.headers['User-Agent'] = USER_AGENT

            http_retry = Retry(total=5, status_forcelist=[500, 503],
                               backoff_factor=.5)
            http_adapter = HTTPAdapter(max_retries=http_retry)
            session.mount('http://', http_adapter)

            http_retry = Retry(total=5, status_forcelist=[500, 503],
                               backoff_factor=.5)
            http_adapter = HTTPAdapter(max_retries=http_retry)
            session.mount('https://', http_adapter)

            self._http_session = session

        return self._http_session
Project: MundiAPI-PYTHON    Author: mundipagg    | Project source | File source
def __init__(self, timeout=60, cache=False, max_retries=None, retry_interval=None):
        """The constructor.

        Args:
            timeout (float): The default global timeout in seconds.
            cache (bool): Whether to wrap the session in CacheControl.
            max_retries (int): Total number of retry attempts per request.
            retry_interval (float): Backoff factor applied between retries.

        """
        self.timeout = timeout
        self.session = requests.session()

        if max_retries and retry_interval:
            retries = Retry(total=max_retries, backoff_factor=retry_interval)
            self.session.mount('http://', HTTPAdapter(max_retries=retries))
            self.session.mount('https://', HTTPAdapter(max_retries=retries))

        if cache:
            self.session = CacheControl(self.session)
Project: transfert    Author: rbernand    | Project source | File source
def _connect(self):
        self._session = requests.Session()
        adaptator = requests.adapters.HTTPAdapter()
        adaptator.max_retries = HttpRetry(
            read=self.READ_MAX_RETRIES,
            connect=self.CONN_MAX_RETRIES,
            backoff_factor=self.BACKOFF_FACTOR)
        self._session.mount(str(self.url), adaptator)
        self.__conn = self._session.get(
            self.url,
            stream=True,
            timeout=(self.CONN_TIMEOUT, self.READ_TIMEOUT))
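Unlike the scheme-wide mounts in the other examples, this one mounts the adapter on the full target URL. Session.mount matches adapters by longest URL prefix, so the retry policy applies only to requests whose URL starts with self.url; everything else keeps the session's default adapters.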
Project: sauna    Author: NicolasLM    | Project source | File source
def __init__(self, endpoint=None, application_key=None,
                 application_secret=None, consumer_key=None, timeout=TIMEOUT):
        from requests import Session
        from requests.adapters import HTTPAdapter

        self._endpoint = ENDPOINTS[endpoint]
        self._application_key = application_key
        self._application_secret = application_secret
        self._consumer_key = consumer_key

        # lazy load time delta
        self._time_delta = None

        try:
            # Some older versions of requests do not have the urllib3
            # vendorized package
            from requests.packages.urllib3.util.retry import Retry
        except ImportError:
            retries = 5
        else:
            # use a requests session to reuse connections between requests
            retries = Retry(
                total=5,
                backoff_factor=0.2,
                status_forcelist=[422, 500, 502, 503, 504]
            )

        self._session = Session()
        self._session.mount('https://', HTTPAdapter(max_retries=retries))
        self._session.mount('http://', HTTPAdapter(max_retries=retries))

        # Override default timeout
        self._timeout = timeout
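The ImportError fallback works because HTTPAdapter(max_retries=...) accepts either a Retry instance or a plain integer; with a bare integer, urllib3 builds the policy via Retry.from_int(n), which retries low-level errors up to n times but, by default, applies no backoff sleep and no status-code-based retries. A sketch of the two forms, with illustrative values:

adapter_simple = HTTPAdapter(max_retries=5)   # int: up to 5 retries, no backoff, no status_forcelist
adapter_full = HTTPAdapter(max_retries=Retry(
    total=5, backoff_factor=0.2,
    status_forcelist=[422, 500, 502, 503, 504]))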
Project: jwplatform-py    Author: jwplayer    | Project source | File source
def __init__(self, *args, **kwargs):
        super(RetryAdapter, self).__init__(*args, **kwargs)
        self.max_retries = Retry(total=RETRY_COUNT,
                                 backoff_factor=BACKOFF_FACTOR)
Project: tecken    Author: mozilla-services    | Project source | File source
def requests_retry_session(
    retries=3,
    backoff_factor=0.3,
    status_forcelist=(500, 502, 504),
):
    """Opinionated wrapper that creates a requests session with a
    HTTPAdapter that sets up a Retry policy that includes connection
    retries.

    If you do the more naive retry by simply setting a number. E.g.::

        adapter = HTTPAdapter(max_retries=3)

    then it will raise immediately on any connection errors.
    Retrying on connection errors guards better on unpredictable networks.
    From http://docs.python-requests.org/en/master/api/?highlight=retries#requests.adapters.HTTPAdapter
    it says: "By default, Requests does not retry failed connections."

    The backoff_factor is documented here:
    https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#urllib3.util.retry.Retry
    A default of retries=3 and backoff_factor=0.3 means it will sleep like::

        [0.3, 0.6, 1.2]
    """  # noqa
    session = requests.Session()
    retry = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session
Project: libmozdata    Author: mozilla    | Project source | File source
def __init__(self, base_url, queries=None, **kwargs):
        """Constructor

        Args:
            base_url (str): the server's url
            queries (Optional[Query]): the queries
        """

        self.session = FuturesSession(max_workers=self.MAX_WORKERS)
        retries = Retry(total=Connection.MAX_RETRIES, backoff_factor=1, status_forcelist=Connection.STATUS_FORCELIST)
        self.session.mount(base_url, HTTPAdapter(max_retries=retries))
        self.results = []
        self.queries = queries

        if kwargs:
            if 'timeout' in kwargs:
                self.TIMEOUT = kwargs['timeout']
            if 'max_retries' in kwargs:
                self.MAX_RETRIES = kwargs['max_retries']
            if 'max_workers' in kwargs:
                self.MAX_WORKERS = kwargs['max_workers']
            if 'user_agent' in kwargs:
                self.USER_AGENT = kwargs['user_agent']
            if 'x_forwarded_for' in kwargs:
                self.X_FORWARDED_FOR = utils.get_x_fwded_for_str(kwargs['x_forwarded_for'])

        self.exec_queries()
Project: PoGo-Proxies    Author: neskk    | Project source | File source
def get_async_requests_session(num_retries, backoff_factor, pool_size,
                               status_forcelist=[500, 502, 503, 504]):
    # Use requests & urllib3 to auto-retry.
    # If the backoff_factor is 0.1, then sleep() will sleep for [0.1s, 0.2s,
    # 0.4s, ...] between retries. It will also force a retry if the status
    # code returned is in status_forcelist.
    session = FuturesSession(max_workers=pool_size)

    # If any regular response is generated, no retry is done. Without using
    # the status_forcelist, even a response with status 500 will not be
    # retried.
    retries = Retry(total=num_retries, backoff_factor=backoff_factor,
                    status_forcelist=status_forcelist)

    # Mount handler on both HTTP & HTTPS.
    session.mount('http://', HTTPAdapter(max_retries=retries,
                                         pool_connections=pool_size,
                                         pool_maxsize=pool_size))
    session.mount('https://', HTTPAdapter(max_retries=retries,
                                          pool_connections=pool_size,
                                          pool_maxsize=pool_size))

    return session


# Evaluates the status of PTC and Niantic request futures, and returns the
# result (optionally with an error).
# Warning: blocking! Can only get status code if request has finished.
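For context, FuturesSession comes from the requests-futures package and returns futures rather than responses, so a caller of the helper above would collect results roughly like this (the URL is illustrative):

session = get_async_requests_session(num_retries=3, backoff_factor=0.1, pool_size=4)
future = session.get('https://example.com')  # returns a Future immediately
response = future.result()                   # blocks until the request completes
print(response.status_code)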
Project: PoGo-Proxies    Author: neskk    | Project source | File source
def download_webpage(target_url, proxy=None, timeout=5):
    s = requests.Session()

    retries = Retry(total=3,
                    backoff_factor=0.5,
                    status_forcelist=[500, 502, 503, 504])

    s.mount('http://', HTTPAdapter(max_retries=retries))

    headers = {
        'User-Agent': ('Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) ' +
                       'Gecko/20100101 Firefox/54.0'),
        'Referer': 'http://google.com'
    }

    r = s.get(target_url,
              proxies={'http': proxy, 'https': proxy},
              timeout=timeout,
              headers=headers)

    if r.status_code == 200:
        return r.content

    return None


# Sockslist.net uses javascript to obfuscate proxies port number.
# Builds a dictionary with decoded values for each variable.
# Dictionary = {'var': intValue, ...})
Project: skills-ml    Author: workforce-data-initiative    | Project source | File source
def _default_session(self):
        session = requests.Session()
        retries = Retry(
            total=5,
            backoff_factor=2,
            status_forcelist=[502, 503, 504]
        )
        session.mount('https://', HTTPAdapter(max_retries=retries))
        return session
Project: modis-ingestor    Author: AstroDigital    | Project source | File source
def get_session(retries=5):
    s = requests.Session()
    r = Retry(total=retries, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])
    s.mount('http://', HTTPAdapter(max_retries=r))
    s.mount('https://', HTTPAdapter(max_retries=r))
    return s
Project: ISIC-Dataset-Downloader    Author: vgupta-ai    | Project source | File source
def initializeRequestSession():
    global requestSession
    requestSession = requests.Session()
    retries = Retry(total=5,
                    backoff_factor=0.1,
                    status_forcelist=[500, 502, 503, 504])
    requestSession.mount('http://', HTTPAdapter(max_retries=retries))
    requestSession.mount('https://', HTTPAdapter(max_retries=retries))
Project: dcard-spider    Author: leVirve    | Project source | File source
def __init__(self, workers=8):
        retries = Retry(
            total=self.max_retries,
            backoff_factor=0.1,
            status_forcelist=[500, 502, 503, 504])
        session = requests.Session()
        session.mount('https://', HTTPAdapter(max_retries=retries))
        self.session = session
        self.pool = Pool(workers)
Project: quilt    Author: quiltdata    | Project source | File source
def _create_s3_session():
    """
    Creates a session with automatic retries on 5xx errors.
    """
    sess = requests.Session()
    retries = Retry(total=3,
                    backoff_factor=.5,
                    status_forcelist=[500, 502, 503, 504])
    sess.mount('https://', HTTPAdapter(max_retries=retries))
    return sess
Project: fabric8-analytics-worker    Author: fabric8-analytics    | Project source | File source
def get_session_retry(retries=3, backoff_factor=0.2, status_forcelist=(404, 500, 502, 504),
                      session=None):
    """Set HTTP Adapter with retries to session."""
    session = session or requests.Session()
    retry = Retry(total=retries, read=retries, connect=retries,
                  backoff_factor=backoff_factor, status_forcelist=status_forcelist)
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    return session
Project: pp_api    Author: artreven    | Project source | File source
def __init__(self, server, auth_data=None, session=None, max_retries=None):
        self.auth_data = auth_data
        self.server = server
        self.session = u.get_session(session, auth_data)
        if max_retries is not None:
            retries = Retry(total=max_retries,
                            backoff_factor=0.3,
                            status_forcelist=[500, 502, 503, 504])
            self.session.mount(self.server, HTTPAdapter(max_retries=retries))
Project: mal-scraper    Author: QasimK    | Project source | File source
def __init__(self, retry_info):
        super().__init__()
        self.session = requests.Session()
        retry = Retry(**retry_info)
        adaptor = HTTPAdapter(max_retries=retry)
        self.session.mount('http://', adaptor)
        self.session.mount('https://', adaptor)
Project: open-mic    Author: cosmir    | Project source | File source
def upload(filename, metadata, url):
    """Upload an audio file and corresponding metadata to the CMS.

    Parameters
    ----------
    filename : str
        Path to an audio file on disk.

    metadata : dict
        Object containing arbitrary metadata matching this audio file.

    url : str
        Destination for uploading data.

    Returns
    -------
    response : obj
        Dictionary containing data about the upload event, including the URI of
        the newly minted object.
    """
    start = datetime.datetime.now()
    session = requests.Session()
    adapter = HTTPAdapter(max_retries=Retry(total=8, backoff_factor=0.02))
    session.mount('{}://'.format(urlparse(url).scheme), adapter)

    response = session.post(
        url,
        data=metadata,
        files=dict(audio=open(filename, 'rb'))
    )
    end = datetime.datetime.now()
    elapsed = end - start
    result = dict(status=response.status_code, time_elapsed=str(elapsed),
                  start_time=str(start), filename=filename, **response.json())
    LOG.info(json.dumps(result))
    # Return the result so it matches the documented return value.
    return result
Project: open-mic    Author: cosmir    | Project source | File source
def launch_cms(port, noauth=False, max_retries=8):
    """Thin wrapper around kick-starting the CMS server.

    Parameters
    ----------
    port : int
        Port for running the server locally.

    noauth : bool, default=False
        If True, do not use authentication.

    max_retries : int, default=8
        Number of times the startup health check retries against the server.

    Returns
    -------
    server : subprocess.Popen
        Handle to the running server process.
    """
    flags = ['--debug']
    if noauth:
        flags += ['--noauth']

    cmd = "python {} --port {} --config {} {}".format(
        os.path.join('backend_server', 'main.py'), port,
        os.path.join('backend_server', '.config-local.yaml'), " ".join(flags))
    logging.info("server: {}".format(cmd))
    server = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE,
                              preexec_fn=os.setsid)

    # Test that the server is on; will raise an exception after enough attempts
    session = requests.Session()
    adapter = HTTPAdapter(max_retries=Retry(total=max_retries,
                                            backoff_factor=0.1))
    session.mount('http://', adapter)
    try:
        session.get('http://localhost:{}'.format(port))
    except requests.exceptions.ConnectionError:
        kill(server)
        raise EnvironmentError(
            "Unable to confirm that the server started successfully.")
    return server
Project: cortana-intelligence-customer360    Author: Azure    | Project source | File source
def sbs_request_session(namespace, max_retries=5, backoff=0.1):
    '''
    Create a requests session that manages retries for ServiceBusService
    :param namespace: ServiceBus namespace name
    :param max_retries: maximum allowed retries
    :param backoff: backoff factor that scales the wait time before each retry
    '''
    retries = Retry(total=max_retries, connect=max_retries, backoff_factor=backoff)
    request_session = Session()
    request_session.mount('https://{}.servicebus.windows.net'.format(namespace), HTTPAdapter(max_retries=retries))
    return request_session
Project: openmailbox_downloader    Author: appleorange1    | Project source | File source
def get_emails(csrftoken, sessionid, mailbox, lowerbound, upperbound):
    print("Getting list of emails")
    # Create a session object from requests library
    s = requests.Session()
    retries = Retry(total=10, backoff_factor=1,
                    status_forcelist=[500, 502, 504])
    s.mount('https://', HTTPAdapter(max_retries=retries))
    s.headers.update({'Cookie': 'csrftoken={0};'
                      'sessionid={1}'.format(csrftoken, sessionid)})
    mdatareq = 'https://app.openmailbox.org/requests/webmail?range={0}-{1}&sort=date&order=0&selected=&action=maillist&mailbox={2}'.format(lowerbound, upperbound, mailbox)
    print(mdatareq)

    metadata = json.loads(s.get(mdatareq).text)
    print(metadata)
    uids = []
    for line in metadata['partial_list']:
        uids.append(line['uid'])
    print("Finished getting list of emails")

    os.makedirs('emails_output_dir', exist_ok=True)
    print("Created directory emails_output_dir if it didn't already exist")

    for uid in uids:
        fname = 'emails_output_dir/' + str(mailbox) + '-' + str(uid) + ".eml"
        if not os.path.isfile(fname):
            req = 'https://app.openmailbox.org/requests/webmail?mailbox={0}&uid={1}&action=downloadmessage'.format(mailbox, str(uid))
            resp = s.get(req, stream=True)
            with open(fname, 'wb') as eml:
                for chunk in resp:
                    eml.write(chunk)
            print("Saved message " + fname)
        else:
            print("Already downloaded " + fname)
Project: bdbag    Author: ini-bdds    | Project source | File source
def get_new_session():
    session = requests.session()
    retries = Retry(connect=5,
                    read=5,
                    backoff_factor=1.0,
                    status_forcelist=[500, 502, 503, 504])

    session.mount('http://', HTTPAdapter(max_retries=retries))
    session.mount('https://', HTTPAdapter(max_retries=retries))

    return session
Project: tvmaze-plex-agent    Author: srob650    | Project source | File source
def _endpoint_standard_get(url):
        s = requests.Session()
        retries = Retry(total=5,
                        backoff_factor=0.1,
                        status_forcelist=[429])
        s.mount('http://', HTTPAdapter(max_retries=retries))
        try:
            r = s.get(url)
        except requests.exceptions.ConnectionError as e:
            raise ConnectionError(repr(e))

        s.close()

        if r.status_code in [404, 422]:
            return None

        if r.status_code == 400:
            raise BadRequest('Bad Request for url {}'.format(url))

        results = r.json()
        if results:
            return results
        else:
            return None

    # Query TVMaze Premium endpoints
Project: oadoi    Author: Impactstory    | Project source | File source
def call_requests_get(url,
                      headers={},
                      read_timeout=60,
                      connect_timeout=60,
                      stream=False,
                      related_pub=None,
                      ask_slowly=False):

    if u"doi.org/" in url:
        url = get_crossref_resolve_url(url, related_pub)
        logger.info(u"new url is {}".format(url))

    following_redirects = True
    num_redirects = 0
    while following_redirects:
        requests_session = requests.Session()

        retries = Retry(total=1,
                        backoff_factor=0.1,
                        status_forcelist=[500, 502, 503, 504])
        requests_session.mount('http://', DelayedAdapter(max_retries=retries))
        requests_session.mount('https://', DelayedAdapter(max_retries=retries))

        if u"citeseerx.ist.psu.edu/" in url:
            url = url.replace("http://", "https://")
            proxy_url = os.getenv("STATIC_IP_PROXY")
            proxies = {"https": proxy_url, "http": proxy_url}
        else:
            proxies = {}

        # logger.info(u"getting url {}".format(url))
        r = requests_session.get(url,
                    headers=headers,
                    timeout=(connect_timeout, read_timeout),
                    stream=stream,
                    proxies=proxies,
                    allow_redirects=True,
                    verify=False)

        if r and not r.encoding:
            r.encoding = "utf-8"

        # check to see if we actually want to keep redirecting, using business-logic redirect paths
        following_redirects = False
        num_redirects += 1
        if (r.status_code == 200) and (num_redirects < 5):
            redirect_url = keep_redirecting(r, related_pub)
            if redirect_url:
                following_redirects = True
                url = redirect_url

    return r
Project: harbour-sailfinder    Author: DylanVanAssche    | Project source | File source
def __init__(self, session=None, parser=None, user_agent=None,
                 history=True, timeout=None, allow_redirects=True, cache=False,
                 cache_patterns=None, max_age=None, max_count=None, tries=None,
                 multiplier=None):

        self.session = session or requests.Session()

        # Add default user agent string
        if user_agent is not None:
            self.session.headers['User-Agent'] = user_agent

        self.parser = parser

        self.timeout = timeout
        self.allow_redirects = allow_redirects

        # Set up caching
        if cache:
            adapter = RoboHTTPAdapter(max_age=max_age, max_count=max_count)
            cache_patterns = cache_patterns or ['http://', 'https://']
            for pattern in cache_patterns:
                self.session.mount(pattern, adapter)
        elif max_age:
            raise ValueError('Parameter `max_age` is provided, '
                             'but caching is turned off')
        elif max_count:
            raise ValueError('Parameter `max_count` is provided, '
                             'but caching is turned off')

        # Configure history
        self.history = history
        if history is True:
            self._maxlen = None
        elif not history:
            self._maxlen = 1
        else:
            self._maxlen = history
        self._states = []
        self._cursor = -1

        # Set up retries
        if tries:
            retry = Retry(tries, backoff_factor=multiplier)
            for protocol in ['http://', 'https://']:
                self.session.adapters[protocol].max_retries = retry
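Rather than mounting a new adapter, this example reaches into session.adapters and overwrites max_retries on the two adapters that requests installs by default; the effect is essentially the same as mounting HTTPAdapter(max_retries=retry) on both prefixes, as the other examples do.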
Project: congress-headshots-lambda    Author: datadesk    | Project source | File source
def send_request(self, url):
        session = requests.Session()
        retries = Retry(
            total=5,
            backoff_factor=1,
            status_forcelist=[500, 502, 503, 504]
        )
        session.mount("http://", HTTPAdapter(max_retries=retries))
        response = session.get(
            url,
            headers=self.request_headers,
            timeout=10,
            allow_redirects=False
        )
        try:
            response.raise_for_status()
            json_response = json.loads(response.content)
            return json_response
        except requests.exceptions.ReadTimeout as exception:
            # maybe set up for a retry, or continue in a retry loop
            logger.error("%s: %s" % (exception, url))
            logger.error("will need to setup retry and then access archived file")
            raise
        except requests.exceptions.ConnectionError as exception:
            # incorrect domain
            logger.error("will need to raise message that we can't connect")
            logger.error("%s: %s" % (exception, url))
            raise
        except requests.exceptions.HTTPError as exception:
            # http error occurred
            logger.error("%s: %s" % (exception, url))
            logger.error("trying to access archived file via failsafe")
            raise
        except requests.exceptions.URLRequired as exception:
            # valid URL is required to make a request
            logger.error("%s: %s" % (exception, url))
            logger.error("will need to raise message that URL is broken")
            raise
        except requests.exceptions.TooManyRedirects as exception:
            # tell the user their url was bad and try a different one
            logger.error("%s: %s" % (exception, url))
            logger.error("will need to raise message that URL is broken")
            raise
        except requests.exceptions.RequestException as exception:
            # ambiguous exception
            logger.error("%s: %s" % (exception, url))
            logger.error("trying to access archived file via failsafe")
            raise
Project: pp_api    Author: artreven    | Project source | File source
def extract_from_file(file, pid, server, auth_data=None, session=None,
                      max_retries=None, mb_time_factor=3, **kwargs):
    """
    Make extract call using project determined by pid.

    :param file: path to a file, or a file-like object, to extract from
    :param pid: id of the project
    :param server: server url
    :param auth_data: authentication data
    :param session: requests session to reuse
    :return: response object
    """
    root_logger.warning(
        'Please, consider switching to using PoolParty object for making calls to PoolParty.')
    data = {
        'numberOfConcepts': 100000,
        'numberOfTerms': 100000,
        'projectId': pid,
        'language': 'en',
        'useTransitiveBroaderConcepts': True,
        'useRelatedConcepts': True,
        # 'sentimentAnalysis': True,
        'filterNestedConcepts': True
    }
    data.update(kwargs)
    session = u.get_session(session, auth_data)
    target_url = server + '/extractor/api/extract'
    start = time()
    try:
        if not hasattr(file, 'read'):
            file = open(file, 'rb')
        # Find out the file size in megabytes
        file.seek(0, 2)  # Go to end of file
        f_size_mb = file.tell() / (1024 * 1024)
        file.seek(0)  # Go back to start of file
        if max_retries is not None:
            retries = Retry(total=max_retries,
                            backoff_factor=0.3,
                            status_forcelist=[500, 502, 503, 504])
            session.mount(server, HTTPAdapter(max_retries=retries))
        r = session.post(
            target_url,
            data=data,
            files={'file': file},
            timeout=(3.05, int(27 * mb_time_factor * (1 + f_size_mb)))
        )
    except Exception as e:
        root_logger.error(traceback.format_exc())
    finally:
        file.close()
    root_logger.debug('call took {:0.3f}'.format(time() - start))
    if 'r' not in locals():
        return None
    try:
        r.raise_for_status()
    except HTTPError as e:
        logging.error(r.text)
        raise e
    return r