Python six.moves.urllib.request module: urlopen() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use six.moves.urllib.request.urlopen().
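Before the project-specific examples, here is a minimal, hedged sketch of the pattern they all share (example.com is a placeholder URL):

from six.moves.urllib.request import urlopen

# Open the URL, read the raw bytes, decode them, and close the handle.
response = urlopen('http://example.com')
body = response.read().decode('utf-8')
response.close()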

Project: scikit-dataaccess    Author: MITHaystack
def getFileURLs(file_ids):
    '''
    Retrieve the ftp location for a list of file IDs

    @param file_ids: List of file IDs
    @return List of ftp locations
    '''

    info_url = ('http://modwebsrv.modaps.eosdis.nasa.gov/axis2/services/'
                'MODAPSservices/getFileUrls?fileIds='
                + ','.join(str(file_id) for file_id in file_ids))

    url = urlopen(info_url)
    tree = ET.fromstring(url.read().decode())
    url.close()

    return [ child.text for child in tree ]
Project: ssbio    Author: SBRG
def download_sifts_xml(pdb_id, outdir='', outfile=''):
    """Download the SIFTS file for a PDB ID.

    Args:
        pdb_id: PDB ID of the structure to download the SIFTS file for.
        outdir: Directory to save the downloaded file to.
        outfile: Optional file name to use; defaults to '<pdb_id>.sifts.xml'.

    Returns:
        str: Path to the downloaded and decompressed SIFTS XML file.
    """
    baseURL = 'ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/xml/'
    filename = '{}.xml.gz'.format(pdb_id)

    if outfile:
        outfile = op.join(outdir, outfile)
    else:
        outfile = op.join(outdir, filename.split('.')[0] + '.sifts.xml')

    if not op.exists(outfile):
        response = urlopen(baseURL + filename)
        with open(outfile, 'wb') as f:
            f.write(gzip.decompress(response.read()))

    return outfile
Project: mos-horizon    Author: Mirantis
def get_template_files(template_data=None, template_url=None):
    if template_data:
        tpl = template_data
    elif template_url:
        with contextlib.closing(request.urlopen(template_url)) as u:
            tpl = u.read()
    else:
        return {}, None
    if not tpl:
        return {}, None
    if isinstance(tpl, six.binary_type):
        tpl = tpl.decode('utf-8')
    template = template_format.parse(tpl)
    files = {}
    _get_file_contents(template, files)
    return files, template
Project: keras    Author: GeekLiB
def urlretrieve(url, filename, reporthook=None, data=None):
        def chunk_read(response, chunk_size=8192, reporthook=None):
            total_size = response.info().get('Content-Length').strip()
            total_size = int(total_size)
            count = 0
            while 1:
                chunk = response.read(chunk_size)
                count += 1
                if not chunk:
                    if reporthook:
                        # Final call reports the download as complete.
                        reporthook(count, total_size, total_size)
                    break
                if reporthook:
                    reporthook(count, chunk_size, total_size)
                yield chunk

        response = urlopen(url, data)
        with open(filename, 'wb') as fd:
            for chunk in chunk_read(response, reporthook=reporthook):
                fd.write(chunk)
Project: python-kingbirdclient    Author: openstack
def get_contents_if_file(contents_or_file_name):
    """Get the contents of a file.

    If the value passed in is a file name or file URI, return the
    contents. If not, or there is an error reading the file contents,
    return the value passed in as the contents.

    For example, a workflow definition will be returned if either the
    workflow definition file name, or file URI are passed in, or the
    actual workflow definition itself is passed in.
    """
    try:
        if parse.urlparse(contents_or_file_name).scheme:
            definition_url = contents_or_file_name
        else:
            path = os.path.abspath(contents_or_file_name)
            definition_url = parse.urljoin(
                'file:',
                request.pathname2url(path)
            )
        return request.urlopen(definition_url).read().decode('utf8')
    except Exception:
        return contents_or_file_name
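A hedged usage sketch of the helper above; the file name and the inline definition string are hypothetical:

# Given a file name or file:// URI, the file contents are returned...
definition = get_contents_if_file('my_workflow.yaml')
# ...while an actual definition string is returned unchanged.
definition = get_contents_if_file('version: "2.0"')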
Project: deb-python-oauth2client    Author: openstack
def test_ClientRedirectServer(self):
        # create a ClientRedirectServer and run it in a thread to listen
        # for a mock GET request with the access token
        # the server should return a 200 message and store the token
        httpd = tools.ClientRedirectServer(('localhost', 0),
                                           tools.ClientRedirectHandler)
        code = 'foo'
        url = 'http://localhost:{0}?code={1}'.format(
            httpd.server_address[1], code)
        t = threading.Thread(target=httpd.handle_request)
        t.setDaemon(True)
        t.start()
        f = request.urlopen(url)
        self.assertTrue(f.read())
        t.join()
        httpd.server_close()
        self.assertEqual(httpd.query_params.get('code'), code)
Project: REMAP    Author: REMAPApp
def test_ClientRedirectServer(self):
        # create a ClientRedirectServer and run it in a thread to listen
        # for a mock GET request with the access token
        # the server should return a 200 message and store the token
        httpd = tools.ClientRedirectServer(('localhost', 0),
                                           tools.ClientRedirectHandler)
        code = 'foo'
        url = 'http://localhost:{0}?code={1}'.format(
            httpd.server_address[1], code)
        t = threading.Thread(target=httpd.handle_request)
        t.setDaemon(True)
        t.start()
        f = request.urlopen(url)
        self.assertTrue(f.read())
        t.join()
        httpd.server_close()
        self.assertEqual(httpd.query_params.get('code'), code)
Project: LIE    Author: EmbraceLife
def import_libs():
            # NOTE: `from __future__` imports are only legal at module level;
            # inside a function they raise a SyntaxError, so they are omitted
            # here and must live at the top of the enclosing module.

            import hashlib
            import os
            import shutil
            import sys
            import tarfile
            import zipfile

            import six
            from six.moves.urllib.error import HTTPError
            from six.moves.urllib.error import URLError
            from six.moves.urllib.request import urlopen

            from tensorflow.contrib.keras.python.keras.utils.generic_utils import Progbar
Project: keras-customized    Author: ambrite
def urlretrieve(url, filename, reporthook=None, data=None):
        def chunk_read(response, chunk_size=8192, reporthook=None):
            total_size = response.info().get('Content-Length').strip()
            total_size = int(total_size)
            count = 0
            while 1:
                chunk = response.read(chunk_size)
                count += 1
                if not chunk:
                    if reporthook:
                        # Final call reports the download as complete.
                        reporthook(count, total_size, total_size)
                    break
                if reporthook:
                    reporthook(count, chunk_size, total_size)
                yield chunk

        response = urlopen(url, data)
        with open(filename, 'wb') as fd:
            for chunk in chunk_read(response, reporthook=reporthook):
                fd.write(chunk)
Project: flask-ask    Author: johnwheeler
def _make_tide_request(city, date):
    station = STATIONS.get(city.lower())
    noaa_api_params = {
        'station': station,
        'product': 'predictions',
        'datum': 'MLLW',
        'units': 'english',
        'time_zone': 'lst_ldt',
        'format': 'json'
    }
    if date == datetime.date.today():
        noaa_api_params['date'] = 'today'
    else:
        noaa_api_params['begin_date'] = date.strftime('%Y%m%d')
        noaa_api_params['range'] = 24
    url = ENDPOINT + "?" + urlencode(noaa_api_params)
    resp_body = urlopen(url).read()
    if len(resp_body) == 0:
        statement_text = render_template('noaa_problem')
    else:
        noaa_response_obj = json.loads(resp_body)
        predictions = noaa_response_obj['predictions']
        tideinfo = _find_tide_info(predictions)
        statement_text = render_template('tide_info', date=date, city=city, tideinfo=tideinfo)
    return statement(statement_text).simple_card("Tide Pooler", statement_text)
Project: odin    Author: imito
def urlretrieve(url, filename, reporthook=None, data=None):
    '''
    This function is adapted from: https://github.com/fchollet/keras
    Original work Copyright (c) 2014-2015 keras contributors
    '''
    def chunk_read(response, chunk_size=8192, reporthook=None):
      total_size = response.info().get('Content-Length').strip()
      total_size = int(total_size)
      count = 0
      while 1:
        chunk = response.read(chunk_size)
        if not chunk:
          break
        count += 1
        if reporthook:
          reporthook(count, chunk_size, total_size)
        yield chunk

    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
      for chunk in chunk_read(response, reporthook=reporthook):
        fd.write(chunk)
Project: ovpn-speed-connect    Author: N2ITN
def download_and_extract_nord_zipfile(ovpn_dir_path):
    """
    Download NordVPN's OVPN config zipfile, save it to disk, and then
    extract its contents.

    :param ovpn_dir_path: Default is the user's {$HOME}/OVPN/ (%HOME%\OVPN\ on Windows)

    :return: None
    """
    zip_data = urlopen('https://downloads.nordcdn.com/configs/archives/servers/ovpn.zip').read()
    zipfile_path = os.path.join(ovpn_dir_path, 'zipfile.zip')
    with open(zipfile_path, 'wb+') as nord_zipfile:
        nord_zipfile.write(zip_data)
    # sanity check
    assert os.path.exists(zipfile_path)
    # TODO: extracting an untrusted archive without validation is dangerous;
    # the member paths should be checked first (see the sketch below).
    zipfile.ZipFile(zipfile_path).extractall(ovpn_dir_path)
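As the TODO above notes, ZipFile.extractall() on an untrusted archive is exposed to path traversal (zip-slip). A minimal sketch of the missing check, under the assumption that validating member paths before extraction is sufficient here:

import os
import zipfile

def safe_extractall(zipfile_path, target_dir):
    """Extract the archive only if no member escapes target_dir."""
    with zipfile.ZipFile(zipfile_path) as archive:
        base = os.path.realpath(target_dir)
        for member in archive.namelist():
            dest = os.path.realpath(os.path.join(target_dir, member))
            # Reject members whose resolved path lands outside target_dir.
            if dest != base and not dest.startswith(base + os.sep):
                raise ValueError('unsafe path in archive: %s' % member)
        archive.extractall(target_dir)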
Project: minihydra    Author: VillanCh
def read_file_or_url(self, fname):
        # TODO: not working on localhost
        if os.path.isfile(fname):
            result = open(fname, 'r')
        else:
            match = self.urlre.match(fname)
            if match:
                result = urlopen(match.group(1))
            else:
                fname = os.path.expanduser(fname)
                try:
                    result = open(fname, 'r')
                except IOError:
                    result = open('%s.%s' % (fname, self.defaultExtension),
                                  'r')
        return result
Project: spacel-provision    Author: pebble
def _pd_api(self, url, data=None, method='GET'):
        url = '%s/%s' % (PD_API_BASE, url)
        request_args = {
            'headers': dict(self._pd_headers)
        }
        if six.PY3:  # pragma: no cover
            request_args['method'] = method

        if data is not None:
            request_args['data'] = json.dumps(data).encode('utf-8')
            request_args['headers']['Content-Type'] = APPLICATION_JSON

        request = Request(url, **request_args)
        if six.PY2:  # pragma: no cover
            request.get_method = lambda: method

        try:
            response = urlopen(request)
            return json.loads(response.read().decode('utf-8'))
        except HTTPError as e:
            response = e.read().decode('utf-8')
            logger.warning("API error: %s", response)
            if method == 'GET' and e.code == 404:
                return None
            else:
                raise
Project: open-mic    Author: cosmir
def check_connection(default='http://google.com', timeout=1):
    """Test the internet connection.

    Parameters
    ----------
    default : str
        URL to test; defaults to Google's homepage.

    timeout : number
        Time in seconds to wait before giving up.

    Returns
    -------
    success : bool
        True if the network appears to be reachable, else False.
    """
    success = True
    try:
        surl = urlparse.quote(default, safe=':./')
        urlrequest.urlopen(surl, timeout=timeout)
    except urlerror.URLError as derp:
        success = False
        logger.debug("Network unreachable: {}".format(derp))
    return success
Project: Benchmarks    Author: ECP-CANDLE
def urlretrieve(url, filename, reporthook=None, data=None):
        def chunk_read(response, chunk_size=8192, reporthook=None):
            total_size = response.info().get('Content-Length').strip()
            total_size = int(total_size)
            count = 0
            while 1:
                chunk = response.read(chunk_size)
                count += 1
                if not chunk:
                    if reporthook:
                        # Final call reports the download as complete.
                        reporthook(count, total_size, total_size)
                    break
                if reporthook:
                    reporthook(count, chunk_size, total_size)
                yield chunk

        response = urlopen(url, data)
        with open(filename, 'wb') as fd:
            for chunk in chunk_read(response, reporthook=reporthook):
                fd.write(chunk)
Project: arouteserver    Author: pierky
def _read_from_url(url):
        try:
            response = urlopen(url)
        except HTTPError as e:
            if e.code == 404:
                return "{}"
            else:
                raise PeeringDBError(
                    "HTTP error while retrieving info from PeeringDB: "
                    "code: {}, reason: {} - {}".format(
                        e.code, e.reason, str(e)
                    )
                )
        except Exception as e:
            raise PeeringDBError(
                "Error while retrieving info from PeeringDB: {}".format(
                    str(e)
                )
            )

        return response.read().decode("utf-8")
Project: file-metadata    Author: pywikibot-catfiles
def download(url, filename, overwrite=False, timeout=None):
    """
    Download the given URL to the given filename. If the file already
    exists, it is not downloaded again unless overwrite is set. Both text
    data (html, txt, etc.) and binary data (images, audio, etc.) are
    acceptable.

    :param url:       The URL to download.
    :param filename:  The file to store the downloaded data in.
    :param overwrite: Set to True if the file should be downloaded even if it
                      already exists.
    :param timeout:   Optional timeout in seconds for the request.
    """
    if not os.path.exists(filename) or overwrite:
        if timeout is None:
            response = urlopen(url)
        else:
            response = urlopen(url, timeout=timeout)
        with open(filename, 'wb') as out_file:
            copyfileobj(response, out_file)
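A hedged usage sketch; the URL and file names are placeholders:

# Fetch a file once; the download is skipped if the file already exists.
download('http://example.com/logo.png', 'logo.png', timeout=10)
# Force a fresh download even when 'logo.png' is already present.
download('http://example.com/logo.png', 'logo.png', overwrite=True)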
Project: DQN    Author: jjakimoto
def get_sap_symbols(name='sap500'):
    """Get ticker symbols constituting S&P

    Args:
        name(str): should be 'sap500' or 'sap100'
    """
    if name == 'sap500':
        site = 'http://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
    elif name == 'sap100':
        site = 'https://en.wikipedia.org/wiki/S%26P_100'
    else:
        raise NameError('invalid input: name should be "sap500" or "sap100"')
    # fetch the constituents table from Wikipedia
    page = urlopen(site)
    soup = BeautifulSoup(page, 'html.parser')
    table = soup.find('table', {'class': 'wikitable sortable'})
    symbols = []
    for row in table.findAll('tr'):
        col = row.findAll('td')
        if len(col) > 0:
            symbol = col[0].string.replace('.', '-')
            symbols.append(str(symbol))
    return symbols
Project: InnerOuterRNN    Author: Chemoinformatics
def urlretrieve(url, filename, reporthook=None, data=None):
        def chunk_read(response, chunk_size=8192, reporthook=None):
            total_size = response.info().get('Content-Length').strip()
            total_size = int(total_size)
            count = 0
            while 1:
                chunk = response.read(chunk_size)
                if not chunk:
                    break
                count += 1
                if reporthook:
                    reporthook(count, chunk_size, total_size)
                yield chunk

        response = urlopen(url, data)
        with open(filename, 'wb') as fd:
            for chunk in chunk_read(response, reporthook=reporthook):
                fd.write(chunk)
Project: solaris-ips    Author: oracle
def __network_ping(self):
                try:
                        repourl = urljoin(self.get_depot_url(),
                            "versions/0")
                        # Disable SSL peer verification, we just want to check
                        # if the depot is running.
                        url = urlopen(repourl,
                            context=ssl._create_unverified_context())
                        url.close()
                except HTTPError as e:
                        # Server returns NOT_MODIFIED if catalog is up
                        # to date
                        if e.code == http_client.NOT_MODIFIED:
                                return True
                        else:
                                return False
                except URLError as e:
                        return False
                return True
Project: scikit-dataaccess    Author: MITHaystack
def getFileIDs(modis_identifier, start_date, end_date, lat, lon, daynightboth):
    '''
    Retrieve file IDs for images matching search parameters

    @param modis_identifier: Product identifier (e.g. MOD09)
    @param start_date: Starting date
    @param end_date: Ending date
    @param lat: Latitude
    @param lon: Longitude
    @param daynightboth: Get daytime images ('D'), nighttime images ('N'), or both ('B')

    @return list of file IDs
    '''

    lat_str = str(lat)
    lon_str = str(lon)

    info_url = ('http://modwebsrv.modaps.eosdis.nasa.gov/axis2/services/MODAPSservices/searchForFiles'
                    + '?product=' + modis_identifier + '&collection=6&start=' + start_date
                    + '&stop=' + end_date + '&north=' + lat_str + '&south=' + lat_str + '&west='
                    + lon_str + '&east=' + lon_str + '&coordsOrTiles=coords&dayNightBoth=' + daynightboth)

    url = urlopen(info_url)
    tree = ET.fromstring(url.read().decode())
    url.close()

    return [ int(child.text) for child in tree ]
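A hedged sketch chaining this helper with the getFileURLs() example from the same project shown earlier; the product, date range, and coordinates are purely illustrative:

# Search for matching daytime MOD09 granules, then resolve their FTP URLs.
file_ids = getFileIDs('MOD09', '2017-01-01', '2017-01-07', 42.36, -71.06, 'D')
file_urls = getFileURLs(file_ids)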
Project: ssbio    Author: SBRG
def download_biological_assemblies(pdb_id, outdir):
    """Downloads biological assembly file from:
    `ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/divided/`

    Args:
        pdb_id (str): PDB ID to download the biological assembly for
        outdir (str): Output directory of the decompressed assembly

    """

    # TODO: not tested yet
    if not op.exists(outdir):
        raise ValueError('{}: output directory does not exist'.format(outdir))

    folder = pdb_id[1:3]
    server = 'ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/divided/{}/'.format(folder)
    html_folder = urlopen(server).readlines()
    for line in html_folder:
        if pdb_id in str(line).strip():
            file_name = pdb_id + str(line).strip().split(pdb_id)[1].split('\r\n')[0]
            outfile_name = file_name.replace('.', '_')
            outfile_name = outfile_name.replace('_gz', '.pdb')
            f = urlopen(op.join(server, file_name))
            decompressed_data = zlib.decompress(f.read(), 16 + zlib.MAX_WBITS)
            with open(op.join(outdir, outfile_name), 'wb') as f:
                f.write(decompressed_data)
            log.debug('{}: downloaded biological assembly'.format(pdb_id))
            return op.join(outdir, outfile_name)
Project: riko    Author: nerevu
def open(self, url, **params):
        if url.startswith('http') and params:
            r = requests.get(url, params=params, stream=True)
            r.raw.decode_content = self.decode
            response = r.text if self.cache_type else r.raw
        else:
            try:
                r = urlopen(url, context=self.context, timeout=self.timeout)
            except TypeError:
                r = urlopen(url, timeout=self.timeout)

            text = r.read() if self.cache_type else None

            if self.decode:
                encoding = get_response_encoding(r, self.def_encoding)

                if text:
                    response = decode(text, encoding)
                else:
                    response = reencode(r.fp, encoding, decode=True)
            else:
                response = text or r

        content_type = get_response_content_type(r)

        if 'xml' in content_type:
            self.ext = 'xml'
        elif 'json' in content_type:
            self.ext = 'json'
        else:
            self.ext = content_type.split('/')[1].split(';')[0]

        self.r = r
        return response
Project: auth0-django-web-app    Author: auth0-samples
def get_user_details(self, response):
        # Obtain JWT and the keys to validate the signature
        idToken = response.get('id_token')
        jwks = request.urlopen("https://" + self.setting('DOMAIN') + "/.well-known/jwks.json")
        issuer = "https://" + self.setting('DOMAIN') + "/"
        audience = self.setting('KEY') #CLIENT_ID
        payload = jwt.decode(idToken, jwks.read(), algorithms=['RS256'], audience=audience, issuer=issuer)

        return {'username': payload['nickname'],
                'first_name': payload['name'],
                'picture': payload['picture'],
                'user_id': payload['sub']}
Project: kripodb    Author: 3D-e-Chem
def fetch(self):
        """Fetch report from PDB website

        Returns:
            Iterable of dicts with keys ['structureId', 'chainID'] + self.fields

        """
        response = urlopen(self.url)
        return parse_csv_file(response)
Project: fanfou-py    Author: akgnah
def open_image(filename):
    if re.match('http[s]?:.*', filename):
        image = request.urlopen(filename)
    else:
        image = open(filename, 'rb')
    return image.read()
Project: tflearn    Author: tflearn
def load_image(in_image):
    """ Load an image, returns PIL.Image. """
    # if the path appears to be a URL
    if urlparse(in_image).scheme in ('http', 'https',):
        # set up the byte stream
        img_stream = BytesIO(request.urlopen(in_image).read())
        # and read in as PIL image
        img = Image.open(img_stream)
    else:
        # else use it as local file path
        img = Image.open(in_image)
    return img
Project: deep-learning-keras-projects    Author: jasmeetsb
def urlretrieve(url, filename, reporthook=None, data=None):
        """Replacement for `urlretrive` for Python 2.

        Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
        `urllib` module, known to have issues with proxy management.

        # Arguments
            url: url to retrieve.
            filename: where to store the retrieved data locally.
            reporthook: a hook function that will be called once
                on establishment of the network connection and once
                after each block read thereafter.
                The hook will be passed three arguments;
                a count of blocks transferred so far,
                a block size in bytes, and the total size of the file.
            data: `data` argument passed to `urlopen`.
        """
        def chunk_read(response, chunk_size=8192, reporthook=None):
            total_size = response.info().get('Content-Length').strip()
            total_size = int(total_size)
            count = 0
            while 1:
                chunk = response.read(chunk_size)
                count += 1
                if not chunk:
                    if reporthook:
                        # Final call reports the download as complete.
                        reporthook(count, total_size, total_size)
                    break
                if reporthook:
                    reporthook(count, chunk_size, total_size)
                yield chunk

        response = urlopen(url, data)
        with open(filename, 'wb') as fd:
            for chunk in chunk_read(response, reporthook=reporthook):
                fd.write(chunk)
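Given the reporthook contract documented in the docstring above, a hedged sketch of a matching progress callback; the URL and file name are placeholders:

def print_progress(count, block_size, total_size):
    # count * block_size overshoots on the final chunk, so clamp it.
    done = min(count * block_size, total_size)
    print('downloaded %d of %d bytes' % (done, total_size))

urlretrieve('http://example.com/data.bin', 'data.bin',
            reporthook=print_progress)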
Project: python-zunclient    Author: openstack
def get_template_contents(template_file=None, template_url=None,
                          files=None):

    # Transform a bare file path to a file:// URL.
    if template_file:  # nosec
        template_url = utils.normalise_file_path_to_url(template_file)
        tpl = request.urlopen(template_url).read()
    else:
        raise exceptions.CommandErrorException(_('Need to specify exactly '
                                                 'one of %(arg1)s or '
                                                 '%(arg2)s') %
                                               {'arg1': '--template-file',
                                                'arg2': '--template-url'})

    if not tpl:
        raise exceptions.CommandErrorException(_('Could not fetch '
                                                 'template from %s') %
                                               template_url)

    try:
        if isinstance(tpl, six.binary_type):
            tpl = tpl.decode('utf-8')
        template = template_format.parse(tpl)
    except ValueError as e:
        raise exceptions.CommandErrorException(_('Error parsing template '
                                                 '%(url)s %(error)s') %
                                               {'url': template_url,
                                                'error': e})
    return template
Project: chalktalk_docs    Author: loremIpsum1771
def fetch_inventory(app, uri, inv):
    """Fetch, parse and return an intersphinx inventory file."""
    # both *uri* (base URI of the links to generate) and *inv* (actual
    # location of the inventory file) can be local or remote URIs
    localuri = uri.find('://') == -1
    join = localuri and path.join or posixpath.join
    try:
        if inv.find('://') != -1:
            f = request.urlopen(inv)
        else:
            f = open(path.join(app.srcdir, inv), 'rb')
    except Exception as err:
        app.warn('intersphinx inventory %r not fetchable due to '
                 '%s: %s' % (inv, err.__class__, err))
        return
    try:
        line = f.readline().rstrip().decode('utf-8')
        try:
            if line == '# Sphinx inventory version 1':
                invdata = read_inventory_v1(f, uri, join)
            elif line == '# Sphinx inventory version 2':
                invdata = read_inventory_v2(f, uri, join)
            else:
                raise ValueError
            f.close()
        except ValueError:
            f.close()
            raise ValueError('unknown or unsupported inventory version')
    except Exception as err:
        app.warn('intersphinx inventory %r not readable due to '
                 '%s: %s' % (inv, err.__class__.__name__, err))
    else:
        return invdata
Project: LIE    Author: EmbraceLife
def urlretrieve(url, filename, reporthook=None, data=None):
            """Replacement for `urlretrive` for Python 2.

            Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
            `urllib` module, known to have issues with proxy management.

            Arguments:
                url: url to retrieve.
                filename: where to store the retrieved data locally.
                reporthook: a hook function that will be called once
                    on establishment of the network connection and once
                    after each block read thereafter.
                    The hook will be passed three arguments;
                    a count of blocks transferred so far,
                    a block size in bytes, and the total size of the file.
                data: `data` argument passed to `urlopen`.
            """

            def chunk_read(response, chunk_size=8192, reporthook=None):
              content_length = response.info().get('Content-Length')
              total_size = -1
              if content_length is not None:
                total_size = int(content_length.strip())
              count = 0
              while 1:
                chunk = response.read(chunk_size)
                count += 1
                if not chunk:
                  if reporthook:
                    # Final call reports the download as complete.
                    reporthook(count, total_size, total_size)
                  break
                if reporthook:
                  reporthook(count, chunk_size, total_size)
                yield chunk

            response = urlopen(url, data)
            with open(filename, 'wb') as fd:
              for chunk in chunk_read(response, reporthook=reporthook):
                fd.write(chunk)
Project: fastecdsa    Author: AntonKueltz
def test_rfc6979(self):
        text = urlopen('https://tools.ietf.org/rfc/rfc6979.txt').read().decode()
        curve_tests = findall(r'curve: NIST P-192(.*)curve: NIST P-224', text, flags=DOTALL)[0]

        q = int(findall(r'q = ([0-9A-F]*)', curve_tests)[0], 16)
        x = int(findall(r'x = ([0-9A-F]*)', curve_tests)[0], 16)

        test_regex = r'With SHA-(\d+), message = "([a-zA-Z]*)":\n' \
                     r'\s*k = ([0-9A-F]*)\n' \
                     r'\s*r = ([0-9A-F]*)\n' \
                     r'\s*s = ([0-9A-F]*)\n'

        hash_lookup = {
            '1': sha1,
            '224': sha224,
            '256': sha256,
            '384': sha384,
            '512': sha512
        }

        for test in findall(test_regex, curve_tests):
            h = hash_lookup[test[0]]
            msg = test[1]
            k = int(test[2], 16)
            r = int(test[3], 16)
            s = int(test[4], 16)

            self.assertEqual(k, RFC6979(msg, x, q, h).gen_nonce())
            self.assertEqual((r, s), sign(msg, x, curve=P192, hashfunc=h))
Project: fastecdsa    Author: AntonKueltz
def test_rfc6979(self):
        text = urlopen('https://tools.ietf.org/rfc/rfc6979.txt').read().decode()
        curve_tests = findall(r'curve: NIST P-224(.*)curve: NIST P-256', text, flags=DOTALL)[0]

        q = int(findall(r'q = ([0-9A-F]*)', curve_tests)[0], 16)
        x = int(findall(r'x = ([0-9A-F]*)', curve_tests)[0], 16)

        test_regex = r'With SHA-(\d+), message = "([a-zA-Z]*)":\n' \
                     r'\s*k = ([0-9A-F]*)\n' \
                     r'\s*r = ([0-9A-F]*)\n' \
                     r'\s*s = ([0-9A-F]*)\n'

        hash_lookup = {
            '1': sha1,
            '224': sha224,
            '256': sha256,
            '384': sha384,
            '512': sha512
        }

        for test in findall(test_regex, curve_tests):
            h = hash_lookup[test[0]]
            msg = test[1]
            k = int(test[2], 16)
            r = int(test[3], 16)
            s = int(test[4], 16)

            self.assertEqual(k, RFC6979(msg, x, q, h).gen_nonce())
            self.assertEqual((r, s), sign(msg, x, curve=P224, hashfunc=h))
Project: fastecdsa    Author: AntonKueltz
def test_rfc6979(self):
        text = urlopen('https://tools.ietf.org/rfc/rfc6979.txt').read().decode()
        curve_tests = findall(r'curve: NIST P-256(.*)curve: NIST P-384', text, flags=DOTALL)[0]

        q = int(findall(r'q = ([0-9A-F]*)', curve_tests)[0], 16)
        x = int(findall(r'x = ([0-9A-F]*)', curve_tests)[0], 16)

        test_regex = r'With SHA-(\d+), message = "([a-zA-Z]*)":\n' \
                     r'\s*k = ([0-9A-F]*)\n' \
                     r'\s*r = ([0-9A-F]*)\n' \
                     r'\s*s = ([0-9A-F]*)\n'

        hash_lookup = {
            '1': sha1,
            '224': sha224,
            '256': sha256,
            '384': sha384,
            '512': sha512
        }

        for test in findall(test_regex, curve_tests):
            h = hash_lookup[test[0]]
            msg = test[1]
            k = int(test[2], 16)
            r = int(test[3], 16)
            s = int(test[4], 16)

            self.assertEqual(k, RFC6979(msg, x, q, h).gen_nonce())
            self.assertEqual((r, s), sign(msg, x, curve=P256, hashfunc=h))
Project: fastecdsa    Author: AntonKueltz
def test_rfc6979(self):
        text = urlopen('https://tools.ietf.org/rfc/rfc6979.txt').read().decode()
        curve_tests = findall(r'curve: NIST P-384(.*)curve: NIST P-521', text, flags=DOTALL)[0]

        q_parts = findall(r'q = ([0-9A-F]*)\n\s*([0-9A-F]*)', curve_tests)[0]
        q = int(q_parts[0] + q_parts[1], 16)
        x_parts = findall(r'x = ([0-9A-F]*)\n\s*([0-9A-F]*)', curve_tests)[0]
        x = int(x_parts[0] + x_parts[1], 16)

        test_regex = r'With SHA-(\d+), message = "([a-zA-Z]*)":\n' \
                     r'\s*k = ([0-9A-F]*)\n\s*([0-9A-F]*)\n' \
                     r'\s*r = ([0-9A-F]*)\n\s*([0-9A-F]*)\n' \
                     r'\s*s = ([0-9A-F]*)\n\s*([0-9A-F]*)\n'

        hash_lookup = {
            '1': sha1,
            '224': sha224,
            '256': sha256,
            '384': sha384,
            '512': sha512
        }

        for test in findall(test_regex, curve_tests):
            h = hash_lookup[test[0]]
            msg = test[1]
            k = int(test[2] + test[3], 16)
            r = int(test[4] + test[5], 16)
            s = int(test[6] + test[7], 16)

            self.assertEqual(k, RFC6979(msg, x, q, h).gen_nonce())
            self.assertEqual((r, s), sign(msg, x, curve=P384, hashfunc=h))
Project: fastecdsa    Author: AntonKueltz
def test_rfc6979(self):
        text = urlopen('https://tools.ietf.org/rfc/rfc6979.txt').read().decode()
        curve_tests = findall(r'curve: NIST P-521(.*)curve: NIST K-163', text, flags=DOTALL)[0]

        q_parts = findall(r'q = ([0-9A-F]*)\n\s*([0-9A-F]*)\n\s*([0-9A-F]*)', curve_tests)[0]
        q = int(q_parts[0] + q_parts[1] + q_parts[2], 16)
        x_parts = findall(r'x = ([0-9A-F]*)\n\s*([0-9A-F]*)\n\s*([0-9A-F]*)', curve_tests)[0]
        x = int(x_parts[0] + x_parts[1] + x_parts[2], 16)

        test_regex = r'With SHA-(\d+), message = "([a-zA-Z]*)":\n' \
                     r'\s*k = ([0-9A-F]*)\n\s*([0-9A-F]*)\n\s*([0-9A-F]*)\n' \
                     r'\s*r = ([0-9A-F]*)\n\s*([0-9A-F]*)\n\s*([0-9A-F]*)\n' \
                     r'\s*s = ([0-9A-F]*)\n\s*([0-9A-F]*)\n\s*([0-9A-F]*)\n'

        hash_lookup = {
            '1': sha1,
            '224': sha224,
            '256': sha256,
            '384': sha384,
            '512': sha512
        }

        for test in findall(test_regex, curve_tests):
            h = hash_lookup[test[0]]
            msg = test[1]
            k = int(test[2] + test[3] + test[4], 16)
            r = int(test[5] + test[6] + test[7], 16)
            s = int(test[8] + test[9] + test[10], 16)

            self.assertEqual(k, RFC6979(msg, x, q, h).gen_nonce())
            self.assertEqual((r, s), sign(msg, x, curve=P521, hashfunc=h))
Project: sparksteps    Author: jwplayer
def get_demand_price(aws_region, instance_type):
    """Get AWS instance demand price.

    >>> print(get_demand_price('us-east-1', 'm4.2xlarge'))
    """
    soup = BeautifulSoup(urlopen(EC2_INSTANCES_INFO_URL), 'html.parser')
    table = soup.find('table', {'id': 'data'})
    row = table.find(id=instance_type)
    td = row.find('td', {'class': 'cost-ondemand-linux'})
    region_prices = json.loads(td['data-pricing'])
    return float(region_prices[aws_region])
Project: eclcli    Author: nttcom
def do_stack_adopt(hc, args):
    '''Adopt a stack.'''
    env_files, env = template_utils.process_multiple_environments_and_files(
        env_paths=args.environment_file)

    if not args.adopt_file:
        raise exc.CommandError(_('Need to specify %(arg)s') %
                               {'arg': '--adopt-file'})

    adopt_url = utils.normalise_file_path_to_url(args.adopt_file)
    adopt_data = request.urlopen(adopt_url).read()

    if not len(adopt_data):
        raise exc.CommandError('Invalid adopt-file, no data!')

    if args.create_timeout:
        logger.warning(_LW('%(arg1)s is deprecated, '
                           'please use %(arg2)s instead'),
                       {
                           'arg1': '-c/--create-timeout',
                           'arg2': '-t/--timeout'})

    fields = {
        'stack_name': args.name,
        'disable_rollback': not(args.enable_rollback),
        'adopt_stack_data': adopt_data,
        'parameters': utils.format_parameters(args.parameters),
        'files': dict(list(env_files.items())),
        'environment': env
    }

    timeout = args.timeout or args.create_timeout
    if timeout:
        fields['timeout_mins'] = timeout

    hc.stacks.create(**fields)
    do_stack_list(hc)
Project: eclcli    Author: nttcom
def do_resource_signal(hc, args):
    '''Send a signal to a resource.'''
    fields = {'stack_id': args.id,
              'resource_name': args.resource}
    data = args.data
    data_file = args.data_file
    if data and data_file:
        raise exc.CommandError(_('Can only specify one of data and data-file'))
    if data_file:
        data_url = utils.normalise_file_path_to_url(data_file)
        data = request.urlopen(data_url).read()
    if data:
        if isinstance(data, six.binary_type):
            data = data.decode('utf-8')
        try:
            data = jsonutils.loads(data)
        except ValueError as ex:
            raise exc.CommandError(_('Data should be in JSON format: %s') % ex)
        if not isinstance(data, dict):
            raise exc.CommandError(_('Data should be a JSON dict'))
        fields['data'] = data
    try:
        hc.resources.signal(**fields)
    except exc.HTTPNotFound:
        raise exc.CommandError(_('Stack or resource not found: '
                                 '%(id)s %(resource)s') %
                               {'id': args.id, 'resource': args.resource})
Project: eclcli    Author: nttcom
def do_config_create(hc, args):
    '''Create a software configuration.'''
    config = {
        'group': args.group,
        'config': ''
    }

    defn = {}
    if args.definition_file:
        defn_url = utils.normalise_file_path_to_url(
            args.definition_file)
        defn_raw = request.urlopen(defn_url).read() or '{}'
        defn = yaml.load(defn_raw, Loader=template_format.yaml_loader)

    config['inputs'] = defn.get('inputs', [])
    config['outputs'] = defn.get('outputs', [])
    config['options'] = defn.get('options', {})

    if args.config_file:
        config_url = utils.normalise_file_path_to_url(
            args.config_file)
        config['config'] = request.urlopen(config_url).read()

    # build a mini-template with a config resource and validate it
    validate_template = {
        'heat_template_version': '2013-05-23',
        'resources': {
            args.name: {
                'type': 'OS::Heat::SoftwareConfig',
                'properties': config
            }
        }
    }
    hc.stacks.validate(template=validate_template)

    config['name'] = args.name

    sc = hc.software_configs.create(**config)
    print(jsonutils.dumps(sc.to_dict(), indent=2))
Project: eclcli    Author: nttcom
def _create_config(heat_client, args):
    config = {
        'group': args.group,
        'config': ''
    }

    defn = {}
    if args.definition_file:
        defn_url = heat_utils.normalise_file_path_to_url(
            args.definition_file)
        defn_raw = request.urlopen(defn_url).read() or '{}'
        defn = yaml.load(defn_raw, Loader=template_format.yaml_loader)

    config['inputs'] = defn.get('inputs', [])
    config['outputs'] = defn.get('outputs', [])
    config['options'] = defn.get('options', {})

    if args.config_file:
        config_url = heat_utils.normalise_file_path_to_url(
            args.config_file)
        config['config'] = request.urlopen(config_url).read()

    # build a mini-template with a config resource and validate it
    validate_template = {
        'heat_template_version': '2013-05-23',
        'resources': {
            args.name: {
                'type': 'OS::Heat::SoftwareConfig',
                'properties': config
            }
        }
    }
    heat_client.stacks.validate(template=validate_template)

    config['name'] = args.name
    sc = heat_client.software_configs.create(**config).to_dict()
    rows = list(six.itervalues(sc))
    columns = list(six.iterkeys(sc))
    return columns, rows
Project: eclcli    Author: nttcom
def _resource_signal(heat_client, args):
    fields = {'stack_id': args.stack,
              'resource_name': args.resource}
    data = args.data
    data_file = args.data_file
    if data and data_file:
        raise exc.CommandError(_('Should only specify one of data or '
                                 'data-file'))

    if data_file:
        data_url = heat_utils.normalise_file_path_to_url(data_file)
        data = request.urlopen(data_url).read()

    if data:
        try:
            data = jsonutils.loads(data)
        except ValueError as ex:
            raise exc.CommandError(_('Data should be in JSON format: %s') % ex)
        if not isinstance(data, dict):
            raise exc.CommandError(_('Data should be a JSON dict'))

        fields['data'] = data
    try:
        heat_client.resources.signal(**fields)
    except heat_exc.HTTPNotFound:
        raise exc.CommandError(_('Stack %(stack)s or resource %(resource)s '
                                 'not found.') %
                               {'stack': args.stack,
                                'resource': args.resource})
Project: eclcli    Author: nttcom
def take_action(self, parsed_args):
        self.log.debug('take_action(%s)', parsed_args)

        client = self.app.client_manager.orchestration

        env_files, env = (
            template_utils.process_multiple_environments_and_files(
                env_paths=parsed_args.environment))

        adopt_url = heat_utils.normalise_file_path_to_url(
            parsed_args.adopt_file)
        adopt_data = request.urlopen(adopt_url).read().decode('utf-8')

        fields = {
            'stack_name': parsed_args.name,
            'disable_rollback': not parsed_args.enable_rollback,
            'adopt_stack_data': adopt_data,
            'parameters': heat_utils.format_parameters(parsed_args.parameter),
            'files': dict(list(env_files.items())),
            'environment': env,
            'timeout': parsed_args.timeout
        }

        stack = client.stacks.create(**fields)['stack']

        if parsed_args.wait:
            stack_status, msg = event_utils.poll_for_events(
                client, parsed_args.name, action='ADOPT')
            if stack_status == 'ADOPT_FAILED':
                raise exc.CommandError(msg)

        return _show_stack(client, stack['id'], format='table', short=True)
Project: cloud-custodian    Author: capitalone
def resolve(self, uri):
        if uri.startswith('s3://'):
            contents = self.get_s3_uri(uri)
        else:
            # TODO: in the case of file: content and untrusted
            # third parties, uri would need sanitization
            fh = urlopen(uri)
            contents = fh.read().decode('utf-8')
            fh.close()
        self.cache.save(("uri-resolver", uri), contents)
        return contents
Project: flask-ask    Author: johnwheeler
def _get_json_events_from_wikipedia(month, date):
    url = "{}{}_{}".format(URL_PREFIX, month, date)
    data = urlopen(url).read().decode('utf-8')
    return _parse_json(data)
Project: flask-ask    Author: johnwheeler
def load_certificate(cert_url):
    if not _valid_certificate_url(cert_url):
        raise VerificationError("Certificate URL verification failed")
    cert_data = urlopen(cert_url).read()
    cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_data)
    if not _valid_certificate(cert):
        raise VerificationError("Certificate verification failed")
    return cert
Project: cegr-galaxy    Author: seqcode
def get(url):
    try:
        return json.loads(urlopen(url).read())
    except ValueError as e:
        stop_err(str(e))
Project: cegr-galaxy    Author: seqcode
def post(api_key, url, data):
    url = make_url(api_key, url)
    req = Request(url, headers={'Content-Type': 'application/json'},
                  data=json.dumps(data).encode('utf-8'))
    return json.loads(urlopen(req).read())
Project: gatk-cwl-generator    Author: wtsi-hgi
def example_data():
    if not os.path.isfile("cwl-example-data/chr22_cwl_test.cram"):
        from six.moves.urllib.request import urlopen
        import tarfile
        print("Downloading and extracting cwl-example-data")
        tgz = urlopen("https://cwl-example-data.cog.sanger.ac.uk/chr22_cwl_test.tgz")
        tar = tarfile.open(fileobj=tgz, mode="r|gz")
        tar.extractall(path="cwl-example-data")
        tar.close()
        tgz.close()