Python urllib module: urlcleanup() code examples

The following 7 code examples, extracted from open-source Python projects, illustrate how to use urllib.urlcleanup().
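For context, here is a minimal sketch of the usual pattern (Python 2, where urlretrieve() lives in urllib; the URL and filename are placeholders): urlretrieve() may leave temporary files in an internal cache, and urlcleanup() removes them afterwards.

import urllib

try:
    # urlretrieve() returns (local_filename, headers).
    filename, headers = urllib.urlretrieve('http://example.com/archive.tar.gz',
                                           'archive.tar.gz')
finally:
    urllib.urlcleanup()  # remove any temporary files left behind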

Project: chromium-libs-media-freeworld    Author: rpmfusion
def download_chrome_latest_rpm(arch):

  chrome_rpm = 'google-chrome-%s_current_%s.rpm' % (version_string, arch)
  path = 'https://dl.google.com/linux/direct/%s' % chrome_rpm

  if (args.clean):
    remove_file_if_exists(chrome_rpm)

  # Let's make sure we haven't already downloaded it.
  if os.path.isfile("./%s" % chrome_rpm):
    print "%s already exists!" % chrome_rpm
  else:
    print "Downloading %s" % path
    # Perhaps look at using python-progressbar at some point?
    # urlretrieve() returns (filename, headers); keep the headers for the check below.
    info = urllib.urlretrieve(path, chrome_rpm, reporthook=dlProgress)[1]
    urllib.urlcleanup()
    print ""
    if info["Content-Type"] not in ("binary/octet-stream",
                                    "application/x-redhat-package-manager"):
      print 'Chrome %s rpms are not on servers.' % version_string
      remove_file_if_exists(chrome_rpm)
      sys.exit(1)


Project: anita    Author: gson1703
def my_urlretrieve(url, filename):
    r = MyURLopener().retrieve(url, filename)
    if sys.version_info >= (2, 7, 12):
        # Work around https://bugs.python.org/issue27973
        urllib.urlcleanup()
    return r

# Download a file, cleaning up the partial file if the transfer
# fails or is aborted before completion.
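The function this comment introduces was cut off in the excerpt; a hypothetical sketch of what it could look like, reusing my_urlretrieve() from above (the name download_file and the body are assumptions, not anita's actual code):

import os

def download_file(url, filename):
    try:
        return my_urlretrieve(url, filename)
    except:
        # A bare except also catches KeyboardInterrupt, so both a failed
        # and an aborted transfer remove the partial file before re-raising.
        if os.path.exists(filename):
            os.unlink(filename)
        raise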
Project: cninfospider    Author: CloudDataX
def downloadPDF(self, companyFolder, reportName, downloadURL, downloadTime):
        downloadTime -= 1
        if downloadTime == 0:
            return False

        # Use the last dot so a downloadURL containing other dots still yields the extension.
        suffix = downloadURL[downloadURL.rfind('.'):].lower()
        print "downloadPDF suffix", suffix

        # os.path.join uses the platform's path separator.
        pdfPath = os.path.join(companyFolder, reportName + '.pdf')
        filePath = os.path.join(companyFolder, reportName + suffix)

        if ".pdf" != suffix and os.path.exists(pdfPath):
            os.remove(pdfPath)

        realURL = self.homePage + "/" + downloadURL
        print "Download pdfPath:", filePath, ' realURL:',realURL
        try:
            if not os.path.exists(filePath):
                urllib.urlretrieve(realURL, filePath)
            else:
                print 'WRN:', filePath, 'already exists'
                return filePath
        except Exception:
            # Clear urlretrieve's cache and retry with one fewer attempt remaining.
            urllib.urlcleanup()
            return self.downloadPDF(companyFolder, reportName, downloadURL, downloadTime)

        urllib.urlcleanup()
        gc.collect()
        return pdfPath
Project: Downloader    Author: kashyap32
def song_download():

        song = user_input('Enter the name of the song: ')

        try:
            query_string = encode({"search_query" : song})
            content = urlopen("http://www.youtube.com/results?" + query_string)

            if version == 3:
                # Python 3: the response body is bytes, so decode it first.
                search_results = re.findall(r'href=\"\/watch\?v=(.{11})', content.read().decode())
            else:
                # Python 2: the response body is already a str.
                search_results = re.findall(r'href=\"\/watch\?v=(.{11})', content.read())

        except:
            print('Search failed!')
            exit(1)

        # youtube2mp3 API
        downloadLinkOnly = 'http://www.youtubeinmp3.com/fetch/?video=' + 'http://www.youtube.com/watch?v=' + search_results[0]
        try:
            print('Downloading %s' % song)
            urllib.urlretrieve(downloadLinkOnly, filename='%s.mp3' % song)
            urllib.urlcleanup()
        except:
            print('Error downloading %s' % song)
            exit(1)
Project: chromium-libs-media-freeworld    Author: rpmfusion
def download_file_and_compare_hashes(file_to_download):

  hashes_file = '%s.hashes' % file_to_download

  if (args.clean):
    remove_file_if_exists(file_to_download)
    remove_file_if_exists(hashes_file)

  # Let's make sure we haven't already downloaded it.
  tarball_local_file = "./%s" % file_to_download
  if os.path.isfile(tarball_local_file):
    print "%s already exists!" % file_to_download
  else:
    path = '%s%s' % (chromium_url, file_to_download)
    print "Downloading %s" % path
    # Perhaps look at using python-progressbar at some point?
    # urlretrieve() returns (filename, headers); keep the headers for the check below.
    info = urllib.urlretrieve(path, file_to_download, reporthook=dlProgress)[1]
    urllib.urlcleanup()
    print ""
    if (info["Content-Type"] != "application/x-tar"):
      print 'Chromium tarballs for %s are not on servers.' % file_to_download
      remove_file_if_exists(file_to_download)
      sys.exit(1)

  hashes_local_file = "./%s" % hashes_file
  if not os.path.isfile(hashes_local_file):
    path = '%s%s' % (chromium_url, hashes_file)
    print "Downloading %s" % path
    # Perhaps look at using python-progressbar at some point?
    # The headers are not needed here, so the return value is discarded.
    urllib.urlretrieve(path, hashes_file, reporthook=dlProgress)
    urllib.urlcleanup()
    print ""

  if os.path.isfile(hashes_local_file):
    with open(hashes_local_file, "r") as input_file:
      md5sum = input_file.readline().split()[1]
      md5 = hashlib.md5()
      with open(tarball_local_file, "rb") as f:
        for block in iter(lambda: f.read(65536), b""):
          md5.update(block)
        if (md5sum == md5.hexdigest()):
          print "MD5 matches for %s!" % file_to_download
        else:
          print "MD5 mismatch for %s!" % file_to_download
          sys.exit(1)
  else:
    print "Cannot compare hashes for %s!" % file_to_download
Project: pefile.pypy    Author: cloudtracer
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
    import gc, copy_reg
    import _strptime, linecache
    dircache = test_support.import_module('dircache', deprecated=True)
    import urlparse, urllib, urllib2, mimetypes, doctest
    import struct, filecmp
    from distutils.dir_util import _path_created

    # Clear the warnings registry, so they can be displayed again
    for mod in sys.modules.values():
        if hasattr(mod, '__warningregistry__'):
            del mod.__warningregistry__

    # Restore some original values.
    warnings.filters[:] = fs
    copy_reg.dispatch_table.clear()
    copy_reg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)
    try:
        import zipimport
    except ImportError:
        pass # Run unmodified on platforms without zipimport support
    else:
        zipimport._zip_directory_cache.clear()
        zipimport._zip_directory_cache.update(zdc)

    # clear type cache
    sys._clear_type_cache()

    # Clear ABC registries, restoring previously saved ABC registries.
    for abc, registry in abcs.items():
        abc._abc_registry = registry.copy()
        abc._abc_cache.clear()
        abc._abc_negative_cache.clear()

    # Clear assorted module caches.
    _path_created.clear()
    re.purge()
    _strptime._regex_cache.clear()
    urlparse.clear_cache()
    urllib.urlcleanup()
    urllib2.install_opener(None)
    dircache.reset()
    linecache.clearcache()
    mimetypes._default_mime_types()
    filecmp._cache.clear()
    struct._clearcache()
    doctest.master = None
    try:
        import ctypes
    except ImportError:
        # Don't worry about resetting the cache if ctypes is not supported
        pass
    else:
        ctypes._reset_cache()

    # Collect cyclic trash.
    gc.collect()
Project: ndk-python    Author: gittor
This example is identical, line for line, to the dash_R_cleanup() function shown for pefile.pypy above.