Python urllib2 module, HTTPHandler() example source code

We extracted the following 50 code examples from open source Python projects to illustrate how to use urllib2.HTTPHandler().
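Before diving into the project examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: build an opener around urllib2.HTTPHandler, install it globally, then fetch. (Python 2; the URL is only a placeholder.)

import urllib2

# debuglevel=1 makes the handler print the raw HTTP request and response,
# which is the most common reason to construct HTTPHandler explicitly
http_handler = urllib2.HTTPHandler(debuglevel=1)
opener = urllib2.build_opener(http_handler)
urllib2.install_opener(opener)  # all later urllib2.urlopen() calls use this opener

response = urllib2.urlopen('http://example.com', timeout=10)
print response.getcode()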

Project: TACTIC-Handler    Author: listyque    | project source | file source
def download_from_url(url):
    proxy = env_server.get_proxy()
    if proxy['enabled']:
        server = proxy['server'].replace('http://', '')
        proxy_dict = {
            'http': 'http://{login}:{pass}@{0}'.format(server, **proxy)
        }
        proxy_handler = urllib2.ProxyHandler(proxy_dict)
        auth = urllib2.HTTPBasicAuthHandler()
        opener = urllib2.build_opener(proxy_handler, auth, urllib2.HTTPHandler)
        urllib2.install_opener(opener)

    run_thread = tc.ServerThread(env_inst.ui_main)
    run_thread.kwargs = dict(url=url, timeout=1)
    run_thread.routine = urllib2.urlopen
    run_thread.run()
    result_thread = tc.treat_result(run_thread, silent=True)
    if result_thread.isFailed():
        return False
    else:
        return result_thread.result
Project: dart    Author: lmco    | project source | file source
def download_vcpython27(self):
        """
        Download vcpython27 since some Windows 7 boxes have it and some don't.
        :return: None
        """

        self._prepare_for_download()

        logger.info('Beginning download of vcpython27... this may take a few minutes...')

        with open(os.path.join(DOWNLOADS_DIR, 'vcpython27.msi'), 'wb') as f:

            if self.PROXY is not None:
                opener = urllib2.build_opener(
                    urllib2.HTTPHandler(),
                    urllib2.HTTPSHandler(),
                    urllib2.ProxyHandler({'http': self.PROXY, 'https': self.PROXY})
                )
                urllib2.install_opener(opener)

            f.write(urllib2.urlopen(self.VCPYTHON27_DOWNLOAD_URL, timeout=self.DOWNLOAD_TIMEOUT).read())

        logger.debug('Download of vcpython27 complete')
Project: dart    Author: lmco    | project source | file source
def download_python(self):
        """
        Download Python
        :return: None
        """

        self._prepare_for_download()

        logger.info('Beginning download of python')

        with open(os.path.join(DOWNLOADS_DIR, 'python-installer.msi'), 'wb') as f:

            if self.PROXY is not None:
                opener = urllib2.build_opener(
                    urllib2.HTTPHandler(),
                    urllib2.HTTPSHandler(),
                    urllib2.ProxyHandler({'http': self.PROXY, 'https': self.PROXY})
                )
                urllib2.install_opener(opener)

            f.write(urllib2.urlopen(self.PYTHON_DOWNLOAD_URL, timeout=self.DOWNLOAD_TIMEOUT).read())

        logger.debug('Download of python complete')
Project: catchup4kodi    Author: catchup4kodi    | project source | file source
def _update_opener(self):
        '''
        Builds and installs a new opener to be used by all future calls to 
        :func:`urllib2.urlopen`.
        '''
        if self._http_debug:
            http = urllib2.HTTPHandler(debuglevel=1)
        else:
            http = urllib2.HTTPHandler()

        if self._proxy:
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
                                          urllib2.ProxyHandler({'http': 
                                                                self._proxy}), 
                                          urllib2.HTTPBasicAuthHandler(),
                                          http)

        else:
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
                                          urllib2.HTTPBasicAuthHandler(),
                                          http)
        urllib2.install_opener(opener)
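Because install_opener replaces the process-global opener, the effect of a method like this can be reproduced standalone. A hedged sketch, with a local cookie jar standing in for self._cj:

import cookielib
import urllib2

cj = cookielib.CookieJar()
http = urllib2.HTTPHandler(debuglevel=1)  # echo the raw request/response
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj),
                              urllib2.HTTPBasicAuthHandler(),
                              http)
urllib2.install_opener(opener)
urllib2.urlopen('http://example.com').read()  # cookies from the reply land in cj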
Project: crawler    Author: brantou    | project source | file source
def check_gn_proxy(proxy, protocal_type='HTTP'):
    url = 'http://icanhazip.com'
    proxy_handler = urllib2.ProxyHandler({
        'http': 'http://' + proxy,
        'https': 'https://' + proxy,
    })
    if protocal_type == 'HTTPS':
        url = 'https://icanhazip.com'

    opener = urllib2.build_opener(proxy_handler, urllib2.HTTPHandler)
    try:
        response = opener.open(url, timeout=3)
        res_ip = response.read().strip()
        return response.code == 200 and res_ip == proxy.split(':')[0]
    except Exception:
        return False
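A possible call, assuming proxies are given as host:port strings (the address below is a documentation placeholder, not a real proxy):

if check_gn_proxy('203.0.113.7:8080'):
    print 'proxy is alive and non-transparent'
if check_gn_proxy('203.0.113.7:8080', protocal_type='HTTPS'):
    print 'proxy also relays HTTPS'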
Project: autoinjection    Author: ChengWiLL    | project source | file source
def error_handler(url):
    global HANDLE_ERRORS
    orig = HANDLE_ERRORS
    keepalive_handler = HTTPHandler()
    opener = urllib2.build_opener(keepalive_handler)
    urllib2.install_opener(opener)
    pos = {0: 'off', 1: 'on'}
    for i in (0, 1):
        print "  fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
        HANDLE_ERRORS = i
        try:
            fo = urllib2.urlopen(url)
            foo = fo.read()
            fo.close()
            try: status, reason = fo.status, fo.reason
            except AttributeError: status, reason = None, None
        except IOError, e:
            print "  EXCEPTION: %s" % e
            raise
        else:
            print "  status = %s, reason = %s" % (status, reason)
    HANDLE_ERRORS = orig
    hosts = keepalive_handler.open_connections()
    print "open connections:", hosts
    keepalive_handler.close_all()
Project: autoinjection    Author: ChengWiLL    | project source | file source
def comp(N, url):
    print '  making %i connections to:\n  %s' % (N, url)

    sys.stdout.write('  first using the normal urllib handlers')
    # first use normal opener
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    t1 = fetch(N, url)
    print '  TIME: %.3f s' % t1

    sys.stdout.write('  now using the keepalive handler       ')
    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    t2 = fetch(N, url)
    print '  TIME: %.3f s' % t2
    print '  improvement factor: %.2f' % (t1/t2, )
Project: Extractor    Author: llq007    | project source | file source
def getUnRedirectUrl(url,timeout=10):
    req = urllib2.Request(url)
    debug_handler = urllib2.HTTPHandler(debuglevel = 0)
    opener = urllib2.build_opener(debug_handler, RedirctHandler)

    html = None
    response = None
    error_info = None
    try:
        response = opener.open(url,timeout=timeout)
        html = response.read()
    except urllib2.URLError as e:
        if hasattr(e, 'headers'):
            error_info = e.headers
        elif hasattr(e, 'reason'):
            error_info = e.reason
    finally:
        if response:
            response.close()
    if html:
        return html
    else:
        return error_info
Project: lightbulb-framework    Author: lightbulb-framework    | project source | file source
def __init__(self, configuration):
        self.setup(configuration)
        self.echo = None
        if "ECHO" in configuration:
            self.echo = configuration['ECHO']
        if self.proxy_scheme is not None and self.proxy_host is not None and \
                        self.proxy_port is not None:
            credentials = ""
            if self.proxy_username is not None and self.proxy_password is not None:
                credentials = self.proxy_username + ":" + self.proxy_password + "@"
            proxyDict = {
                self.proxy_scheme: self.proxy_scheme + "://" + credentials +
                                                    self.proxy_host + ":" + self.proxy_port
            }

            proxy = urllib2.ProxyHandler(proxyDict)

            if credentials != '':
                auth = urllib2.HTTPBasicAuthHandler()
                opener = urllib2.build_opener(proxy, auth, urllib2.HTTPHandler)
            else:
                opener = urllib2.build_opener(proxy)
            urllib2.install_opener(opener)
Project: oasis    Author: mhfowler    | project source | file source
def __init__(self, access_token_key, access_token_secret, consumer_key, consumer_secret):
        self.access_token_key = access_token_key
        self.access_token_secret = access_token_secret
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret

        _debug = 0

        self.oauth_token = oauth.Token(key=self.access_token_key, secret=self.access_token_secret)
        self.oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)

        self.signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()


        # note: this project apparently imports urllib2 under the name
        # "urllib" (e.g. "import urllib2 as urllib"); the stdlib urllib
        # module itself has no HTTPHandler
        self.http_handler  = urllib.HTTPHandler(debuglevel=_debug)
        self.https_handler = urllib.HTTPSHandler(debuglevel=_debug)
Project: Eagle    Author: magerx    | project source | file source
def error_handler(url):
    global HANDLE_ERRORS
    orig = HANDLE_ERRORS
    keepalive_handler = HTTPHandler()
    opener = urllib2.build_opener(keepalive_handler)
    urllib2.install_opener(opener)
    pos = {0: 'off', 1: 'on'}
    for i in (0, 1):
        print "  fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
        HANDLE_ERRORS = i
        try:
            fo = urllib2.urlopen(url)
            foo = fo.read()
            fo.close()
            try: status, reason = fo.status, fo.reason
            except AttributeError: status, reason = None, None
        except IOError, e:
            print "  EXCEPTION: %s" % e
            raise
        else:
            print "  status = %s, reason = %s" % (status, reason)
    HANDLE_ERRORS = orig
    hosts = keepalive_handler.open_connections()
    print "open connections:", ' '.join(hosts)
    keepalive_handler.close_all()
Project: Eagle    Author: magerx    | project source | file source
def comp(N, url):
    print '  making %i connections to:\n  %s' % (N, url)

    sys.stdout.write('  first using the normal urllib handlers')
    # first use normal opener
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    t1 = fetch(N, url)
    print '  TIME: %.3f s' % t1

    sys.stdout.write('  now using the keepalive handler       ')
    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    t2 = fetch(N, url)
    print '  TIME: %.3f s' % t2
    print '  improvement factor: %.2f' % (t1/t2, )
Project: Eagle    Author: magerx    | project source | file source
def send_common_request(url, is_post, cookie, para=''):
    """
    Send a plain request to the target web server.
    :url:       target URL
    :is_post:   2 for POST, otherwise GET
    :cookie:    cookie header value
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:42.0) Gecko/20100101 Firefox/42.0',
               'Cookie': cookie
               }
    # dns cache
    # socket.getaddrinfo = new_getaddrinfo

    try:
        encoding_support = ContentEncodingProcessor()
        opener = urllib2.build_opener(encoding_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener)
        if is_post == 2:  # post
            # url, query = url.split('?', 1)
            return urllib2.urlopen(urllib2.Request(url, para, headers=headers)).read()
        else:
            return urllib2.urlopen(urllib2.Request('?'.join([url, para]), headers=headers)).read()
    except:
        return ''
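A hedged usage sketch (URL, cookie, and parameters are placeholders):

# GET: para is appended to the URL after '?'
html = send_common_request('http://example.com/item.php', 1,
                           'PHPSESSID=abc123', para='id=1')
# POST: is_post == 2 sends para as the request body
html = send_common_request('http://example.com/item.php', 2,
                           'PHPSESSID=abc123', para='id=1')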
Project: lambda_utils    Author: CloudHeads    | project source | file source
def send_signal(event, response_status, reason, response_data=None):
    response_body = json.dumps(
        {
            'Status': response_status,
            'Reason': str(reason or 'ReasonCanNotBeNone'),
            'PhysicalResourceId': event.get('PhysicalResourceId', event['LogicalResourceId']),
            'StackId': event['StackId'],
            'RequestId': event['RequestId'],
            'LogicalResourceId': event['LogicalResourceId'],
            'Data': response_data or {}
        },
        sort_keys=True,
    )
    logging.debug(response_body)
    opener = build_opener(HTTPHandler)
    request = Request(event['ResponseURL'], data=response_body)
    request.add_header('Content-Type', '')
    request.add_header('Content-Length', len(response_body))
    request.get_method = lambda: 'PUT'
    opener.open(request)
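A hedged sketch of how such a signal function is typically driven from a Lambda-backed CloudFormation custom resource (the handler name and data values are assumptions, not part of this snippet):

def handler(event, context):
    try:
        # ... create, update, or delete the resource here ...
        send_signal(event, 'SUCCESS', 'resource ready', response_data={'Key': 'value'})
    except Exception as e:
        send_signal(event, 'FAILED', str(e))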
Project: rest_http_test    Author: jie123108    | project source | file source
def GetLocation(url,timeout=25):
    req = urllib2.Request(url)
    debug_handler = urllib2.HTTPHandler()
    opener = urllib2.build_opener(debug_handler, RedirctHandler)

    location = None
    error_info = None
    try:
        opener.open(url,timeout=timeout)
    except urllib2.URLError as e:
        if hasattr(e, 'code'):
            error_info = e.code
        elif hasattr(e, 'reason'):
            error_info = e.reason
    except RedirectException as e:
        location = e.location
    if location:
        return location, None
    else:
        return False, error_info
Project: Helix    Author: 3lackrush    | project source | file source
def error_handler(url):
    global HANDLE_ERRORS
    orig = HANDLE_ERRORS
    keepalive_handler = HTTPHandler()
    opener = urllib2.build_opener(keepalive_handler)
    urllib2.install_opener(opener)
    pos = {0: 'off', 1: 'on'}
    for i in (0, 1):
        print "  fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
        HANDLE_ERRORS = i
        try:
            fo = urllib2.urlopen(url)
            foo = fo.read()
            fo.close()
            try: status, reason = fo.status, fo.reason
            except AttributeError: status, reason = None, None
        except IOError, e:
            print "  EXCEPTION: %s" % e
            raise
        else:
            print "  status = %s, reason = %s" % (status, reason)
    HANDLE_ERRORS = orig
    hosts = keepalive_handler.open_connections()
    print "open connections:", hosts
    keepalive_handler.close_all()
Project: Helix    Author: 3lackrush    | project source | file source
def comp(N, url):
    print '  making %i connections to:\n  %s' % (N, url)

    sys.stdout.write('  first using the normal urllib handlers')
    # first use normal opener
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    t1 = fetch(N, url)
    print '  TIME: %.3f s' % t1

    sys.stdout.write('  now using the keepalive handler       ')
    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    t2 = fetch(N, url)
    print '  TIME: %.3f s' % t2
    print '  improvement factor: %.2f' % (t1/t2, )
Project: autoscan    Author: b01u    | project source | file source
def error_handler(url):
    global HANDLE_ERRORS
    orig = HANDLE_ERRORS
    keepalive_handler = HTTPHandler()
    opener = urllib2.build_opener(keepalive_handler)
    urllib2.install_opener(opener)
    pos = {0: 'off', 1: 'on'}
    for i in (0, 1):
        print "  fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
        HANDLE_ERRORS = i
        try:
            fo = urllib2.urlopen(url)
            foo = fo.read()
            fo.close()
            try: status, reason = fo.status, fo.reason
            except AttributeError: status, reason = None, None
        except IOError, e:
            print "  EXCEPTION: %s" % e
            raise
        else:
            print "  status = %s, reason = %s" % (status, reason)
    HANDLE_ERRORS = orig
    hosts = keepalive_handler.open_connections()
    print "open connections:", ' '.join(hosts)
    keepalive_handler.close_all()
Project: autoscan    Author: b01u    | project source | file source
def comp(N, url):
    print '  making %i connections to:\n  %s' % (N, url)

    sys.stdout.write('  first using the normal urllib handlers')
    # first use normal opener
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    t1 = fetch(N, url)
    print '  TIME: %.3f s' % t1

    sys.stdout.write('  now using the keepalive handler       ')
    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    t2 = fetch(N, url)
    print '  TIME: %.3f s' % t2
    print '  improvement factor: %.2f' % (t1/t2, )
Project: TinyStockToolKit    Author: lawinse    | project source | file source
def __init__(self,timeout=10,threads=None,stacksize=32768*16,loginfunc=None):
        #proxy_support = urllib2.ProxyHandler({'http':'http://localhost:3128'})
        cookie_support = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
        encoding_support = ContentEncodingProcessor()
        #self.opener = urllib2.build_opener(cookie_support,encoding_support,proxy_support,urllib2.HTTPHandler)
        self.opener = urllib2.build_opener(cookie_support,encoding_support,urllib2.HTTPHandler)
        self.req = urllib2.Request('http://www.hsbc.com')
        socket.setdefaulttimeout(timeout)
        self.q_req = Queue()
        self.q_ans = Queue()
        self.lock = Lock()
        self.running = 0
        if loginfunc:
            self.opener = loginfunc(self.opener)
        if threads:
            self.threads = threads
            stack_size(stacksize)
            for i in range(threads):
                t = Thread(target=self.threadget)
                t.setDaemon(True)
                t.start()
Project: DoubanSpider    Author: ruiming    | project source | file source
def __init__(self):
        # proxy settings
        self.proxy_url = proxyList[3]
        self.proxy = urllib2.ProxyHandler({"http": self.proxy_url})
        # target host and request headers
        self.hostURL = 'http://book.douban.com/tag/'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.47 (KHTML, like Gecko)'
                          ' Chrome/48.1.2524.116 Safari/537.36',
            'Referer': 'http://book.douban.com/',
            'Host': 'book.douban.com',
            'Upgrade-Insecure-Requests': '1',
            'Connection': 'keep-alive'
        }
        # build the opener
        self.cookie = cookielib.LWPCookieJar()
        self.cookieHandler = urllib2.HTTPCookieProcessor(self.cookie)
        self.opener = urllib2.build_opener(self.cookieHandler, self.proxy, urllib2.HTTPHandler)

Project: brush    Author: chenshiyang2015    | project source | file source
def Check(ip):
    try:
        log.step_normal('checking proxy ip: [%s]' % ip)
        proxy_support = urllib2.ProxyHandler({'http': 'http://' + ip})
        opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener)
        request = urllib2.Request('http://www.baidu.com')
        request.add_header("cookie", env.COOKIE)
        request.add_header("User-Agent", getUA())
        content = urllib2.urlopen(request, timeout=4).read()

        if len(content) >= 1000:
            log.step_normal('add proxy [%s]' % ip)
            return ip
        else:
            log.step_normal('response too short, dropping proxy IP [%s]' % ip)
            praserJsonFile()
    except (URLError, HTTPError) as e:
        log.step_normal('proxy ip check failed [%s]' % ip)
        praserJsonFile()
Project: brush    Author: chenshiyang2015    | project source | file source
def get_local_ip(ip):
    try:
        proxy_support = urllib2.ProxyHandler({'http': 'http://' + ip})
        opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener)
        request = urllib2.Request('http://ip.chinaz.com/getip.aspx')
        # request.add_header("cookie", env.COOKIE)
        request.add_header("User-Agent", getUA())
        fp = urllib2.urlopen(request)
        mybytes = fp.read()
        # note that Python3 does not read the html code as string
        # but as html code bytearray, convert to string with
        mystr = mybytes.decode('utf-8')
        fp.close()
        ip = mystr.find("ip")
        add = mystr.find("address")
        ip = mystr[ip + 4:add - 2]
        address = mystr[add + 9:-2]
        return [ip, address]

    except (HTTPError, URLError, Exception) as e:
        log.step_warning('failed to fetch local ip ---> %s' % e)
        return [ip, 'address']

Project: moodlescan    Author: inc0d3    | project source | file source
def httpConnection(url,  proxy):



    # TODO: enable NTLM authentication
    if (proxy.auth == "ntlm"):
        passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, proxy.url, proxy.user, proxy.password)
        auth = HTTPNtlmAuthHandler.HTTPNtlmAuthHandler(passman)
    else:
        passman = urllib2.HTTPPasswordMgr()
        passman.add_password(None, proxy.url, proxy.user, proxy.password)
        auth = urllib2.HTTPBasicAuthHandler(passman)


    if (proxy.url):
        # build_opener needs the handler object, not the URL string;
        # a separate name also avoids shadowing the proxy argument
        proxy_handler = urllib2.ProxyHandler({'http': proxy.url})
        opener = urllib2.build_opener(proxy_handler, auth, urllib2.HTTPHandler)
        urllib2.install_opener(opener)

    return urllib2.urlopen(url)
Project: lichking    Author: melonrun    | project source | file source
def check(proxy):
    import urllib2
    url = "http://connect.rom.miui.com/generate_204"
    proxy_handler = urllib2.ProxyHandler({'http': "http://" + proxy})
    opener = urllib2.build_opener(proxy_handler, urllib2.HTTPHandler)
    try:
        response = opener.open(url, timeout=1)
        return response.code == 204 and response.url == url
    except Exception:
        return False
Project: code    Author: ActiveState    | project source | file source
def get_file(self, url, quality):

        self.cookieJar = cookielib.LWPCookieJar()

        self.opener = urllib2.build_opener(

            urllib2.HTTPCookieProcessor(self.cookieJar),
            urllib2.HTTPRedirectHandler(),
            urllib2.HTTPHandler(debuglevel=0))

        self.opener.addheaders = [('User-agent', "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36")]


        forms = {"youtubeURL": url,
                 'quality':quality

                 }

        data = urllib.urlencode(forms)
        req = urllib2.Request('http://www.convertmemp3.com/',data)
        res = self.opener.open(req)

        self.convhtml = res.read()
Project: darkc0de-old-stuff    Author: tuwid    | project source | file source
def __init__(self, proxy=None, debuglevel=0):
        self.proxy = proxy

        urllib2.HTTPHandler.__init__(self, debuglevel)
Project: darkc0de-old-stuff    Author: tuwid    | project source | file source
def do_open(self, http_class, req):
        if self.proxy is not None:
            req.set_proxy(self.proxy, "http")

        return urllib2.HTTPHandler.do_open(self, ProxyHTTPConnection, req)
Project: pushkin    Author: Nordeus    | project source | file source
def __init__(self, api_key, url=GCM_URL, proxy=None):
        """ api_key : google api key
            url: url of gcm service.
            proxy: can be string "http://host:port" or dict {'https':'host:port'}
        """
        self.api_key = api_key
        self.url = url
        if proxy:
            if isinstance(proxy, basestring):
                protocol = url.split(':')[0]
                proxy = {protocol: proxy}

            auth = urllib2.HTTPBasicAuthHandler()
            opener = urllib2.build_opener(urllib2.ProxyHandler(proxy), auth, urllib2.HTTPHandler)
            urllib2.install_opener(opener)
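Both proxy forms described in the docstring, in use. A hedged sketch (the class name GCM and the API key are assumptions, not confirmed by this snippet):

sender = GCM('my-google-api-key', proxy='http://proxy.local:3128')
# equivalent dict form, keyed by the URL scheme:
sender = GCM('my-google-api-key', proxy={'https': 'proxy.local:3128'})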
Project: zeronet-debian    Author: bashrc    | project source | file source
def __init__(self, *args, **kwargs):
        self.args = args
        self.kw = kwargs
        urllib2.HTTPHandler.__init__(self)
Project: weibo    Author: windskyer    | project source | file source
def login(self, username, pwd, cookie_file):
        """"
            Login with use name, password and cookies.
            (1) If cookie file exists then try to load cookies;
            (2) If no cookies found then do login
        """
        # If cookie file exists then try to load cookies
        if os.path.exists(cookie_file):
            try:
                cookie_jar = cookielib.LWPCookieJar(cookie_file)
                cookie_jar.load(ignore_discard=True, ignore_expires=True)
                loaded = 1
            except cookielib.LoadError:
                loaded = 0
                LOG.info('Loading cookies error')

            # install loaded cookies for urllib2
            if loaded:
                cookie_support = urllib2.HTTPCookieProcessor(cookie_jar)
                opener = urllib2.build_opener(cookie_support,
                                              urllib2.HTTPHandler)
                urllib2.install_opener(opener)
                LOG.info('Loading cookies success')
                return 1
            else:
                return self.do_login(username, pwd, cookie_file)

        else:  # If no cookies found
            return self.do_login(username, pwd, cookie_file)
Project: weibo    Author: windskyer    | project source | file source
def save_cookie(self, text, cookie_file=CONF.cookie_file):
        cookie_jar2 = cookielib.LWPCookieJar()
        cookie_support2 = urllib2.HTTPCookieProcessor(cookie_jar2)
        opener2 = urllib2.build_opener(cookie_support2, urllib2.HTTPHandler)
        urllib2.install_opener(opener2)
        if six.PY3:
            text = text.decode('gbk')
        p = re.compile('location\.replace\(\'(.*?)\'\)')
        # Observed with httpfox: the login response redirects via a snippet
        # like location.replace('http://weibo.com...'), so login_url is
        # extracted with the regex above.
        # p = re.compile('location\.replace\(\B'(.*?)'\B\)')
        # The variant above makes re raise an error; the quote must be
        # escaped as \' for the pattern to compile.
        try:
            # Search login redirection URL
            login_url = p.search(text).group(1)
            data = urllib2.urlopen(login_url).read()
            # Verify login feedback, check whether result is TRUE
            patt_feedback = 'feedBackUrlCallBack\((.*)\)'
            p = re.compile(patt_feedback, re.MULTILINE)

            feedback = p.search(data).group(1)
            feedback_json = json.loads(feedback)
            if feedback_json['result']:
                cookie_jar2.save(cookie_file,
                                 ignore_discard=True,
                                 ignore_expires=True)
                return 1
            else:
                return 0
        except:
            return 0
Project: weibo    Author: windskyer    | project source | file source
def login(self, username, pwd, cookie_file):
        """"
            Login with use name, password and cookies.
            (1) If cookie file exists then try to load cookies;
            (2) If no cookies found then do login
        """
        #If cookie file exists then try to load cookies
        if os.path.exists(cookie_file):
            try:
                cookie_jar = cookielib.LWPCookieJar(cookie_file)
                cookie_jar.load(ignore_discard=True, ignore_expires=True)
                loaded = 1
            except cookielib.LoadError:
                loaded = 0
                print 'Loading cookies error'

            #install loaded cookies for urllib2
            if loaded:
                cookie_support = urllib2.HTTPCookieProcessor(cookie_jar)
                opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
                urllib2.install_opener(opener)
                print 'Loading cookies success'
                return 1
            else:
                return self.do_login(username, pwd, cookie_file)

        else:  #If no cookies found
            return self.do_login(username, pwd, cookie_file)
Project: weibo    Author: windskyer    | project source | file source
def login(self, username, pwd, cookie_file):
        """"
            Login with use name, password and cookies.
            (1) If cookie file exists then try to load cookies;
            (2) If no cookies found then do login
        """
        # If cookie file exists then try to load cookies
        if os.path.exists(cookie_file):
            try:
                cookie_jar = cookielib.LWPCookieJar(cookie_file)
                cookie_jar.load(ignore_discard=True, ignore_expires=True)
                loaded = 1
            except cookielib.LoadError:
                loaded = 0
                print('Loading cookies error')

            #install loaded cookies for urllib2
            if loaded:
                cookie_support = urllib2.HTTPCookieProcessor(cookie_jar)
                opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
                urllib2.install_opener(opener)
                print('Loading cookies success')
                return 1
            else:
                return self.do_login(username, pwd, cookie_file)

        else:  #If no cookies found
            return self.do_login(username, pwd, cookie_file)
Project: habrahabr-api-python    Author: dotzero    | project source | file source
def _request(self, url, method='GET', data=None):
        url = self._auth.endpoint + url
        headers = self._auth.headers

        if data is not None:
            data = urlencode(data)
            if method in ['GET', 'DELETE']:
                url = url + '?' + data
                data = None
            else:
                headers.update({'Content-Type': POST_CONTENT_TYPE})
                if sys.version_info > (3,):  # python3
                    data = data.encode('utf-8')

        log.debug(method + ' ' + url)
        log.debug(data)

        try:
            opener = build_opener(HTTPHandler)
            request = Request(url, data=data, headers=headers)
            request.get_method = lambda: method
            response = opener.open(request).read()
            data = self._parse_response(response)
        except HTTPError as e:
            log.error(e)
            data = self._parse_response(e.read())
            raise ApiHandlerError('Invalid server response', data)
        except ValueError as e:
            log.error(e)
            raise ApiHandlerError('Invalid server response')

        return data
Project: HttpProxy    Author: wuchujiecode    | project source | file source
def check(proxy):
    import urllib2
    url = 'http://www.baidu.com/js/bdsug.js?v=1.0.3.0'
    proxy_handler = urllib2.ProxyHandler({'http': 'http://' + proxy})
    opener = urllib2.build_opener(proxy_handler, urllib2.HTTPHandler)
    try:
        response = opener.open(url, timeout=3)
        return response.code == 200 and response.url == url
    except Exception:
        return False
Project: MailFail    Author: m0rtem    | project source | file source
def __init__(self, *args, **kwargs):
        self.args = args
        self.kw = kwargs
        urllib2.HTTPHandler.__init__(self)
Project: TCP-IP    Author: JackZ0    | project source | file source
def __init__(self):
        """Build an HTTPS opener."""
        # Based on pip 1.4.1's URLOpener
        # This verifies certs on only Python >=2.7.9.
        self._opener = build_opener(HTTPSHandler())
        # Strip out HTTPHandler to prevent MITM spoof:
        for handler in self._opener.handlers:
            if isinstance(handler, HTTPHandler):
                self._opener.handlers.remove(handler)
Project: TCP-IP    Author: JackZ0    | project source | file source
def hashed_download(url, temp, digest):
    """Download ``url`` to ``temp``, make sure it has the SHA-256 ``digest``,
    and return its path."""
    # Based on pip 1.4.1's URLOpener but with cert verification removed. Python
    # >=2.7.9 verifies HTTPS certs itself, and, in any case, the cert
    # authenticity has only privacy (not arbitrary code execution)
    # implications, since we're checking hashes.
    def opener():
        opener = build_opener(HTTPSHandler())
        # Strip out HTTPHandler to prevent MITM spoof:
        for handler in opener.handlers:
            if isinstance(handler, HTTPHandler):
                opener.handlers.remove(handler)
        return opener

    def read_chunks(response, chunk_size):
        while True:
            chunk = response.read(chunk_size)
            if not chunk:
                break
            yield chunk

    response = opener().open(url)
    path = join(temp, urlparse(url).path.split('/')[-1])
    actual_hash = sha256()
    with open(path, 'wb') as file:
        for chunk in read_chunks(response, 4096):
            file.write(chunk)
            actual_hash.update(chunk)

    actual_digest = actual_hash.hexdigest()
    if actual_digest != digest:
        raise HashError(url, path, actual_digest, digest)
    return path
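A hedged usage sketch; the URL and digest are placeholders (the digest shown is the SHA-256 of an empty file, not of any real package):

path = hashed_download(
    'https://example.com/pkg.tar.gz',
    temp='/tmp',
    digest='e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')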
Project: SinaMicroblog_Creeper-Spider_VerificationCode    Author: somethingx64    | project source | file source
def EnableCookie(self, enableProxy):
        #"Enable cookie & proxy (if needed)."        
        cookiejar = cookielib.LWPCookieJar()#construct cookie
        cookie_support = urllib2.HTTPCookieProcessor(cookiejar)

        if enableProxy:
            proxy_support = urllib2.ProxyHandler({'http':'http://xxxxx.pac'})#use proxy
            opener = urllib2.build_opener(proxy_support, cookie_support, urllib2.HTTPHandler)
            print ("Proxy enabled")
        else:
            opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener)#construct cookie's opener
Project: aws-cfn-plex    Author: lordmuffin    | project source | file source
def send_response(event, context, response_status, reason=None, response_data={}):
    response_body = {
        "Status": response_status,
        "PhysicalResourceId": context.log_stream_name,
        "StackId": event["StackId"],
        "RequestId": event["RequestId"],
        "LogicalResourceId": event["LogicalResourceId"],
    }

    if reason:
        response_body["Reason"] = reason

    if response_data:
        response_body["Data"] = response_data

    response_body = json.dumps(response_body)

    opener = build_opener(HTTPHandler)
    request = Request(event["ResponseURL"], data=response_body)
    request.add_header("Content-Type", "")
    request.add_header("Content-Length", len(response_body))
    request.get_method = lambda: "PUT"

    try:
        response = opener.open(request)
        print("Status code: {}".format(response.getcode()))
        print("Status message: {}".format(response.msg))
        return True
    except HTTPError as exc:
        print("Failed executing HTTP request: {}".format(exc.code))
        return False
Project: aws-cfn-plex    Author: lordmuffin    | project source | file source
def send(event, context, response_status, reason=None, response_data=None, physical_resource_id=None):
    response_data = response_data or {}
    response_body = json.dumps(
        {
            'Status': response_status,
            'Reason': reason or "See the details in CloudWatch Log Stream: " + context.log_stream_name,
            'PhysicalResourceId': physical_resource_id or context.log_stream_name,
            'StackId': event['StackId'],
            'RequestId': event['RequestId'],
            'LogicalResourceId': event['LogicalResourceId'],
            'Data': response_data
        }
    )

    opener = build_opener(HTTPHandler)
    request = Request(event['ResponseURL'], data=response_body)
    request.add_header('Content-Type', '')
    request.add_header('Content-Length', len(response_body))
    request.get_method = lambda: 'PUT'
    try:
        response = opener.open(request)
        print("Status code: {}".format(response.getcode()))
        print("Status message: {}".format(response.msg))
        return True
    except HTTPError as exc:
        print("Failed executing HTTP request: {}".format(exc.code))
        return False
Project: true_review    Author: lucadealfaro    | project source | file source
def http_request(self, req):
        """Handle a HTTP request.  Make sure that Content-Length is specified
        if we're using an interable value"""
        # Make sure that if we're using an iterable object as the request
        # body, that we've also specified Content-Length
        if req.has_data():
            data = req.get_data()
            if hasattr(data, 'read') or hasattr(data, 'next'):
                if not req.has_header('Content-length'):
                    raise ValueError(
                            "No Content-Length specified for iterable body")
        return urllib2.HTTPHandler.do_request_(self, req)
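For a file-like body, the caller must therefore supply Content-Length up front or the handler raises ValueError. A hedged sketch (payload.bin is a placeholder):

import os
import urllib2

body = open('payload.bin', 'rb')  # file-like: has .read, so the check applies
req = urllib2.Request('http://example.com/upload', data=body)
req.add_header('Content-Length', str(os.path.getsize('payload.bin')))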
Project: DistributeCrawler    Author: SmallHedgehog    | project source | file source
def __build_opener(self):
        """Build opener"""
        self.opener = urllib2.build_opener(self.cookie_processor, urllib2.HTTPHandler)
Project: Reverse-Ip    Author: rudSarkar    | project source | file source
def request(target, httpsproxy=None, useragent=None):
    global contenttype

    if not useragent:
        useragent = "Mozilla/5.0 (X11; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0 Iceweasel/22.0"
    else:
        print "["+ bc.G + "+" + bc.ENDC + "] User-Agent: " + useragent

    if httpsproxy:
        print "["+ bc.G + "+" + bc.ENDC + "] Proxy: " + httpsproxy + "\n"
        opener = urllib2.build_opener(
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.ProxyHandler({'http': 'http://' + httpsproxy}))
        urllib2.install_opener(opener)

    postdata = [('remoteAddress',target),('key','')]
    postdata = urllib.urlencode(postdata)

    request = urllib2.Request(url, postdata)

    request.add_header("Content-type", contenttype)
    request.add_header("User-Agent", useragent)
    try:
        result = urllib2.urlopen(request).read()
    except urllib2.HTTPError, e:
        print "Error: %s" % e.code  # e.code is an int; concatenating it to a str raised TypeError
        return None
    except urllib2.URLError, e:
        print "Error: %s" % e.reason
        return None

    obj = json.loads(result)
    return obj
Project: catchup4kodi    Author: catchup4kodi    | project source | file source
def getUrl(self,url, ischunkDownloading=False):
        try:
            post=None
            print 'url',url

            #openner = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler)
            cookie_handler = urllib2.HTTPCookieProcessor(self.cookieJar)
            openner = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())

            if post:
                req = urllib2.Request(url, post)
            else:
                req = urllib2.Request(url)

            ua_header=False
            if self.clientHeader:
                for n,v in self.clientHeader:
                    req.add_header(n,v)
                    if n=='User-Agent':
                        ua_header=True

            if not ua_header:
                req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; Win64; x64; Trident/7.0; rv:11.0) like Gecko')
            #response = urllib2.urlopen(req)
            if self.proxy and (  (not ischunkDownloading) or self.use_proxy_for_chunks ):
                req.set_proxy(self.proxy, 'http')
            response = openner.open(req)
            data=response.read()

            return data

        except:
            print 'Error in getUrl'
            traceback.print_exc()
            return None
Project: weibo_scrawler_app    Author: coolspiderghy    | project source | file source
def init(self, proxy=None):
        cj = cookielib.LWPCookieJar()
        cookie_support = urllib2.HTTPCookieProcessor(cj)
        if proxy:
            proxy_support = urllib2.ProxyHandler({'http': proxy})
            opener = urllib2.build_opener(proxy_support, cookie_support, urllib2.HTTPHandler)
        else:
            opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener)
    #print 'seton'