Python urllib2 module: HTTPCookieProcessor() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use urllib2.HTTPCookieProcessor().
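
All 50 examples share the same basic pattern: wrap a cookie jar in urllib2.HTTPCookieProcessor(), build an opener from that handler, and issue requests through the opener so cookies set by the server are stored and replayed automatically. As a minimal sketch of that pattern (example.com stands in for a real site):

import urllib2
import cookielib

# The CookieJar collects cookies from Set-Cookie response headers;
# HTTPCookieProcessor replays them on every later request made
# through the same opener.
cookie_jar = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie_jar))

response = opener.open('http://example.com/')  # placeholder URL
body = response.read()

# Inspect the cookies collected so far.
for cookie in cookie_jar:
    print cookie.name, '=', cookie.value

Many of the examples below additionally call urllib2.install_opener(opener), which makes the cookie-aware opener the default for plain urllib2.urlopen() calls.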

Project: touch-pay-client    Author: HackPucBemobi    | Project source | File source
def __init__(self, timeout=None, proxy=None, cacert=None, sessions=False):
        if (timeout is not None) and not self.supports_feature('timeout'):
            raise RuntimeError('timeout is not supported with urllib2 transport')
        if proxy:
            raise RuntimeError('proxy is not supported with urllib2 transport')
        if cacert:
            raise RuntimeError('cacert is not supported with urllib2 transport')

        handlers = []

        if ((sys.version_info[0] == 2 and sys.version_info >= (2,7,9)) or
            (sys.version_info[0] == 3 and sys.version_info >= (3,2,0))):
            context = ssl.create_default_context()
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
            handlers.append(urllib2.HTTPSHandler(context=context))

        if sessions:
            handlers.append(urllib2.HTTPCookieProcessor(CookieJar()))

        opener = urllib2.build_opener(*handlers)
        self.request_opener = opener.open
        self._timeout = timeout
Project: google_scholar_paper_finder    Author: maikelronnau    | Project source | File source
def __init__(self):
        self.articles = []
        self.query = None
        self.cjar = MozillaCookieJar()

        # If we have a cookie file, load it:
        if ScholarConf.COOKIE_JAR_FILE and \
           os.path.exists(ScholarConf.COOKIE_JAR_FILE):
            try:
                self.cjar.load(ScholarConf.COOKIE_JAR_FILE,
                               ignore_discard=True)
                ScholarUtils.log('info', 'loaded cookies file')
            except Exception as msg:
                ScholarUtils.log('warn', 'could not load cookies file: %s' % msg)
                self.cjar = MozillaCookieJar() # Just to be safe

        self.opener = build_opener(HTTPCookieProcessor(self.cjar))
        self.settings = None # Last settings object, if any
Project: citations    Author: frederick0329    | Project source | File source
def __init__(self):
        self.articles = []
        self.query = None
        self.cjar = MozillaCookieJar()

        # If we have a cookie file, load it:
        if ScholarConf.COOKIE_JAR_FILE and \
           os.path.exists(ScholarConf.COOKIE_JAR_FILE):
            try:
                self.cjar.load(ScholarConf.COOKIE_JAR_FILE,
                               ignore_discard=True)
                ScholarUtils.log('info', 'loaded cookies file')
            except Exception as msg:
                ScholarUtils.log('warn', 'could not load cookies file: %s' % msg)
                self.cjar = MozillaCookieJar() # Just to be safe

        self.opener = build_opener(HTTPCookieProcessor(self.cjar))
        self.settings = None # Last settings object, if any
Project: CN_POI_Data    Author: lyBigdata    | Project source | File source
def __init__(self,proxyHost = ""):
        # set request headers
        self.headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36',
                        'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
                        'Accept-Encoding':'en-us',
                        'Connection':'keep-alive',
                        'Referer':'http://www.baidu.com/'}

        self.proxyHeaders = [('User-Agent','Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36'),
                             ('Accept-Charset','ISO-8859-1,utf-8;q=0.7,*;q=0.3'),
                             ('Accept-Encoding','en-us'),
                             ('Connection','keep-alive'),
                             ('Referer','http://www.baidu.com/')]

        self.cookies = urllib2.HTTPCookieProcessor()
        self.req_timeout = 5

        self.proxyHost = {"http":proxyHost}
Project: CN_POI_Data    Author: lyBigdata    | Project source | File source
def checkAlive(self,ip,port,protocol):
        testUrl = "https://www.baidu.com/"
        req_timeout = 3
        cookies = urllib2.HTTPCookieProcessor()

        proxyHost = ""
        if protocol == 'HTTP' or protocol == 'HTTPS':
            proxyHost = {"http":r'http://%s:%s' % (ip, port)}
            #print proxyHost

        proxyHandler = urllib2.ProxyHandler(proxyHost)
        opener = urllib2.build_opener(cookies, proxyHandler)
        opener.addheaders = [('User-Agent',
                              'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36')]

        try:
            req = opener.open(testUrl, timeout=req_timeout)
            result = req.read()
            #print result
            gevent.sleep(2)
            return True
        except urllib2.HTTPError as e:
            print e.message
            return False
Project: auto-laod-hosts    Author: yanjinyi1987    | Project source | File source
def urlopen_test(host):
    headers = [('Host',host),
    ('Connection', 'keep-alive'),
    ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),
    ('User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0'),
    #('Accept-Encoding','gzip,deflate'), 
    ('Accept-Language', 'en-US,en;q=0.5')]

    # create a MozillaCookieJar instance to save cookies
    cookie=cookielib.MozillaCookieJar()
    handler=urllib2.HTTPCookieProcessor(cookie)

    req=urllib2.Request(u'https://'+host)
    first_opener = urllib2.build_opener(handler)
    first_opener.addheaders = headers
    try:
        result=first_opener.open(req,timeout=60) # 60s timeout
        if result.read()!=None:
            return True
    except Exception as e:
        print e
        return False
Project: wechat-crawler    Author: DMGbupt    | Project source | File source
def get_cookie(self,query):
        """
        @summary: fetch the cookies needed for searching
        @param query: the search keyword
        @return: a dict holding the required cookies
        """
        cookies={}
        i=0
        while True:
            cookie = cookielib.CookieJar()
            handler=urllib2.HTTPCookieProcessor(cookie)
            opener = urllib2.build_opener(handler)
            response = opener.open(self._search_url.format(query)) # open the search page to collect its cookies
            for item in cookie:
                # Among the cookies set by the search page, SNUID is the one
                # required for crawling; once it appears, return the cookies.
                if "SNUID" in item.name:
                    cookies[item.name]=item.value
                    return cookies
            if i > 3:
                # After more than 3 failed attempts, the IP has probably been
                # blocked; log the failure and return whatever was collected.
                spider_logger.error("Can't get cookies when searching {0} !".format(query))
                return cookies
            i=i+1
            time.sleep(10*random.expovariate(1)) # sleep a random interval so the requests look less like a crawler
Project: PySide_For_Amazon_Order    Author: cundi    | Project source | File source
def __init__(self, user, pwd, softId="110614",
                 softKey="469c0d8a805a40f39d3c1ec3c9281e9c",
                 codeType="1004"):
        self.softId = softId
        self.softKey = softKey
        self.user = user
        self.pwd = pwd
        self.codeType = codeType
        self.uid = "100"
        self.initUrl = "http://common.taskok.com:9000/Service/ServerConfig.aspx"
        self.version = '1.1.1.2'
        self.cookieJar = cookielib.CookieJar()
        self.opener = urllib2.build_opener(
            urllib2.HTTPCookieProcessor(self.cookieJar))
        self.loginUrl = None
        self.uploadUrl = None
        self.codeUrl = None
        self.params = []
        self.uKey = None
Project: -scrapy-    Author: PyCN    | Project source | File source
def get_url(self, url, headers, cookie):

        if not isinstance(url, str):
            raise TypeError('url or cookie type error!!')
        req = urllib2.Request(url, None, headers)
        try:
            if not isinstance(cookie, cookielib.CookieJar) and cookie is None:
                response = urllib2.urlopen(req)
            elif not isinstance(cookie, cookielib.CookieJar) and cookie is not None:
                cookie = cookielib.CookieJar()
                handler = urllib2.HTTPCookieProcessor(cookie)
                opener = urllib2.build_opener(handler)
                response = opener.open(req)
            elif isinstance(cookie, cookielib.CookieJar):
                handler = urllib2.HTTPCookieProcessor(cookie)
                opener = urllib2.build_opener(handler)
                response = opener.open(req)

        except urllib2.HTTPError:
            raise RuntimeError('get url error!!')
        the_page = response.read()

        return the_page, cookie
Project: -scrapy-    Author: PyCN    | Project source | File source
def post_url(self, url, formdata, headers, cookie):
        if not isinstance(url, str):
            raise TypeError('url must be a string and formdata must be a dict')
        data = urllib.urlencode(formdata)
        req = urllib2.Request(url, data, headers)
        try:
            if not isinstance(cookie, cookielib.CookieJar) and cookie is None:
                response = urllib2.urlopen(req)
            elif not isinstance(cookie, cookielib.CookieJar) and cookie is not None:
                cookie = cookielib.CookieJar()
                handler = urllib2.HTTPCookieProcessor(cookie)
                opener = urllib2.build_opener(handler)
                response = opener.open(req)
            elif isinstance(cookie, cookielib.CookieJar):
                handler = urllib2.HTTPCookieProcessor(cookie)
                opener = urllib2.build_opener(handler)
                response = opener.open(req)

        except urllib2.HTTPError:
            raise RuntimeError('get url error!!')
        the_page = response.read()

        return the_page, cookie
Project: rekall-agent-server    Author: rekall-innovations    | Project source | File source
def __init__(self, timeout=None, proxy=None, cacert=None, sessions=False):
        if (timeout is not None) and not self.supports_feature('timeout'):
            raise RuntimeError('timeout is not supported with urllib2 transport')
        if proxy:
            raise RuntimeError('proxy is not supported with urllib2 transport')
        if cacert:
            raise RuntimeError('cacert is not supported with urllib2 transport')

        handlers = []

        if ((sys.version_info[0] == 2 and sys.version_info >= (2,7,9)) or
            (sys.version_info[0] == 3 and sys.version_info >= (3,2,0))):
            context = ssl.create_default_context()
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
            handlers.append(urllib2.HTTPSHandler(context=context))

        if sessions:
            handlers.append(urllib2.HTTPCookieProcessor(CookieJar()))

        opener = urllib2.build_opener(*handlers)
        self.request_opener = opener.open
        self._timeout = timeout
Project: snowballing    Author: JoaoFelipe    | Project source | File source
def __init__(self):
        self.articles = []
        self.query = None
        self.cjar = MozillaCookieJar()

        # If we have a cookie file, load it:
        if ScholarConf.COOKIE_JAR_FILE and \
           os.path.exists(ScholarConf.COOKIE_JAR_FILE):
            try:
                self.cjar.load(ScholarConf.COOKIE_JAR_FILE,
                               ignore_discard=True)
                ScholarUtils.log('info', 'loaded cookies file')
            except Exception as msg:
                ScholarUtils.log('warn', 'could not load cookies file: %s' % msg)
                self.cjar = MozillaCookieJar() # Just to be safe

        self.opener = build_opener(HTTPCookieProcessor(self.cjar))
        self.settings = None # Last settings object, if any
Project: etunexus_api    Author: etusolution    | Project source | File source
def _init_urllib(self, secure, debuglevel=0):
        cj = cookielib.CookieJar()
        no_proxy_support = urllib2.ProxyHandler({})
        cookie_handler = urllib2.HTTPCookieProcessor(cj)
        ctx = None
        if not secure:
            self._logger.info('[WARNING] Skip certificate verification.')
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
        https_handler = urllib2.HTTPSHandler(debuglevel=debuglevel, context=ctx)
        opener = urllib2.build_opener(no_proxy_support,
                                      cookie_handler,
                                      https_handler,
                                      MultipartPostHandler.MultipartPostHandler)
        opener.addheaders = [('User-agent', API_USER_AGENT)]
        urllib2.install_opener(opener)
Project: KDDCUP2016    Author: hugochan    | Project source | File source
def __init__(self):
        self.articles = []
        self.query = None
        self.cjar = MozillaCookieJar()

        # If we have a cookie file, load it:
        if ScholarConf.COOKIE_JAR_FILE and \
           os.path.exists(ScholarConf.COOKIE_JAR_FILE):
            try:
                self.cjar.load(ScholarConf.COOKIE_JAR_FILE,
                               ignore_discard=True)
                ScholarUtils.log('info', 'loaded cookies file')
            except Exception as msg:
                ScholarUtils.log('warn', 'could not load cookies file: %s' % msg)
                self.cjar = MozillaCookieJar() # Just to be safe

        self.opener = build_opener(HTTPCookieProcessor(self.cjar))
        self.settings = None # Last settings object, if any
Project: zacui    Author: yoyopie    | Project source | File source
def index(request):
    if request.method == "GET":
        try:
            ssl._create_default_https_context = ssl._create_unverified_context

            opener = wdf_urllib.build_opener(
                wdf_urllib.HTTPCookieProcessor(CookieJar()))
            wdf_urllib.install_opener(opener)
        except:
            pass
        uuid = getUUID()
        url = 'https://login.weixin.qq.com/qrcode/' + uuid
        params = {
            't': 'webwx',
            '_': int(time.time()),
        }

        request = getRequest(url=url, data=urlencode(params))
        response = wdf_urllib.urlopen(request)
        context = {
            'uuid': uuid,
            'response': response.read(),
            'delyou': '',
            }
        return render_to_response('index.html', context)
Project: slack_scholar    Author: xLeitix    | Project source | File source
def __init__(self):
        self.articles = []
        self.query = None
        self.cjar = MozillaCookieJar()

        # If we have a cookie file, load it:
        if ScholarConf.COOKIE_JAR_FILE and \
           os.path.exists(ScholarConf.COOKIE_JAR_FILE):
            try:
                self.cjar.load(ScholarConf.COOKIE_JAR_FILE,
                               ignore_discard=True)
                print "Using cookie file"
                ScholarUtils.log('info', 'loaded cookies file')
            except Exception as msg:
                print "Ignoring cookie file: %s" % msg
                ScholarUtils.log('warn', 'could not load cookies file: %s' % msg)
                self.cjar = MozillaCookieJar() # Just to be safe

        self.opener = build_opener(HTTPCookieProcessor(self.cjar))
        self.settings = None # Last settings object, if any
Project: sogouWechart    Author: duanbj    | Project source | File source
def proxy_identify(proxy, url):
        cookie = cookielib.LWPCookieJar()
        handler = urllib2.HTTPCookieProcessor(cookie)
        proxy_support = urllib2.ProxyHandler({'http': proxy})
        opener = urllib2.build_opener(proxy_support, handler)
        try:
            response = opener.open(url, timeout=3)
            if response.code == 200:
                c = ''
                for item in cookie:
                    c += item.name+'='+item.value+';'
                print c
                IpProxy.sogou_cookie.append(c)
                return True
        except Exception as error:
            print error
            return False
Project: autoscan    Author: b01u    | Project source | File source
def __init__(self, handlers):
        self._cj = cookielib.CookieJar()

        handlers.append(urllib2.HTTPCookieProcessor(self._cj))
        handlers.append(HTTPSHandler())

        self.opener = urllib2.build_opener(*handlers)
        self.opener.addheaders = conf.httpHeaders

        try:
            conn = self.opener.open("http://www.google.com/ncr")
            conn.info()  # retrieve session cookie
        except urllib2.HTTPError as e:
            e.info()
        except urllib2.URLError:
            errMsg = "unable to connect to Google"
            raise SqlmapConnectionException(errMsg)
Project: awesome-hacking-via-python    Author: shashi12533    | Project source | File source
def login(uname,passwd):
    global logging
    global o
    global confget
    global filecookiejar
    logging.debug("Logging using url: %s" % confget('Auth','logincheck'))
    login_encode=urllib.urlencode({'MobileNoLogin':uname, 'LoginPassword':passwd})
    logging.debug("login_encode:%s" % login_encode)
    cookieprocessor=urllib2.HTTPCookieProcessor() #new cookie processor
    o = urllib2.build_opener(cookieprocessor) # a new urlopener
    f = tryopen(o,confget('Auth','logincheck'),login_encode)
    logging.debug("Sent Login information, got the following return URL: %s", f.geturl())
    if f.read().find(confget('Auth','logindone')) != -1:
        #save cookies
        cj=cookieprocessor.cookiejar
        cookie=enumerate(cj).next()[1]
        logging.debug("New Cookie:%s:" % cookie)
        filecookiejar.set_cookie(cookie)
        filecookiejar.save(ignore_discard=True)
        logging.debug("Cookies saved in %s" % filecookiejar.filename)
        return True
    else:
        return False
Project: TinyStockToolKit    Author: lawinse    | Project source | File source
def __init__(self,timeout=10,threads=None,stacksize=32768*16,loginfunc=None):
        #proxy_support = urllib2.ProxyHandler({'http':'http://localhost:3128'})
        cookie_support = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
        encoding_support = ContentEncodingProcessor()
        #self.opener = urllib2.build_opener(cookie_support,encoding_support,proxy_support,urllib2.HTTPHandler)
        self.opener = urllib2.build_opener(cookie_support,encoding_support,urllib2.HTTPHandler)
        self.req = urllib2.Request('http://www.hsbc.com')
        socket.setdefaulttimeout(timeout)
        self.q_req = Queue()
        self.q_ans = Queue()
        self.lock = Lock()
        self.running = 0
        if loginfunc:
            self.opener = loginfunc(self.opener)
        if threads:
            self.threads = threads
            stack_size(stacksize)
            for i in range(threads):
                t = Thread(target=self.threadget)
                t.setDaemon(True)
                t.start()
Project: DoubanSpider    Author: ruiming    | Project source | File source
def __init__(self):
        # proxy settings
        self.proxy_url = proxyList[3]
        self.proxy = urllib2.ProxyHandler({"http": self.proxy_url})
        # target host and request headers
        self.hostURL = 'http://book.douban.com/tag/'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.47 (KHTML, like Gecko)'
                          ' Chrome/48.1.2524.116 Safari/537.36',
            'Referer': 'http://book.douban.com/',
            'Host': 'book.douban.com',
            'Upgrade-Insecure-Requests': '1',
            'Connection': 'keep-alive'
        }
        # build the opener
        self.cookie = cookielib.LWPCookieJar()
        self.cookieHandler = urllib2.HTTPCookieProcessor(self.cookie)
        self.opener = urllib2.build_opener(self.cookieHandler, self.proxy, urllib2.HTTPHandler)

Project: DoubanSpider    Author: ruiming    | Project source | File source
def checkproxy(self):
        cookies = urllib2.HTTPCookieProcessor()
        for proxy in self.proxyList:
            proxyhandler = urllib2.ProxyHandler({"http": r'http://%s:%s' % (proxy[0], proxy[1])})
            opener = urllib2.build_opener(cookies, proxyhandler)
            opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                                                '(KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'),
                                 ('Referer', 'http://proxy.moo.jp/zh/')]
            t1 = time.time()
            try:
                request = opener.open(self.testURL, timeout=self.timeout)
                result = request.read()
                timeused = time.time() - t1
                pos = result.find(self.testStr)
                if pos > 1:
                    print r'success --http://%s:%s' % (proxy[0], proxy[1])
                    checkedProxyList.append((proxy[0], proxy[1]))
                else:
                    print r'fail    --http://%s:%s' % (proxy[0], proxy[1])
                    continue
            except Exception as e:
                print r'fail    --http://%s:%s' % (proxy[0], proxy[1])
                continue
Project: clusterdock    Author: cloudera    | Project source | File source
def __init__(self, base_url, exc_class=None, logger=None):
    """
    @param base_url: The base url to the API.
    @param exc_class: An exception class to handle non-200 results.

    Creates an HTTP(S) client to connect to the Cloudera Manager API.
    """
    self._base_url = base_url.rstrip('/')
    self._exc_class = exc_class or RestException
    self._logger = logger or LOG
    self._headers = { }

    # Make a basic auth handler that does nothing. Set credentials later.
    self._passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    authhandler = urllib2.HTTPBasicAuthHandler(self._passmgr)

    # Make a cookie processor
    cookiejar = cookielib.CookieJar()

    self._opener = urllib2.build_opener(
        HTTPErrorProcessor(),
        urllib2.HTTPCookieProcessor(cookiejar),
        authhandler)
Project: antares    Author: CONABIO    | Project source | File source
def download_landsat_scene(url, directory, filename):
    '''
    This method downloads a scene directly from usgs. In order to do so, it
    pretends to be a browser to build a request that is accepted by the server.
    We added the headers so we don't get banned when the server detects that we
    are doing lots of requests. This idea is based on the landsat downloader:
    https://github.com/olivierhagolle/LANDSAT-Download
    '''
    cookies = urllib2.HTTPCookieProcessor()
    opener = urllib2.build_opener(cookies)
    urllib2.install_opener(opener)
    data=urllib2.urlopen("https://ers.cr.usgs.gov").read()
    token_group = re.search(r'<input .*?name="csrf_token".*?value="(.*?)"', data)
    if token_group:
        token = token_group.group(1)
    else:
        LOGGER.error('The cross site request forgery token was not found.')
        sys.exit(1)
    usgs = {'account':getattr(SETTINGS, 'USGS_USER'), 'passwd':getattr(SETTINGS, 'USGS_PASSWORD')}
    params = urllib.urlencode(dict(username=usgs['account'], password=usgs['passwd'], csrf_token=token))
    request = urllib2.Request("https://ers.cr.usgs.gov/login", params, headers={'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'})
    f = urllib2.urlopen(request)
    data = f.read()
    f.close()    
    download_chunks(url, directory, filename)
Project: code    Author: ActiveState    | Project source | File source
def get_file(self, url, quality):

        self.cookieJar = cookielib.LWPCookieJar()

        self.opener = urllib2.build_opener(

            urllib2.HTTPCookieProcessor(self.cookieJar),
            urllib2.HTTPRedirectHandler(),
            urllib2.HTTPHandler(debuglevel=0))

        self.opener.addheaders = [('User-agent', "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36")]


        forms = {"youtubeURL": url,
                 'quality':quality

                 }

        data = urllib.urlencode(forms)
        req = urllib2.Request('http://www.convertmemp3.com/',data)
        res = self.opener.open(req)

        self.convhtml = res.read()
Project: transfer    Author: viur-framework    | Project source | File source
def __init__(self, baseURL):
        super(NetworkService, self).__init__()
        self.baseURL = baseURL
        cp = urllib2.HTTPCookieProcessor()
        self.opener = urllib2.build_opener(cp)
        urllib2.install_opener(self.opener)
Project: transfer    Author: viur-framework    | Project source | File source
def __init__( self,  baseURL ):
        super( NetworkService, self ).__init__()
        self.baseURL = baseURL
        cp = urllib2.HTTPCookieProcessor()
        self.opener = urllib2.build_opener( cp )
        urllib2.install_opener( self.opener )
Project: darkc0de-old-stuff    Author: tuwid    | Project source | File source
def __urllib2Opener():
    """
    This function creates the urllib2 OpenerDirector.
    """

    global authHandler
    global proxyHandler

    debugMsg = "creating HTTP requests opener object"
    logger.debug(debugMsg)

    conf.cj = cookielib.LWPCookieJar()
    opener  = urllib2.build_opener(proxyHandler, authHandler, urllib2.HTTPCookieProcessor(conf.cj))

    urllib2.install_opener(opener)
Project: darkc0de-old-stuff    Author: tuwid    | Project source | File source
def __init__(self, proxyHandler):
        self.__googleCookie = None
        self.__matches = []
        self.__cj = cookielib.LWPCookieJar()
        self.opener = urllib2.build_opener(proxyHandler, urllib2.HTTPCookieProcessor(self.__cj))
        self.opener.addheaders = conf.httpHeaders
Project: weibo    Author: windskyer    | Project source | File source
def login(self, username, pwd, cookie_file):
        """"
            Login with use name, password and cookies.
            (1) If cookie file exists then try to load cookies;
            (2) If no cookies found then do login
        """
        # If cookie file exists then try to load cookies
        if os.path.exists(cookie_file):
            try:
                cookie_jar = cookielib.LWPCookieJar(cookie_file)
                cookie_jar.load(ignore_discard=True, ignore_expires=True)
                loaded = 1
            except cookielib.LoadError:
                loaded = 0
                LOG.info('Loading cookies error')

            # install loaded cookies for urllib2
            if loaded:
                cookie_support = urllib2.HTTPCookieProcessor(cookie_jar)
                opener = urllib2.build_opener(cookie_support,
                                              urllib2.HTTPHandler)
                urllib2.install_opener(opener)
                LOG.info('Loading cookies success')
                return 1
            else:
                return self.do_login(username, pwd, cookie_file)

        else:  # If no cookies found
            return self.do_login(username, pwd, cookie_file)
Project: weibo    Author: windskyer    | Project source | File source
def save_cookie(self, text, cookie_file=CONF.cookie_file):
        cookie_jar2 = cookielib.LWPCookieJar()
        cookie_support2 = urllib2.HTTPCookieProcessor(cookie_jar2)
        opener2 = urllib2.build_opener(cookie_support2, urllib2.HTTPHandler)
        urllib2.install_opener(opener2)
        if six.PY3:
            text = text.decode('gbk')
        p = re.compile('location\.replace\(\'(.*?)\'\)')
        # As captured with httpfox, the login response body contains a script
        # of the form location.replace('http://weibo.com/...'); the URL inside
        # is the real post-login redirect target (login_url), extracted by the
        # regex above. The single quotes must be escaped as \' in the pattern;
        # an earlier attempt that failed for this reason was:
        # p = re.compile('location\.replace\(\B'(.*?)'\B\)')
        try:
            # Search login redirection URL
            login_url = p.search(text).group(1)
            data = urllib2.urlopen(login_url).read()
            # Verify login feedback, check whether result is TRUE
            patt_feedback = 'feedBackUrlCallBack\((.*)\)'
            p = re.compile(patt_feedback, re.MULTILINE)

            feedback = p.search(data).group(1)
            feedback_json = json.loads(feedback)
            if feedback_json['result']:
                cookie_jar2.save(cookie_file,
                                 ignore_discard=True,
                                 ignore_expires=True)
                return 1
            else:
                return 0
        except:
            return 0
Project: weibo    Author: windskyer    | Project source | File source
def login(self, username, pwd, cookie_file):
        """"
            Login with use name, password and cookies.
            (1) If cookie file exists then try to load cookies;
            (2) If no cookies found then do login
        """
        #If cookie file exists then try to load cookies
        if os.path.exists(cookie_file):
            try:
                cookie_jar = cookielib.LWPCookieJar(cookie_file)
                cookie_jar.load(ignore_discard=True, ignore_expires=True)
                loaded = 1
            except cookielib.LoadError:
                loaded = 0
                print 'Loading cookies error'

            #install loaded cookies for urllib2
            if loaded:
                cookie_support = urllib2.HTTPCookieProcessor(cookie_jar)
                opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
                urllib2.install_opener(opener)
                print 'Loading cookies success'
                return 1
            else:
                return self.do_login(username, pwd, cookie_file)

        else:  #If no cookies found
            return self.do_login(username, pwd, cookie_file)
Project: weibo    Author: windskyer    | Project source | File source
def login(self, username, pwd, cookie_file):
        """"
            Login with use name, password and cookies.
            (1) If cookie file exists then try to load cookies;
            (2) If no cookies found then do login
        """
        # If cookie file exists then try to load cookies
        if os.path.exists(cookie_file):
            try:
                cookie_jar = cookielib.LWPCookieJar(cookie_file)
                cookie_jar.load(ignore_discard=True, ignore_expires=True)
                loaded = 1
            except cookielib.LoadError:
                loaded = 0
                print('Loading cookies error')

            #install loaded cookies for urllib2
            if loaded:
                cookie_support = urllib2.HTTPCookieProcessor(cookie_jar)
                opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
                urllib2.install_opener(opener)
                print('Loading cookies success')
                return 1
            else:
                return self.do_login(username, pwd, cookie_file)

        else:  #If no cookies found
            return self.do_login(username, pwd, cookie_file)
Project: SinaMicroblog_Creeper-Spider_VerificationCode    Author: somethingx64    | Project source | File source
def EnableCookie(self, enableProxy):
        #"Enable cookie & proxy (if needed)."        
        cookiejar = cookielib.LWPCookieJar()#construct cookie
        cookie_support = urllib2.HTTPCookieProcessor(cookiejar)

        if enableProxy:
            proxy_support = urllib2.ProxyHandler({'http':'http://xxxxx.pac'})#use proxy
            opener = urllib2.build_opener(proxy_support, cookie_support, urllib2.HTTPHandler)
            print ("Proxy enabled")
        else:
            opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener) # install the cookie-enabled opener
Project: WeiboPictureWorkflow    Author: cielpy    | Project source | File source
def login(form_data):
    url = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.18)'
    headers = ('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0')
    cookie = cookielib.MozillaCookieJar(cookie_file)
    handler = urllib2.HTTPCookieProcessor(cookie)
    opener = urllib2.build_opener(handler)
    opener.addheaders.append(headers)
    req = opener.open(url, form_data)
    redirect_result = req.read()
    login_pattern = r'location.replace\(\'(.*?)\'\)'
    login_url = re.search(login_pattern, redirect_result).group(1)
    opener.open(login_url).read()
    cookie.save(cookie_file, ignore_discard=True, ignore_expires=True)
Project: WeiboPictureWorkflow    Author: cielpy    | Project source | File source
def request_image_url(image_path):
    cookie = cookielib.MozillaCookieJar()
    cookie.load(cookie_file, ignore_expires=False, ignore_discard=True)
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
    image_url = 'http://picupload.service.weibo.com/interface/pic_upload.php?mime=image%2Fjpeg&data=base64&url=0&markpos=1&logo=&nick=0&marks=1&app=miniblog'
    b = base64.b64encode(file(image_path).read())
    data = urllib.urlencode({'b64_data': b})
    result = opener.open(image_url, data).read()
    result = re.sub(r"<meta.*</script>", "", result, flags=re.S)
    image_result = json.loads(result)
    image_id = image_result.get('data').get('pics').get('pic_1').get('pid')
    return 'https://ws3.sinaimg.cn/large/%s.jpg' % image_id
Project: DistributeCrawler    Author: SmallHedgehog    | Project source | File source
def __init__(self, cookie_filename=None, timeout=None, **kwargs):
        self.cj = cookielib.LWPCookieJar()
        if cookie_filename is not None:
            self.cj.load(cookie_filename)
        self.cookie_processor = urllib2.HTTPCookieProcessor(self.cj)
        self.__build_opener()
        urllib2.install_opener(self.opener)

        if timeout is None:
            # self._default_timeout = socket._GLOBAL_DEFAULT_TIMEOUT
            # Set default timeout
            self._default_timeout = 5
        else:
            self._default_timeout = timeout
Project: catchup4kodi    Author: catchup4kodi    | Project source | File source
def getUrl(self,url, ischunkDownloading=False):
        try:
            post=None
            print 'url',url

            #openner = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler)
            cookie_handler = urllib2.HTTPCookieProcessor(self.cookieJar)
            openner = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())

            if post:
                req = urllib2.Request(url, post)
            else:
                req = urllib2.Request(url)

            ua_header=False
            if self.clientHeader:
                for n,v in self.clientHeader:
                    req.add_header(n,v)
                    if n=='User-Agent':
                        ua_header=True

            if not ua_header:
                req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; Win64; x64; Trident/7.0; rv:11.0) like Gecko')
            #response = urllib2.urlopen(req)
            if self.proxy and (  (not ischunkDownloading) or self.use_proxy_for_chunks ):
                req.set_proxy(self.proxy, 'http')
            response = openner.open(req)
            data=response.read()

            return data

        except:
            print 'Error in getUrl'
            traceback.print_exc()
            return None
Project: catchup4kodi    Author: catchup4kodi    | Project source | File source
def getUrl(self,url, ischunkDownloading=False):
        try:
            post=None
            print 'url',url

            #openner = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler)
            cookie_handler = urllib2.HTTPCookieProcessor(self.cookieJar)
            openner = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())

            if post:
                req = urllib2.Request(url, post)
            else:
                req = urllib2.Request(url)

            ua_header=False
            if self.clientHeader:
                for n,v in self.clientHeader:
                    req.add_header(n,v)
                    if n=='User-Agent':
                        ua_header=True

            if not ua_header:
                req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; Win64; x64; Trident/7.0; rv:11.0) like Gecko')
            #response = urllib2.urlopen(req)
            if self.proxy and (  (not ischunkDownloading) or self.use_proxy_for_chunks ):
                req.set_proxy(self.proxy, 'http')
            response = openner.open(req)
            data=response.read()

            return data

        except:
            print 'Error in getUrl'
            traceback.print_exc()
            return None
Project: weibo_scrawler_app    Author: coolspiderghy    | Project source | File source
def init(self, proxy=None):
        cj = cookielib.LWPCookieJar()
        cookie_support = urllib2.HTTPCookieProcessor(cj)
        if proxy:
            proxy_support = urllib2.ProxyHandler({'http': proxy})
            opener = urllib2.build_opener(proxy_support, cookie_support, urllib2.HTTPHandler)
        else:
            opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener)
    #print 'seton'
Project: weibo_scrawler_app    Author: coolspiderghy    | Project source | File source
def use_proxy(self, proxy):
        """
        Route requests through the given proxy; the proxy string must be of the form http://XX.XX.XX.XX:XXXX
        """
        cj = cookielib.LWPCookieJar()
        cookie_support = urllib2.HTTPCookieProcessor(cj)
        if proxy:
            proxy_support = urllib2.ProxyHandler({'http': proxy})
            opener = urllib2.build_opener(proxy_support, cookie_support, urllib2.HTTPHandler)
        else:
            opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener)
Project: spider    Author: shancang    | Project source | File source
def __init__(self,url):
        cookie_jar = cookielib.LWPCookieJar()
        cookie = urllib2.HTTPCookieProcessor(cookie_jar)
        self.opener = urllib2.build_opener(cookie)
        user_agent="Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36"
        self.url=url
        self.send_headers={'User-Agent':user_agent}
Project: oil    Author: oilshell    | Project source | File source
def test_cookies(self):
        cj = MockCookieJar()
        h = urllib2.HTTPCookieProcessor(cj)
        o = h.parent = MockOpener()

        req = Request("http://example.com/")
        r = MockResponse(200, "OK", {}, "")
        newreq = h.http_request(req)
        self.assertTrue(cj.ach_req is req is newreq)
        self.assertEqual(req.get_origin_req_host(), "example.com")
        self.assertTrue(not req.is_unverifiable())
        newr = h.http_response(req, r)
        self.assertTrue(cj.ec_req is req)
        self.assertTrue(cj.ec_r is r is newr)
Project: oil    Author: oilshell    | Project source | File source
def test_cookie_redirect(self):
        # cookies shouldn't leak into redirected requests
        from cookielib import CookieJar

        from test.test_cookielib import interact_netscape

        cj = CookieJar()
        interact_netscape(cj, "http://www.example.com/", "spam=eggs")
        hh = MockHTTPHandler(302, "Location: http://www.cracker.com/\r\n\r\n")
        hdeh = urllib2.HTTPDefaultErrorHandler()
        hrh = urllib2.HTTPRedirectHandler()
        cp = urllib2.HTTPCookieProcessor(cj)
        o = build_test_opener(hh, hdeh, hrh, cp)
        o.open("http://www.example.com/")
        self.assertTrue(not hh.req.has_header("Cookie"))
Project: python2-tracer    Author: extremecoders-re    | Project source | File source
def test_cookies(self):
        cj = MockCookieJar()
        h = urllib2.HTTPCookieProcessor(cj)
        o = h.parent = MockOpener()

        req = Request("http://example.com/")
        r = MockResponse(200, "OK", {}, "")
        newreq = h.http_request(req)
        self.assertTrue(cj.ach_req is req is newreq)
        self.assertEqual(req.get_origin_req_host(), "example.com")
        self.assertTrue(not req.is_unverifiable())
        newr = h.http_response(req, r)
        self.assertTrue(cj.ec_req is req)
        self.assertTrue(cj.ec_r is r is newr)
Project: python2-tracer    Author: extremecoders-re    | Project source | File source
def test_cookie_redirect(self):
        # cookies shouldn't leak into redirected requests
        from cookielib import CookieJar

        from test.test_cookielib import interact_netscape

        cj = CookieJar()
        interact_netscape(cj, "http://www.example.com/", "spam=eggs")
        hh = MockHTTPHandler(302, "Location: http://www.cracker.com/\r\n\r\n")
        hdeh = urllib2.HTTPDefaultErrorHandler()
        hrh = urllib2.HTTPRedirectHandler()
        cp = urllib2.HTTPCookieProcessor(cj)
        o = build_test_opener(hh, hdeh, hrh, cp)
        o.open("http://www.example.com/")
        self.assertTrue(not hh.req.has_header("Cookie"))
Project: CAAPR    Author: Stargrazer82301    | Project source | File source
def __init__(self, username, password=None):
        # Get password if necessary
        if password is None:
            password = getpass()
        # Get URL for the database
        self.db_url = "http://galaxy-catalogue.dur.ac.uk:8080/Eagle"
        # Set up authentication and cookies
        self.password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
        self.password_mgr.add_password(None, self.db_url, username, password)
        self.opener = urllib2.OpenerDirector()
        self.auth_handler   = urllib2.HTTPBasicAuthHandler(self.password_mgr)
        self.cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)

    ## This function executes an SQL query on the database and returns the result as a record array.
Project: CAAPR    Author: Stargrazer82301    | Project source | File source
def __init__(self, username, password=None):
        # Get password if necessary
        if password is None:
            password = getpass()
        # Get URL for the database
        self.db_url = "http://galaxy-catalogue.dur.ac.uk:8080/Eagle"
        # Set up authentication and cookies
        self.password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
        self.password_mgr.add_password(None, self.db_url, username, password)
        self.opener = urllib2.OpenerDirector()
        self.auth_handler   = urllib2.HTTPBasicAuthHandler(self.password_mgr)
        self.cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)

    ## This function executes an SQL query on the database and returns the result as a record array.
Project: bokken    Author: thestr4ng3r    | Project source | File source
def __init__(self):
        import cookielib
        self.cookiejar = cookielib.CookieJar()
        self._cookie_processor = urllib2.HTTPCookieProcessor(self.cookiejar)
        self.form = None

        self.url = "http://0.0.0.0:8080/"
        self.path = "/"

        self.status = None
        self.data = None
        self._response = None
        self._forms = None
Project: Crawler    Author: xinhaojing    | Project source | File source
def __init__(self, headers = {},debug = True, p = ''):
        #timeout 
        self.timeout = 10
        #cookie handler
        self.cookie_processor = urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar())

        #debug handler
        self.debug = debug
        if self.debug:
            self.httpHandler = urllib2.HTTPHandler(debuglevel=1)
            self.httpsHandler = urllib2.HTTPSHandler(debuglevel=1)
        else:
            self.httpHandler = urllib2.HTTPHandler(debuglevel=0)
            self.httpsHandler = urllib2.HTTPSHandler(debuglevel=0)

        #proxy handler (http)
        if p and p not in ('None', 'NULL'):
            self.proxy_handler = urllib2.ProxyHandler({'http': p})
        else:
            self.proxy_handler = urllib2.ProxyHandler({})

        #opener
        self.opener = urllib2.build_opener( self.cookie_processor,self.proxy_handler, self.httpHandler, self.httpsHandler)
        self.opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'),]

        #header
        for key in headers.keys():
            cur=self._replace(key)
            if cur!=-1:
                self.opener.addheaders.pop(cur)
            self.opener.addheaders += [(key, headers[key]), ]