Python utils module: log() example source code

We extracted the following 49 code examples from open-source Python projects to illustrate how utils.log() is used.
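
The examples use utils.log() in two distinct forms: most projects (rental, douban, jd_comment, unity) call it as a plain function that takes a message and an optional logging level, while py_mbot applies it as a decorator factory that logs calls to the wrapped handler. For orientation, a minimal sketch of the two forms might look like the following; the signatures are assumptions for illustration, not taken from any project's actual utils module:

import functools
import logging

# Function form (rental/douban/jd_comment/unity style), assumed signature:
#     utils.log('message')  or  utils.log('message', logging.WARNING)
def log(msg, level=logging.DEBUG):
    logging.getLogger('utils').log(level, msg)

# Decorator-factory form (py_mbot style); there the factory itself is named
# log and is applied as @utils.log(logger, print_ret=False).
def log_call(logger, print_ret=True):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            logger.debug('calling %s args=%r kwargs=%r', func.__name__, args, kwargs)
            ret = func(*args, **kwargs)
            if print_ret:
                logger.debug('%s returned %r', func.__name__, ret)
            return ret
        return wrapper
    return decorator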

Project: py_mbot    Author: evgfilim1
def register_command(self, commands, callback, allow_edited=False):
        """Registers commands handler

        Args:
            commands(list|tuple): list of commands to register
            callback(function): callable object to execute
            allow_edited(Optional[bool]): pass edited messages

        Raises:
            ValueError: if one of commands in ``commands`` was already registered

        """
        for command in commands:
            self._register_command(command)

        @utils.log(logger, print_ret=False)
        def process_update(bot, update):
            lang = utils.get_lang(self._storage, update.effective_user)
            callback(update.effective_message,
                     update.effective_message.text.split(' ')[1:], lang)
        self._dispatcher.add_handler(CommandHandler(commands, process_update,
                                                    allow_edited=allow_edited))
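
A hypothetical registration call for the handler above might look like this (the module instance m and the greet callback are illustrative names only, not part of the project):

def greet(message, args, lang):
    message.reply_text('hello %s' % ' '.join(args))

m.register_command(['hello', 'hi'], greet, allow_edited=False)
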
Project: rental    Author: meihuanyu
def success_parse(self, response):
        proxy = response.meta.get('proxy_info')
        table = response.meta.get('table')

        self.save_page(proxy.ip, response.body)
        self.log('success_parse speed:%s meta:%s' % (time.time() - response.meta.get('cur_time'), response.meta))

        proxy.vali_count += 1
        proxy.speed = time.time() - response.meta.get('cur_time')
        if self.success_mark in response.text or self.success_mark == '':
            if table == self.name:
                if proxy.speed > self.timeout:
                    self.sql.del_proxy_with_id(table, proxy.id)
                else:
                    self.sql.update_proxy(table, proxy)
            else:
                if proxy.speed < self.timeout:
                    self.sql.insert_proxy(table_name = self.name, proxy = proxy)
        else:
            if table == self.name:
                self.sql.del_proxy_with_id(table_name = table, id = proxy.id)

        self.sql.commit()
Project: douban    Author: awolfly9
def delete_proxy(self, proxy):
        try:
            rets = proxy.split(':')
            ip = rets[1]
            ip = ip[2:]

            for item in self.proxys:
                if item.get('ip') == ip:
                    self.proxys.remove(item)
                    break

            if len(self.proxys) < 3:
                self.update_proxy()

            utils.log('--------------delete ip:%s-----------' % ip)
            r = requests.get(url = '%s/delete?name=%s&ip=%s' % (self.address, 'douban', ip))
            return r.text
        except:
            return False
Project: douban    Author: awolfly9
def __init__(self, *a, **kw):
        super(Movieurls, self).__init__(*a, **kw)
        self.log_dir = 'log/%s' % self.name

        utils.make_dir(self.log_dir)

        self.sql = SqlHelper()
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US,en;q=0.5',
            'Connection': 'keep-alive',
            'Host': 'movie.douban.com',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:51.0) Gecko/20100101 Firefox/51.0',
        }

        self.init()
Project: douban    Author: awolfly9
def get_all_category(self, response):
        self.write_file('%s/category.html' % self.log_dir, response.body)
        tags = response.xpath('//table/tbody/tr/td/a/@href').extract()
        for tag in tags:
            res = tag.split('/')
            res = res[len(res) - 1]
            utils.log('tag:%s' % tag)

            url = response.urljoin(tag)
            yield Request(
                    url = url,
                    headers = self.headers,
                    dont_filter = True,
                    meta = {
                        'tag': res,
                        'download_timeout': 20,
                        # 'is_proxy': False,
                    },
                    callback = self.get_page_count,
                    errback = self.error_parse
            )
Project: douban    Author: awolfly9
def __init__(self, *a, **kw):
        super(Bookurls, self).__init__(*a, **kw)
        self.log_dir = 'log/%s' % self.name

        utils.make_dir(self.log_dir)

        self.sql = SqlHelper()
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US,en;q=0.5',
            'Connection': 'keep-alive',
            'Host': 'book.douban.com',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:51.0) Gecko/20100101 Firefox/51.0',
        }

        self.init()
Project: douban    Author: awolfly9
def get_all_category(self, response):
        self.write_file('%s/category.html' % self.log_dir, response.body)
        tags = response.xpath('//table/tbody/tr/td/a/@href').extract()
        for tag in tags:
            res = tag.split('/')
            tag = res[len(res) - 1]
            utils.log('tag:%s' % tag)

            url = response.urljoin(tag)
            yield Request(
                    url = url,
                    headers = self.headers,
                    dont_filter = True,
                    meta = {
                        'tag': tag,
                        'download_timeout': 20,
                        # 'is_proxy': False,
                    },
                    callback = self.get_page_count,
                    errback = self.error_parse
            )
Project: jd_comment    Author: awolfly9
def insert_json(self, data = {}, table_name = None, commit = False):
        try:
            keys = []
            vals = []
            for k, v in data.items():
                keys.append(k)
                vals.append(v)
            val_str = ','.join(['%s'] * len(vals))
            key_str = ','.join(keys)

            command = "INSERT IGNORE INTO {table} ({keys}) VALUES({values})". \
                format(keys = key_str, values = val_str, table = table_name)
            # utils.log('insert_json data:%s' % data)
            self.cursor.execute(command, tuple(vals))

            if commit:
                self.conn.commit()
        except Exception, e:
            utils.log('sql helper insert_json exception msg:%s' % e, logging.WARNING)
Project: IPProxyTool    Author: awolfly9
def success_parse(self, response):
        proxy = response.meta.get('proxy_info')
        table = response.meta.get('table')

        self.save_page(proxy.ip, response.body)
        self.log('success_parse speed:%s meta:%s' % (time.time() - response.meta.get('cur_time'), response.meta))

        proxy.vali_count += 1
        proxy.speed = time.time() - response.meta.get('cur_time')
        if self.success_content_parse(response):
            if table == self.name:
                if proxy.speed > self.timeout:
                    self.sql.del_proxy_with_id(table, proxy.id)
                else:
                    self.sql.update_proxy(table, proxy)
            else:
                if proxy.speed < self.timeout:
                    self.sql.insert_proxy(table_name = self.name, proxy = proxy)
        else:
            if table == self.name:
                self.sql.del_proxy_with_id(table_name = table, id = proxy.id)

        self.sql.commit()
Project: script.reddit.reader    Author: gedisony
def run(self):
#        threading.Thread.run(self)
        #log('  p-running ' + str( self.work_list ))
        self.running = True
        # Rather than running forever, check to see if it is still OK
        while self.running:
            try:
                # Don't block
                #item = self.queue.get(block=False)
                self.do_work()

                self.ev.set()
                #work done, end
                log( '  p-all done '  )
                self.stop()
            except Empty:
                # Allow other stuff to run
                time.sleep(0.1)
Project: script.reddit.reader    Author: gedisony
def return_action_and_link_tuple_accdg_to_setting_wether_to_use_addon_for_youtube(self, video_id):
        link_actn=''
        link_=''

        if video_id:
            if use_addon_for_youtube:
                link_actn=self.DI_ACTION_PLAYABLE
                link_="plugin://plugin.video.youtube/play/?video_id=" + video_id
            else:
                link_actn=self.DI_ACTION_YTDL
                #some youtube links take a VERY long time for youtube_dl to parse. we simplify it by getting the video id and using a simpler url
                #BUT if there is a time skip code in the url, we just pass it right through. youtube-dl can handle this part.
                #   time skip code comes in the form of ?t=122  OR #t=1m45s OR ?t=2:43
                link_=self.build_youtube_url_with_video_id(video_id)
            #log('    returning:{0} {1}'.format(link_actn, link_))
            return link_actn, link_
Project: script.reddit.reader    Author: gedisony
def get_video_id(self, yt_url):
        #video_id_regex=re.compile('(?:youtube(?:-nocookie)?\.com/(?:\/\S+\/|(?:v|e(?:mbed)?)\/|\S*?[?&;]v=)|youtu\.be\/)([a-zA-Z0-9_-]{11})', re.DOTALL)
        #added parsing for video_id in kodi_youtube_plugin url
        video_id_regex=re.compile('(?:youtube(?:-nocookie)?\.com/(?:\/\S+\/|(?:v|e(?:mbed)?)\/|\S*?[?&;]v=)|youtu\.be\/|plugin:\/\/plugin\.video\.youtube\/play\/\?video_id=)([a-zA-Z0-9_-]{11})', re.DOTALL)
        video_id=''
        match = video_id_regex.findall(yt_url)
        if match:
            video_id=match[0]
        else:
            #log('    second parsing for video id:'+yt_url)
            #for parsing this: https://www.youtube.com/attribution_link?a=y08k0cdNBKw&u=%2Fwatch%3Fv%3DQOVrrL5KtsM%26feature%3Dshare%26list%3DPLVonsjaXkSpfuIv02l6IM1pN1Z3IfXWUW%26index%3D4
            o = urlparse.urlparse(yt_url)
            query = urlparse.parse_qs(o.query)
            if 'a' in query and 'u' in query:   #if all (k in query for k in ("a","u")):
                u=query['u'][0]
                #log('   u  '+ repr(u)) #  <--  /watch?v=QOVrrL5KtsM&feature=share&list=PLVonsjaXkSpfuIv02l6IM1pN1Z3IfXWUW&index=4
                match = video_id_regex.findall('youtube.com'+u)
                if match:
                    video_id=match[0]
                else:
                    log("    Can't get youtube video id:"+yt_url)
        return video_id
Project: script.reddit.reader    Author: gedisony
def build_query_params_for_search(self,youtube_api_key,search_string,type_='video'):
        from utils import ret_bracketed_option
        #specify different results by adding order_option in square brackets in the search string.
        stripped_string, order_option=ret_bracketed_option(search_string)  #developer feature: specify the order in search parameter "[date]" etc.
        if order_option:
            if order_option.lower() in['date','rating','relevance','title','videocount','viewcount']:
                log('  youtube search:using special order option [{0}]'.format(order_option))
            else:
                log('  youtube search:unsupported order option [{0}]'.format(order_option))
                order_option='relevance'
                stripped_string=search_string
        else:
            order_option='relevance'

        return  'search', {
                'key': youtube_api_key,
                'fields':'items(kind,id(videoId),snippet(publishedAt,channelTitle,channelId,title,description,thumbnails(medium)))',
                'type': type_,         #video,channel,playlist.
                'maxResults': '50',      # Acceptable values are 0 to 50
                'part': 'snippet',
                'order': order_option, #date,rating,relevance,title,videoCount,viewCount
                'q': stripped_string,
                'safeSearch':'moderate' if hide_nsfw else 'none',
            }
Project: script.reddit.reader    Author: gedisony
def get_video_durations(self,youtube_api_key,videoIds):
        from utils import ytDurationToSeconds
        durations=[]
        query_params={'key': youtube_api_key,
                'part': 'contentDetails',
                'id': ",".join(videoIds),            #','.join(map(str, myList))#if the list contains numbers
            }
        api_url='https://www.googleapis.com/youtube/v3/{0}?{1}'.format("videos",urllib.urlencode(query_params))
        r = self.requests_get(api_url)
        j=r.json()
        #log(repr(j))
        for i in j.get('items'):
            d=clean_str(i, ['contentDetails','duration'],'')
            durations.append(ytDurationToSeconds(d))
            #import iso8601
            #iso8601.parse_duration(d)
        return durations
Project: script.reddit.reader    Author: gedisony
def get_gallery_info(self, media_url):
        gallery_name = media_url.split("/gallery/",1)[1]
        if gallery_name=="":
            return False

        request_url="https://api.imgur.com/3/gallery/"+gallery_name

        #log("    imgur:check if album- request_url---"+request_url )
        try:
            r = self.requests_get(request_url, headers=ClassImgur.request_header)
        except requests.exceptions.HTTPError:
            #http://imgur.com/gallery/Ji0IWhG this link has /gallery/ but returns 404 if asked as gallery
            request_url="https://api.imgur.com/3/image/"+gallery_name
            #log('      Trying a different query:'+request_url)
            try:
                r = self.requests_get(request_url, headers=ClassImgur.request_header)
            except requests.exceptions.HTTPError:
                #https://imgur.com/gallery/knbXW   this link has is not "image" nor "gallery" but is "album"
                request_url="https://api.imgur.com/3/album/"+gallery_name
                #log('      Trying a different query:'+request_url)
                r = self.requests_get(request_url, headers=ClassImgur.request_header)
                #there has to be a better way to do this...
        return r
Project: script.reddit.reader    Author: gedisony
def ask_imgur_for_link(self, media_url):
        #sometimes, imgur links are posted without the extension(gif,jpg etc.). we ask imgur for it.
        #log("  ask_imgur_for_link: "+media_url )

        media_url=media_url.split('?')[0] #get rid of the query string
        img_id=media_url.split("com/",1)[1]  #.... just get whatever is after "imgur.com/"   hope nothing is beyond the id
        #log("    ask_imgur_for_link: "+img_id )

        #6/30/2016: noticed a link like this: http://imgur.com/topic/Aww/FErKmLG
        if '/' in img_id:
            #log("  split_ask_imgur_for_link: "+ str( img_id.split('/')) )
            img_id = img_id.split('/')[-1]     #the -1 gets the last item on the list returned by split

        if img_id:
            request_url="https://api.imgur.com/3/image/"+img_id
            r = self.requests_get(request_url, headers=ClassImgur.request_header)
            j=r.json()

            if j['data'].get('mp4'):
                return j['data'].get('mp4')
            else:
                return j['data'].get('link')
Project: script.reddit.reader    Author: gedisony
def get_playable_url(self, media_url='', is_probably_a_video=False ):
        if not media_url:
            media_url=self.media_url

        self.get_video_id()

        if self.video_id:
            #if use_ytdl_for_yt:  #ytdl can also handle vimeo
            # (10/2/2016) --- please only use script.module.youtube.dl if possible and remove these dependencies.
            self.link_action=sitesBase.DI_ACTION_YTDL
            return media_url, self.TYPE_VIDEO
            #else:
            #self.link_action=self.DI_ACTION_PLAYABLE
            #return "plugin://plugin.video.vimeo/play/?video_id=" + self.video_id, self.TYPE_VIDEO
        else:
            log("    %s cannot get videoID %s" %( self.__class__.__name__, media_url) )
            #feed it to ytdl. sometimes link points to multiple streams: https://vimeo.com/mrmichaelrobinson/videos/
            self.link_action=sitesBase.DI_ACTION_YTDL
            return media_url, self.TYPE_VIDEO
Project: script.reddit.reader    Author: gedisony
def get_playable_url(self, media_url='', is_probably_a_video=False ):
        if not media_url:
            media_url=self.media_url

        # *** needs access token to get playable url. we'll just have ytdl handle dailymotion
        self.link_action=sitesBase.DI_ACTION_YTDL
        return media_url, self.TYPE_VIDEO

#         self.get_video_id()
#         #log('    videoID:' + self.video_id)
#         if self.video_id:
#             request_url= 'https://api.dailymotion.com/video/' + self.video_id
#
#             #https://api.dailymotion.com/video/x4qviso?fields=aspect_ratio,stream_h264_hd_url,poster_url,thumbnail_url,sprite_320x_url
#
#             content = requests.get(request_url )
#             log('    ' + str(content.text))
#             if content.status_code==200:
#                 j = content.json()
#                 log( pprint.pformat(j, indent=1) )
#             else:
#                 log("  dailymotion query failed:" + str(content.status_code) )
#         else:
#             log("    %s cannot get videoID %s" %( self.__class__.__name__, media_url) )
Project: script.reddit.reader    Author: gedisony
def ret_blog_post_request(self):
        o=urlparse.urlparse(self.media_url)   #scheme, netloc, path, params, query, fragment
        #log( '  blogpath=' + o.path )
        blog_path= o.path

        if not blog_path:
            log('    could not determine blog path in:' + self.media_url)
            return None

        blog_info_request='https://www.googleapis.com/blogger/v3/blogs/byurl?' + self.key_string + '&url=' + self.media_url
        content = self.requests_get(blog_info_request)

        j = content.json()
        #log( pprint.pformat(j, indent=1) )
        blog_id=j.get('id')

        blog_post_request='https://www.googleapis.com/blogger/v3/blogs/%s/posts/bypath?%s&path=%s' %( blog_id, self.key_string, blog_path)
        #log( '    api request:'+blog_post_request )
        content = self.requests_get(blog_post_request)
        return content
Project: script.reddit.reader    Author: gedisony
def ret_album_list(self,album_url):
        r = self.requests_get(album_url)
        jo=re.compile('window._sharedData = ({.*});</script>').findall(r.text)
        if jo:
            #import pprint; log( pprint.pformat(jo[0], indent=1) )
            try:
                j=json.loads(jo[0] )
                entry_data=j.get('entry_data')
                if entry_data:
                    if 'ProfilePage' in entry_data.keys():
                        profile_page=entry_data.get('ProfilePage')[0]

                        images=self.ret_images_dict_from_album_json(profile_page)
                        #for i in images: log( '##' + repr(i))
                        self.assemble_images_dictList(images)

                        return self.dictList
                else:
                    log("  Could not get 'entry_data' from scraping instagram [window._sharedData = ]")

            except (AttributeError,TypeError) as e:
                log('    exception while parsing json:'+str(e))
Project: script.reddit.reader    Author: gedisony
def get_playable(self, media_url='', is_probably_a_video=False ):
        media_type=self.TYPE_VIDEO
        if not media_url:
            media_url=self.media_url

        filename,ext=parse_filename_and_ext_from_url(media_url)
        #log('    file:%s.%s' %(filename,ext)  )
        if ext in ["mp4","webm","gif"]:
            if ext=='gif':
                media_type=self.TYPE_GIF
                self.link_action=sitesBase.DI_ACTION_PLAYABLE
                self.thumb_url=media_url.replace( '%s.%s'%(filename,ext) , '%s.jpg' %(filename))
                self.poster_url=self.thumb_url
                self.media_url=media_url.replace( '%s.%s'%(filename,ext) , '%s.mp4' %(filename))   #just replacing gif to mp4 works
            return self.media_url, media_type

        if ext in image_exts:  #excludes .gif
            self.link_action='viewImage'
            self.thumb_url=media_url
            self.poster_url=self.thumb_url
            return media_url,self.TYPE_IMAGE

        return self.get_playable_url(self.media_url, is_probably_a_video=False )
Project: script.reddit.reader    Author: gedisony
def get_video_id(self):
        #looks like the filename is also the video id, and some links have it at the "-" end of the url
        self.video_id=''

        #https://j.gifs.com/zpOmn5.gif       <-- this is handled in get_playable -> .gif replaced with .mp4
        #http://gifs.com/gif/qxBQMp                   <-- parsed here.
        #https://gifs.com/gif/yes-nooo-whaaa-5yZ8rK   <-- parsed here.

        match = re.compile('gifs\.com/(?:gif/)?(.+)(?:\.gif|$)').findall(self.media_url)
        #log('    matches' + repr(match) )

        if match:
            vid=match[0]
            if '-' in vid:
                vid= vid.split('-')[-1]

            self.video_id=vid
Project: script.reddit.reader    Author: gedisony
def ret_album_list(self, album_url, thumbnail_size_code=''):
        #returns an object (list of dicts) that contain info for the calling function to create the listitem/addDirectoryItem
        content = self.requests_get( album_url)

        match = re.compile('var album\s=\s(.*)\;').findall(content.text)
        #log('********* ' + match[0])
        if match:
            j = json.loads(match[0])
            images=self.ret_images_dict_from_album_json(j)
            self.assemble_images_dictList(images)
            #self.assemble_images_dictList(   ( [ s.get('description'), prefix+s.get('url_full')] for s in items)    )

        else:
            log('      eroshare:ret_album_list: var album string not found. ')

        return self.dictList
Project: script.reddit.reader    Author: gedisony
def get_playable_url(self, media_url, is_probably_a_video=False ):
        if self.is_album(media_url):
            log('  is an album:'+ media_url )
            self.media_type = self.TYPE_ALBUM
            return media_url, sitesBase.TYPE_ALBUM

        log('  scraping:'+ media_url )
        content = self.requests_get( media_url)

        #https://github.com/downthemall/anticontainer/blob/master/plugins/imgbox.com.json
        match = re.compile("id=\"img\".+?src=\"(.+?)\" title=\"(.+?)\"", re.DOTALL).findall(content.text)
        #log('    match:' + repr(match))
        if match:
            #log('      match' + match[0][0])
            self.poster_url=match[0][0]
            self.thumb_url=self.poster_url
            return self.poster_url, self.TYPE_IMAGE
        else:
            log("    %s can't scrape image " %(self.__class__.__name__ ) )
Project: script.reddit.reader    Author: gedisony
def get_playable_url(self, link_url, is_probably_a_video):
        from reddit import assemble_reddit_filter_string
        subreddit=self.get_video_id(link_url)
        self.video_id=subreddit
        #log('    **get_playable_url subreddit=' + self.video_id )

        self.media_type=sitesBase.TYPE_REDDIT

        #if link_url is in the form of https://np.reddit.com/r/teslamotors/comments/50bc6a/tesla_bumped_dying_man_up_the_production_queue_so/d72vfbg?context=2
        if '/comments/' in link_url:
            self.link_action='listLinksInComment'
            return link_url, self.media_type
        else:
            #link_url is in the form of "r/subreddit". this type of link is found in comments
            if subreddit:
                self.link_action='listSubReddit'
                reddit_url=assemble_reddit_filter_string('',subreddit)
                return reddit_url, self.media_type
            if link_url.startswith('/u/'):
                author=link_url.split('/u/')[1]
                self.link_action='listSubReddit'
                #show links submitted by author
                reddit_url=assemble_reddit_filter_string("","/user/"+author+'/submitted')
                return reddit_url, self.media_type
        return '',''
Project: script.reddit.reader    Author: gedisony
def get_playable_url(self, link_url, is_probably_a_video=False ):
        self.media_url=link_url

        #u=media_url.split('?')[0]
        html=self.requests_get(link_url)
        #if '11616' in link_url:log(html.text)

        images=self.get_images(html.text,self.p)
        if images:
            #if '11616' in link_url:log(pprint.pformat(images))
            self.media_type=self.TYPE_ALBUM
            return self.media_url, self.media_type
        else:
            #default to youtube-dl video.
            #direct image link posts are already taken care of in get_playable()
            #the only video sample i found is not playable via ytdl. TODO: .mp4 is in javascript block
            #    http://acidcow.com/video/61149-russian_soldiers_got_the_steel_balls.html
            self.link_action=self.DI_ACTION_YTDL
            return self.media_url, self.TYPE_VIDEO
Project: script.reddit.reader    Author: gedisony
def sitesManager( media_url ):
    #picks which class will handle the media identification and extraction for website_name

    #first resolve url shortener
    shorteners=['bit.ly','goo.gl','tinyurl.com']
    if any(shortener in media_url for shortener in shorteners):
        #v=sitesBase.requests_get('https://unshorten.me/s/'+ urllib.quote_plus( media_url ) )
        v = requests.head( media_url, timeout=REQUEST_TIMEOUT, allow_redirects=True )
        log('  short url(%s)=%s'%(media_url,repr(v.url)))
        media_url=v.url

    for subcls in sitesBase.__subclasses__():
        regex=subcls.regex
        if regex:
            match=re.compile( regex  , re.I).findall( media_url )
            #log("testing:{0}[{1}] {2}".format(media_url,regex, repr(match)) )
            if match :
                return subcls( media_url )
Project: script.reddit.reader    Author: gedisony
def ydtl_get_playable_url( url_to_check ):
    from YoutubeDLWrapper import YoutubeDLWrapper, _selectVideoQuality

    #log('ydtl_get_playable_url:' +url_to_check )
    if link_url_is_playable(url_to_check)=='video':
        return url_to_check

    video_urls=[]
    ytdl=YoutubeDLWrapper()
    try:
        ydl_info=ytdl.extract_info(url_to_check, download=False)

        video_infos=_selectVideoQuality(ydl_info, quality=1, disable_dash=True)

        for video_info in video_infos:
            video_urls.append(video_info.get('xbmc_url'))
        return video_urls

    except:
        return None
Project: script.reddit.reader    Author: gedisony
def onClick(self, controlID):

        if controlID == self.main_control_id:
            self.gui_listbox_SelectedPosition = self.gui_listbox.getSelectedPosition()
            item = self.gui_listbox.getSelectedItem()
            if not item: #panel listbox control allows user to pick non-existing item by mouse/touchscreen. bypass it here.
                return

            if self.include_parent_directory_entry and self.gui_listbox_SelectedPosition == 0:
                self.close()  #include_parent_directory_entry means that we've added a ".." as the first item on the list onInit

            self.process_clicked_item(item)
        else:
            clicked_control=self.getControl(controlID)
            log('clicked on controlID='+repr(controlID))
            self.process_clicked_item(clicked_control)
Project: script.reddit.reader    Author: gedisony
def process_clicked_item(self, clicked_item):
        if isinstance(clicked_item, xbmcgui.ListItem ):
            di_url=clicked_item.getProperty('onClick_action') #this property is created when assembling the kwargs.get("listing") for this class
            item_type=clicked_item.getProperty('item_type').lower()
        elif isinstance(clicked_item, xbmcgui.ControlButton ):
            #buttons have no setProperty() hiding it in Label2 no good.
            #ast.literal_eval(cxm_string):
            #di_url=clicked_item.getLabel2()
            #log('  button label2='+repr(di_url))
            #item_type=clicked_item.getProperty('item_type').lower()
            #buttons have no properties set here; initialize to avoid a NameError in the log call below
            di_url=None
            item_type=''

        log( "  clicked %s  IsPlayable=%s  url=%s " %( repr(clicked_item),item_type, di_url )   )
        if item_type=='playable':
                #a big thank you to spoyser (http://forum.kodi.tv/member.php?action=profile&uid=103929) for this help
                pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
                pl.clear()
                pl.add(di_url, clicked_item)
                xbmc.Player().play(pl, windowed=False)
        elif item_type=='script':
            #if user clicked on 'next' we close this screen and load the next page.
            if 'mode=listSubReddit' in di_url:
                self.busy_execute_sleep(di_url,500,True )
            else:
                self.busy_execute_sleep(di_url,3000,False )
Project: script.reddit.reader    Author: gedisony
def onAction(self, action):
        if action in [ xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK ]:
            self.close()

        try:focused_control=self.getFocusId()
        except:focused_control=0
        #log('focused control='+repr(focused_control)+' action='+repr(action))
        if focused_control==self.main_control_id:  #main_control_id is the listbox

            self.gui_listbox_SelectedPosition  = self.gui_listbox.getSelectedPosition()
            item = self.gui_listbox.getSelectedItem()

            item_type=item.getProperty('item_type').lower()

            if action in [ xbmcgui.ACTION_CONTEXT_MENU ]:
                ACTION_manage_subreddits=item.getProperty('ACTION_manage_subreddits')
                log( "   left pressed  %d IsPlayable=%s  url=%s " %(  self.gui_listbox_SelectedPosition, item_type, ACTION_manage_subreddits )   )
                if ACTION_manage_subreddits:
                    xbmc.executebuiltin( ACTION_manage_subreddits  )
                    self.close()
Project: script.reddit.reader    Author: gedisony
def populate_tlc_children(self,tlc_id):
        #controls_generator=generator(controls)
        child_comments_tuple_generator=generator(self.child_lists[tlc_id])

        for control_id in self.x_controls:
            control=self.getControl(control_id)

            try:
                post_text,author,depth=child_comments_tuple_generator.next()
            except StopIteration:
                post_text,author,depth=None,None,0

            if post_text:
                #control.setText( ("[B]"+repr(control_id-1000)+"[/B] " + post_text) if post_text else None)
                #control.setText(post_text+' '+author)
                #log(('.'*depth)+repr(post_text))
                control.setText(post_text)
            else:
                control.setText(None)
            #use animation to stagger the comments according to how deep they are
            control.setAnimations( [ animation_format(0,100,'slide', 0, (20*depth), 'sine', 'in' ) ] )

        #either there's no more child comments or we run out of controls
        return
Project: script.reddit.reader    Author: gedisony
def onClick(self, controlID):
        clicked_control=self.getControl(controlID)
        #log('clicked on controlID='+repr(controlID))
        #button control does not have a property, we use a different method.
        value_to_search=clicked_control.getLabel() #we'll just use the Property('link_url') that we used as button label to search
        listitems=self.listing

        li = next(l for l in listitems if l.getProperty('link_url') == value_to_search)

        item_type=li.getProperty('item_type')
        di_url=li.getProperty('onClick_action')

        log( "  clicked %s  IsPlayable=%s  url=%s " %( repr(clicked_control),item_type, di_url )   )
        if item_type=='playable':
                #a big thank you to spoyser (http://forum.kodi.tv/member.php?action=profile&uid=103929) for this help
                pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
                pl.clear()
                pl.add(di_url, value_to_search)
                xbmc.Player().play(pl, windowed=False)
        elif item_type=='script':
            self.busy_execute_sleep(di_url,5000,False)
Project: script.reddit.reader    Author: gedisony
def __init__(self, thread_event, image_queue):
        #self.log('__init__ start')
        self.exit_requested = False
        self.toggle_info_display_requested=False
        self.background_control = None
        self.preload_control = None
        self.image_count = 0
        #self.image_controls = []
        self.tni_controls = []
        self.global_controls = []
        self.exit_monitor = ExitMonitor(self.stop)

        self.init_xbmc_window()
#         self.xbmc_window = ScreensaverWindow(self.stop)
#         self.xbmc_window.show()

        self.init_global_controls()
        self.load_settings()
        self.init_cycle_controls()
        self.stack_cycle_controls()
        #self.log('__init__ end')
Project: script.reddit.reader    Author: gedisony
def init_cycle_controls(self):
        #self.log('  init_cycle_controls start')
        for _ in xrange(self.IMAGE_CONTROL_COUNT):
            img_control = ControlImage(0, 0, 0, 0, '', aspectRatio=2)  #(values 0 = stretch (default), 1 = scale up (crops), 2 = scale down (black bars)
            txt_control = ControlTextBox(0, 0, 0, 0, font='font16')
#                     xbfont_left = 0x00000000
#                     xbfont_right = 0x00000001
#                     xbfont_center_x = 0x00000002
#                     xbfont_center_y = 0x00000004
#                     xbfont_truncated = 0x00000008
            #ControlLabel(x, y, width, height, label, font=None, textColor=None, disabledColor=None, alignment=0, hasPath=False, angle=0)
            #txt_control = ControlLabel(0, 0, 0, 0, '', font='font30', textColor='', disabledColor='', alignment=6, hasPath=False, angle=0)

            #self.image_controls.append(img_control)
            self.tni_controls.append([txt_control,img_control])
        #self.log('  init_cycle_controls end')
Project: script.reddit.reader    Author: gedisony
def get_description_and_images(self, source):
        #self.log('get_images2')
        self.image_aspect_ratio = 16.0 / 9.0

        images = []

        if source == 'image_folder':
            #image folder source not used
            path = '' #SlideshowCacheFolder  #addon.getSetting('image_path')
            if path:
                images = self._get_folder_images(path)
        elif source == 'q':
            #implement width & height extract here.
            #images=[[item[0], item[1],item[2], item[3], ] for item in q.queue]

            #[title,media_url, width, height, len(entries), description])
            images=[  [i.get('li_label'), i.get('DirectoryItem_url'),i.get('width'), i.get('height'), i.get('description') ] for i in q.queue]

            log( "queue size:%d" %q.qsize() )
            #texts=[item[0] for item in q.queue]
            #for i in images: self.log('   image: %s' %i)
            #self.log('    %d images' % len(images))

        return images
Project: script.reddit.reader    Author: gedisony
def _get_folder_images(self, path):
        self.log('_get_folder_images started with path: %s' % repr(path))
        _, files = xbmcvfs.listdir(path)
        images = [
            xbmc.validatePath(path + f) for f in files
            if f.lower()[-3:] in ('jpg', 'png')
        ]
        #if addon.getSetting('recursive') == 'true':
        #    for directory in dirs:
        #        if directory.startswith('.'):
        #            continue
        #        images.extend(
        #            self._get_folder_images(
        #                xbmc.validatePath('/'.join((path, directory, '')))
        #            )
        #        )
        self.log('_get_folder_images ends')
        return images
Project: script.reddit.reader    Author: gedisony
def wait(self):
        # wait in chunks of 500ms to react earlier on exit request
        chunk_wait_time = int(CHUNK_WAIT_TIME)
        remaining_wait_time = int(self.NEXT_IMAGE_TIME)
        while remaining_wait_time > 0:
            if self.exit_requested:
                self.log('wait aborted')
                return
            if self.toggle_info_display_requested:  #this value is set on specific keypress in action_id_handler
                #self.log('toggle_info_display_requested')
                self.toggle_info_display_requested=False
                self.toggle_info_display_handler()
            if remaining_wait_time < chunk_wait_time:
                chunk_wait_time = remaining_wait_time
            remaining_wait_time -= chunk_wait_time
            xbmc.sleep(chunk_wait_time)
Project: script.reddit.reader    Author: gedisony
def del_controls(self):
        #self.log('del_controls start')
        #self.xbmc_window.removeControls(self.img_controls)
        #each tni_controls entry is a [text_control, image_control] pair (see init_cycle_controls); remove them all
        for txt_control, img_control in self.tni_controls:
            try: self.xbmc_window.removeControls([txt_control, img_control])
            except: pass

        self.xbmc_window.removeControls(self.global_controls)
        self.preload_control = None
        self.background_control = None
        self.loading_control = None
        self.tni_controls = []
        self.global_controls = []
        self.xbmc_window.close()
        self.xbmc_window = None
        #self.log('del_controls end')
Project: script.reddit.reader    Author: gedisony
def populate_subreddits_pickle():
    from guis import progressBG
    loading_indicator=progressBG(translation(32023))   #Gathering icons..

    with open(subredditsFile, 'r') as fh:
        subreddit_settings = fh.readlines()

    #xbmc_notify("initializing", "Building icons cache", 5000)
    loading_indicator.set_tick_total(len(subreddit_settings))
    for entry in subreddit_settings:
        entry=entry.strip()
        loading_indicator.tick(1,entry)
        s=convert_settings_entry_into_subreddits_list_or_domain(entry)
        if s:
            #t = threading.Thread(target=get_subreddit_entry_info_thread, args=(s,) )
            log('processing saved entry:'+repr(entry))
            get_subreddit_entry_info_thread(s)

    xbmc.sleep(2000)
    loading_indicator.end()
Project: script.reddit.reader    Author: gedisony
def listAlbum(album_url, name, type_):
    from slideshow import slideshowAlbum
    from domains import sitesManager
    log("    listAlbum:"+album_url)

    hoster = sitesManager( album_url )
    #log( '  %s %s ' %(hoster.__class__.__name__, album_url ) )

    if hoster:
        dictlist=hoster.ret_album_list(album_url)

        if type_=='return_dictlist':  #used in autoSlideshow
            return dictlist

        if not dictlist:
            xbmc_notify(translation(32200), translation(32055)) #slideshow, no playable items
            return
        #log(pprint.pformat(dictlist))
        if addon.getSetting('use_slideshow_for_album') == 'true':
            slideshowAlbum( dictlist, name )
        else:
            display_album_from( dictlist, name )
Project: script.reddit.reader    Author: gedisony
def playVideo(url, name, type_):
    xbmc_busy(False)

    pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    pl.clear()

    if url : #sometimes url is a list of url or just a single string
        if isinstance(url, basestring):
            pl.add(url, xbmcgui.ListItem(name))
            xbmc.Player().play(pl, windowed=False)  #scripts play video like this.
            #listitem = xbmcgui.ListItem(path=url)   #plugins play video like this.
            #xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
        else:
            for u in url:
                #log('u='+ repr(u))
                #pl.add(u)
                pl.add(u, xbmcgui.ListItem(name))
            xbmc.Player().play(pl, windowed=False)
    else:
        log("playVideo(url) url is blank")
Project: script.reddit.reader    Author: gedisony
def parse_web_url_from(recently_played_url):
# builds the youtube url from plugin://plugin.video.youtube/play/?video_id=dIgSKPzLC9g
# grabs the youtube url from plugin://plugin.video.reddit_viewer/?mode=play&url=https%3A%2F%2Fyoutu.be%2FUhOx-FpEAQk
# excludes googlevideo.com/videoplayback
    from domains import ClassYoutube
    ret_url=recently_played_url.split("|", 1)[0] #remove |Useragent:...
    link_components=urlparse.urlparse( recently_played_url )
    #log("*****{0} scheme[{1}]**".format( recently_played_url, link_components.scheme ) )
    if link_components.scheme=="plugin":
        query = urlparse.parse_qs(link_components.query)
        netloc=link_components.netloc

        if netloc=="plugin.video.reddit_viewer":
            ret_url=''.join(query.get("url"))
        elif netloc=="plugin.video.youtube":
            video_id=query.get("video_id")
            ret_url=ClassYoutube.build_youtube_url_with_video_id(''.join(video_id))  #''.join(video_id)  <-- list to string
            #log("***** video ID="+''.join(video_id)) #ClassYoutube
    elif link_components.scheme=="https":
        #log(link_components.path)
        if link_components.path.startswith('/videoplayback'): #youtube videos parsed by youtube_dl are unplayable from history, we exclude it here
            ret_url=None

    return ret_url
Project: script.reddit.reader    Author: gedisony
def build_reddit_search_context_menu_entries(hasmultiplesubreddit,subreddit,link_url ):
    cxm_list=[]
    colored_subreddit_full=colored_subreddit( subreddit )
    label_search=translation(32520)
    parts_of_link_url=urlparse.urlparse(link_url)

    if cxm_show_search:
        if hasmultiplesubreddit:
            cxm_list.append( (label_search        , build_script("search", '', '')  ) )
        else:
            label_search+=' {0}'.format(colored_subreddit_full)
            cxm_list.append( (label_search        , build_script("search", '', subreddit)  ) )
        #NOTE: can't use the entire link_url because it will work for www.reddit.com but not for oauth.reddit.com
        part_to_search="{0} {1}".format(parts_of_link_url.path,parts_of_link_url.query)
        if part_to_search.startswith('/'): part_to_search=part_to_search[1:]  #remove starting '/'

        remove_these_words=['.mp4','.webm','/v/','.jpg','.png'] #mainly to match imgur links where we want to catch the imageID not "imageID.mp4"
        part_to_search=re.sub('|'.join(re.escape(word) for word in remove_these_words), '', part_to_search)

        #log('parts to search='+part_to_search)
        cxm_list.append( (translation(32531)    , build_script("listSubReddit", assemble_reddit_filter_string(part_to_search,'','',''), name=translation(32531))  ) ) #"Other posts with this link"
    return cxm_list
Project: loveliv    Author: xmcp
def send_msgs():
    with sqlite3.connect('events.db') as db:
        cur=db.cursor()
        cur.execute('select msgid,content from push_msgs')
        res=cur.fetchall()
        for msgid,content in res:
            if itchat.send_msg(content,toUserName=group_name):
                cur=db.cursor()
                cur.execute('delete from push_msgs where msgid=?',[msgid])
                db.commit()
                print(' -> sent msg:',content)
            else:
                if msgid not in logged_errors:
                    print('!!! send failed:',content)
                    log('error','send message failed #%d: %s'%(msgid,content))
                    logged_errors.add(msgid)
            time.sleep(.5)
Project: unity    Author: awolfly9
def get_unity_version(self, response):
        content = json.loads(response.body)
        utils.log('content:%s' % response.body)

        self.unity_version = content.get('kharma_version', '')
        self.headers['X-Kharma-Version'] = self.unity_version

        # Unity Asset Store categories JSON endpoint
        url = 'https://www.assetstore.unity3d.com/api/en-US/home/categories.json'

        yield Request(
                url = url,
                method = 'GET',
                headers = self.headers,
                meta = {
                    'download_timeout': 20,
                    'is_proxy': False,
                },
                callback = self.get_categories,
        )

Project: unity    Author: awolfly9
def get_proxy(self):
        if get_project_settings().get('IS_USE_PROXY', True):
            if len(self.proxys) <= 10:
                self.update_proxy()

            if len(self.proxys) > 0:
                self.index = self.index + 1
                self.index = self.index % len(self.proxys)

                proxy = 'http://%s:%s' % (self.proxys[self.index].get('ip'), self.proxys[self.index].get('port'))
                utils.log('++++++++++proxy:%s++++++++++' % proxy)
                return proxy

            return None
        else:
            return None
Project: unity    Author: awolfly9
def delete_proxy(self, proxy):
        if proxy is None:
            return

        try:
            rets = proxy.split(':')
            ip = rets[1]
            ip = ip[2:]

            for item in self.proxys:
                if item.get('ip') == ip:
                    self.proxys.remove(item)
                    break

            if len(self.proxys) < 3:
                self.update_proxy()

            utils.log('--------------delete ip:%s-----------' % ip)
            r = requests.get(url = '%s/delete?name=%s&ip=%s' % (self.address, self.name, ip))
            return r.text
        except:
            return False
Project: py_mbot    Author: evgfilim1
def register_text_handler(self, callback, allow_edited=False):
        """Registers text message handler

        Args:
            callback(function): callable object to execute
            allow_edited(Optional[bool]): pass edited messages

        """
        @utils.log(logger, print_ret=False)
        def process_update(bot, update):
            lang = utils.get_lang(self._storage, update.effective_user)
            callback(update.effective_message, lang)
        self._dispatcher.add_handler(MessageHandler(Filters.text, process_update,
                                                    edited_updates=allow_edited))