The following 50 code examples, extracted from open-source Python projects, illustrate how to use sqlalchemy.func.count().
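Before the extracted examples, here is a minimal self-contained sketch of the two most common func.count() patterns; the User model, the in-memory SQLite engine, and the sample rows are throwaway illustrations, not taken from any project below:

from sqlalchemy import Column, Integer, String, create_engine, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class User(Base):
    # throwaway model, used only for this sketch
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([User(name='ann'), User(name='ann'), User(name='bob')])
session.commit()

# Pattern 1 -- a single aggregate value: the query yields one row with one
# column, and scalar() unwraps it to a plain int.
total = session.query(func.count(User.id)).scalar()  # 3

# Pattern 2 -- per-group counts: label() names the aggregate so it can be
# referenced in order_by() and read off each result row.
per_name = (
    session.query(User.name, func.count(User.id).label('count'))
    .group_by(User.name)
    .order_by(func.count(User.id).desc())
    .all()
)  # [('ann', 2), ('bob', 1)]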
def revert_league_points(session, league_id, results):
    league = session.query(League).filter(League.id == league_id).first()
    for i, result in enumerate(results):
        res = result.result_str
        winners = session.query(TeamHero.user_id). \
            filter(and_(TeamHero.hero_id == result.hero,
                        TeamHero.league == league_id)).all()
        for winner in winners:
            userq = session.query(LeagueUser).filter(
                and_(LeagueUser.user_id == winner[0],
                     LeagueUser.league == league_id)).first()
            user_id = userq.user_id
            userq_day = session.query(LeagueUserDay).filter(
                and_(LeagueUserDay.user_id == user_id,
                     LeagueUserDay.league == userq.league,
                     LeagueUserDay.day == league.current_day)).first()
            # Count a concrete column; passing the mapped class itself to
            # func.count() does not produce valid SQL.
            hero_count = session.query(func.count(TeamHero.user_id)).filter(
                and_(TeamHero.league == league_id,
                     TeamHero.user_id == user_id)).scalar()
            remove_result_to_user(userq, res, hero_count)
            remove_result_to_user(userq_day, res, hero_count)
def machoc_get_unique_match(cls, sample_src, sample_dst):
    """
    Get machoc similar functions.
    @arg: two samples
    @return: A list of functions in sample `sample_dst` that have the same
        machoc hash as at least one function in `sample_src`
    """
    src_funcs = cls.get_functions_filtered(sample_src.id)
    matches = []
    funcs = FunctionInfo.query.filter_by(sample_id=sample_dst.id)
    funcs = funcs.group_by(FunctionInfo.machoc_hash)
    funcs = funcs.having(func.count(FunctionInfo.machoc_hash) == 1)
    for funcx in src_funcs:
        match = funcs.filter_by(machoc_hash=funcx.machoc_hash)
        match = match.scalar()
        if match is not None:
            matches.append(match)
    app.logger.debug("Got %d direct machoc matches" % len(matches))
    return matches
def current(*groups):
    groups = GroupModel.query.filter(GroupModel.name.in_(*[groups])).all()
    if not len(groups):
        return False

    ret_data = []
    for group in groups:
        group_data = {}
        group_data['name'] = group.name
        icpes = [node.icpe.mac_address for node in group.nodes if node.icpe]
        min_ago = (datetime.now() - timedelta(hours=0.5))
        latest_heat = SQL.session.query(HeatModel,
                                        label('sum', func.sum(HeatModel.average)),
                                        label('count', func.count(HeatModel.average))).\
            join(HeatModel.icpe).\
            filter(iCPEModel.mac_address.in_(*[icpes])).\
            filter(HeatModel.date > min_ago).first()

        if latest_heat.count:
            group_data['heat'] = latest_heat.sum / latest_heat.count
        else:
            group_data['heat'] = 0.0

        ret_data.append(group_data)
    return ret_data
def maintenance_artists():
    show_ignored = get_argument('show_ignored', arg_type=bool)
    artist_count = db.session.query(
        Scrobble.artist, func.count(Scrobble.artist).label('count')
    ).group_by(Scrobble.artist).all()
    artist_count = {artist: count for artist, count in artist_count}
    diffs = (
        db.session.query(DiffArtists.id, DiffArtists.artist1, DiffArtists.artist2)
        .filter(DiffArtists.ignore == show_ignored)
        .order_by(DiffArtists.id.asc())
        .all()
    )
    # diffs = sorted(diffs, key=lambda x: artist_count.get(x[1], 0) + artist_count.get(x[2], 0))
    return render_template('maintenance/artists.html', diffs=diffs, artist_count=artist_count)
def top_artists(period=None):
    params = get_chart_params(period)
    scrobbles = func.count(Scrobble.artist).label('count')
    chart = (
        db.session.query(Scrobble.artist, scrobbles)
        .group_by(Scrobble.artist)
        .filter(
            Scrobble.user_id == current_user.id,
            Scrobble.played_at >= params['time_from'],
            Scrobble.played_at <= params['time_to'],
        )
        .order_by(scrobbles.desc())
        .limit(params['count'])
        .all()
    )
    return render_template(
        'charts/top_artists.html',
        chart=enumerate(chart, start=1),
        max_count=chart[0][1] if chart else 0,
        **params
    )
def top_tracks(period=None):
    params = get_chart_params(period)
    scrobbles = func.count(Scrobble.artist).label('count')
    chart = (
        db.session.query(Scrobble.artist, Scrobble.track, scrobbles)
        .group_by(Scrobble.artist, Scrobble.track)
        .filter(
            # the user restriction belongs in filter(), not group_by()
            Scrobble.user_id == current_user.id,
            Scrobble.played_at >= params['time_from'],
            Scrobble.played_at <= params['time_to'],
        )
        .order_by(scrobbles.desc())
        .limit(params['count'])
        .all()
    )
    return render_template(
        'charts/top_tracks.html',
        chart=enumerate(chart, start=1),
        max_count=chart[0][2] if chart else 0,
        **params
    )
def ajax_dashboard_per_hour():
    arg_year = request.args.get('year', 'all')
    arg_month = request.args.get('month', 'all')
    arg_artist = request.args.get('artist', '')

    count = func.count(Scrobble.id).label('count')
    time = Scrobble.played_at
    hour = func.extract('hour', time).label('hour')
    weekday = func.extract('isodow', time).label('weekday')
    year = func.extract('year', time).label('year')
    month = func.extract('month', time).label('month')

    year_filter = True if arg_year == 'all' else (year == arg_year)
    month_filter = True if arg_month == 'all' else (month == arg_month)
    artist_filter = True if arg_artist == '' else (Scrobble.artist == arg_artist)

    per_hour = (
        db.session.query(weekday, hour, count)
        .filter(Scrobble.user_id == current_user.id)
        .filter(year_filter, month_filter, artist_filter)
        .group_by('weekday', 'hour').all()
    )
    per_hour = [(d, h + 1, v) for d, h, v in per_hour]
    return dumps(per_hour)
def last_scrobbles():
    count = get_argument('count', default=app.config['RESULTS_COUNT'])
    scrobbles = (
        db.session.query(Scrobble)
        .filter(Scrobble.user_id == current_user.id)
        .order_by(Scrobble.played_at.desc())
        .limit(count)
        .all()
    )
    nowplaying = (
        db.session.query(NowPlaying)
        .filter(NowPlaying.user_id == current_user.id,
                NowPlaying.played_at + NowPlaying.length >= func.now())
        .order_by(NowPlaying.played_at.desc())
        .first()
    )
    return render_template('latest.html', scrobbles=scrobbles, nowplaying=nowplaying)
def __getitem__(self, item):
    if isinstance(item, slice):
        start, stop, step = util.decode_slice(item)

        if isinstance(stop, int) and \
                isinstance(start, int) and \
                stop - start <= 0:
            return []

        # perhaps we should execute a count() here so that we
        # can still use LIMIT/OFFSET ?
        elif (isinstance(start, int) and start < 0) \
                or (isinstance(stop, int) and stop < 0):
            return list(self)[item]

        res = self.slice(start, stop)
        if step is not None:
            return list(res)[None:None:item.step]
        else:
            return list(res)
    else:
        if item == -1:
            return list(self)[-1]
        else:
            return list(self[item:item + 1])[0]
def wbgetentities(self, debug=False):
    sub = (session.query(Item.item_id)
           .join(ItemTag)
           .group_by(Item.item_id)
           .subquery())
    q = (self.items.filter(Item.item_id == sub.c.item_id)
         .options(load_only(Item.qid)))

    if debug:
        print('running wbgetentities query')
        print(q)
        print(q.count())
    items = {i.qid: i for i in q}
    if debug:
        print('{} items'.format(len(items)))
    for qid, entity in wikidata.entity_iter(items.keys(), debug=debug):
        if debug:
            print(qid)
        items[qid].entity = entity
def chunk(self):
    chunk_size = utils.calc_chunk_size(self.area_in_sq_km)
    chunks = self.chunk_n(chunk_size)
    print('chunk size:', chunk_size)

    files = []
    for num, chunk in enumerate(chunks):
        filename = self.chunk_filename(num, len(chunks))
        # print(num, q.count(), len(tags), filename, list(tags))
        full = os.path.join('overpass', filename)
        files.append(full)
        if os.path.exists(full):
            continue
        oql = self.oql_for_chunk(chunk, include_self=(num == 0))

        r = overpass.run_query_persistent(oql)
        if not r:
            print(oql)
        assert r
        open(full, 'wb').write(r.content)

    cmd = ['osmium', 'merge'] + files + ['-o', self.overpass_filename]
    print(' '.join(cmd))
    subprocess.run(cmd)
def get_existing(sort, name_filter):
    q = Place.query.filter(Place.state.isnot(None), Place.osm_type != 'node')
    if name_filter:
        q = q.filter(Place.display_name.ilike('%' + name_filter + '%'))
    if sort == 'name':
        return q.order_by(Place.display_name)
    if sort == 'area':
        return q.order_by(Place.area)

    existing = q.all()
    if sort == 'match':
        return sorted(existing, key=lambda p: (p.items_with_candidates_count() or 0))
    if sort == 'ratio':
        return sorted(existing, key=lambda p: (p.match_ratio or 0))
    if sort == 'item':
        return sorted(existing, key=lambda p: p.items.count())

    return q
def space():
    overpass_dir = app.config['OVERPASS_DIR']
    files = [{'file': f, 'size': f.stat().st_size}
             for f in os.scandir(overpass_dir)
             if '_' not in f.name and f.name.endswith('.xml')]
    files.sort(key=lambda f: f['size'], reverse=True)
    files = files[:200]

    place_lookup = {int(f['file'].name[:-4]): f for f in files}
    # q = Place.query.outerjoin(Changeset).filter(Place.place_id.in_(place_lookup.keys())).add_columns(func.count(Changeset.id))
    q = (database.session.query(Place, func.count(Changeset.id))
         .outerjoin(Changeset)
         .filter(Place.place_id.in_(place_lookup.keys()))
         .options(load_only(Place.place_id, Place.display_name, Place.state))
         .group_by(Place.place_id, Place.display_name, Place.state))
    for place, num in q:
        place_id = place.place_id
        place_lookup[place_id]['place'] = place
        place_lookup[place_id]['changesets'] = num

    return render_template('space.html', files=files)
def sidebar_data():
    """Set the sidebar function."""
    # Get the most recent posts.
    recent = db.session.query(Post).order_by(
        Post.publish_date.desc()
    ).limit(5).all()
    # Get the tags, sorted by post count.
    top_tags = db.session.query(
        Tag, func.count(posts_tags.c.post_id).label('total')
    ).join(
        posts_tags
    ).group_by(Tag).order_by('total DESC').limit(5).all()
    return recent, top_tags


# Use the Blueprint object to set the route URL
# Register the view function into the blueprint
def update_downloads(resource_id, total_downloads):
    table = get_table('ckanext_tayside_resource_downloads')
    id_col_name = 'resource_id'
    id_col = getattr(table.c, id_col_name)
    s = select([func.count(id_col)], id_col == resource_id)
    connection = model.Session.connection()
    count = connection.execute(s).fetchone()
    engine = model.meta.engine

    if count and count[0]:
        engine.execute(table.update()
                       .where(id_col == resource_id)
                       .values(total_downloads=total_downloads))
    else:
        values = {id_col_name: resource_id,
                  'total_downloads': total_downloads}
        engine.execute(table.insert().values(**values))
def sightings_to_csv(since=None, output='sightings.csv'):
    from csv import writer as csv_writer
    if since:
        conf.REPORT_SINCE = since
    with session_scope() as session:
        sightings = get_sightings_per_pokemon(session)
    od = OrderedDict()
    for pokemon_id in range(1, 252):
        if pokemon_id not in sightings:
            od[pokemon_id] = 0
    od.update(sightings)
    with open(output, 'wt') as csvfile:
        writer = csv_writer(csvfile)
        writer.writerow(('pokemon_id', 'count'))
        for item in od.items():
            writer.writerow(item)
def user_profile(user_id):
    """Show user profile"""

    user_info = User.query.filter_by(user_id=user_id).first()
    account_created = user_info.account_created
    account_created = str(account_created)[:11]
    system_info = UserSystem.query.filter_by(user_id=user_id).all()
    rating_info = (db.session.query(Game.name, Rating.score, Game.game_id)
                   .join(Rating).filter(Rating.user_id == user_id)
                   .all())
    num_games = (db.session.query(func.count(Rating.user_id))
                 .filter(Rating.user_id == user_id)
                 .first())
    num_games = int(num_games[0])

    return render_template("user_profile.html", user_info=user_info,
                           system_info=system_info, rating_info=rating_info,
                           account_created=account_created, user_id=user_id,
                           num_games=num_games)
def update_words(self, title, text):
    title_id = self.title_id(title)
    self.db.query(Word).filter(Word.page == title_id).delete()
    if not text:
        return
    words = self.count_words(self.split_text(text))
    title_words = self.count_words(self.split_text(title))
    for word, count in title_words.iteritems():
        words[word] = words.get(word, 0) + count
    self.db.begin(subtransactions=True)
    try:
        for word, count in words.iteritems():
            self.db.add(Word(word, title_id, count))
        self.db.commit()
    except:
        self.db.rollback()
        raise
def test04(self):
    """Create a table and populate it with some objects"""
    MyModelB().create_table(self.client)

    # create some objects
    session = self.client.create_session()
    session.add(MyModelB(my_other_field=17))
    session.add(MyModelB(my_other_field=18))
    session.add(MyModelB(my_other_field=19))
    session.commit()

    metadata = sa.MetaData()
    metadata.reflect(self.client.get_engine())
    query = select([func.count()]).select_from(metadata.tables['mymodelb'])
    count = self.client.get_engine().execute(query).scalar()
    self.assertEqual(count, 3)
    session.close()
def largest_groups(cls, limit=10):
    member = table('member')
    package = table('package')

    j = join(member, package, member.c.table_id == package.c.id)

    s = select([member.c.group_id, func.count(member.c.table_id)]).\
        select_from(j).\
        group_by(member.c.group_id).\
        where(and_(member.c.group_id != None,
                   member.c.table_name == 'package',
                   package.c.private == False,
                   package.c.state == 'active')).\
        order_by(func.count(member.c.table_id).desc()).\
        limit(limit)

    res_ids = model.Session.execute(s).fetchall()
    res_groups = [(model.Session.query(model.Group).get(unicode(group_id)), val)
                  for group_id, val in res_ids]
    return res_groups
def are_dependents_done(self, session=None):
    """
    Checks whether the dependents of this task instance have all succeeded.
    This is meant to be used by wait_for_downstream.

    This is useful when you do not want to start processing the next
    schedule of a task until the dependents are done. For instance,
    if the task DROPs and recreates a table.
    """
    task = self.task

    if not task.downstream_task_ids:
        return True

    ti = session.query(func.count(TaskInstance.task_id)).filter(
        TaskInstance.dag_id == self.dag_id,
        TaskInstance.task_id.in_(task.downstream_task_ids),
        TaskInstance.execution_date == self.execution_date,
        TaskInstance.state == State.SUCCESS,
    )
    count = ti[0][0]
    return count == len(task.downstream_task_ids)
def get_num_task_instances(dag_id, task_ids, states=None, session=None):
    """
    Returns the number of task instances in the given DAG.

    :param session: ORM session
    :param dag_id: ID of the DAG to get the task concurrency of
    :type dag_id: unicode
    :param task_ids: A list of valid task IDs for the given DAG
    :type task_ids: list[unicode]
    :param states: A list of states to filter by if supplied
    :type states: list[state]
    :return: The number of running tasks
    :rtype: int
    """
    qry = session.query(func.count(TaskInstance.task_id)).filter(
        TaskInstance.dag_id == dag_id,
        TaskInstance.task_id.in_(task_ids))
    if states is not None:
        if None in states:
            qry = qry.filter(or_(
                TaskInstance.state.in_(states),
                TaskInstance.state.is_(None)))
        else:
            qry = qry.filter(TaskInstance.state.in_(states))
    return qry.scalar()
def yield_per(self, count):
    """Yield only ``count`` rows at a time.

    WARNING: use this method with caution; if the same instance is
    present in more than one batch of rows, end-user changes
    to attributes will be overwritten.

    In particular, it's usually impossible to use this setting
    with eagerly loaded collections (i.e. any lazy='joined' or
    'subquery') since those collections will be cleared for a
    new load when encountered in a subsequent result batch.
    In the case of 'subquery' loading, the full result for all
    rows is fetched which generally defeats the purpose of
    :meth:`~sqlalchemy.orm.query.Query.yield_per`.

    Also note that while
    :meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
    ``stream_results`` execution option to True, currently this
    is only understood by the
    :mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect,
    which will stream results using server side cursors instead
    of pre-buffering all rows for this query. Other DBAPIs
    pre-buffer all rows before making them available.

    """
    self._yield_per = count
    self._execution_options = self._execution_options.union(
        {"stream_results": True})
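A brief usage sketch of the batching behavior described in this docstring, reusing the throwaway User model and session from the opening sketch; on the psycopg2 dialect this streams via a server-side cursor, while other DBAPIs still pre-buffer the full result:

for user in session.query(User).yield_per(100):
    # the ORM materializes at most ~100 User instances per batch
    print(user.name)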
def _files_by_country():
    results = []
    q = current_app.db_session.query(
        func.count(Report.probe_cc),
        Report.probe_cc
    ).group_by(Report.probe_cc).order_by(Report.probe_cc)
    for row in q:
        count, alpha_2 = row
        country = "Unknown"
        if alpha_2 != "ZZ":
            try:
                country = countries.get(alpha_2=alpha_2).name
            except KeyError:
                country = "Unknown (%s)" % alpha_2
        results.append({
            'count': count,
            'alpha2': alpha_2,
            'country': country
        })
    results.sort(key=operator.itemgetter('country'))
    return results
def detail(site_name):
    results_tmp = []
    s = sha1()
    s.update(site_name.encode("utf-8"))
    results = db.session.query(AttackSuccess.module_id,
                               func.count(AttackSuccess.module_id))\
        .filter_by(hash=s.digest()).group_by(AttackSuccess.module_id).all()

    for result in results:
        m = Module.query.get(result[0])
        print(result)
        results_tmp.append({
            'id': result[0],
            'name': m.name,
            'count': result[1],
        })

    return render_template('detail.html', detail_site_host=site_name,
                           results=results_tmp, title="%s" % (site_name))
def delete_no_vuln():
    global sites_list
    sites = Site.query.all()
    for site in sites:
        s = sha1()
        s.update(site.host.encode("utf-8"))
        tmp_count = AttackSuccess.query \
            .filter_by(hash=s.digest()).count()
        if tmp_count == 0:
            is_scan = SiteIsScan.query.filter_by(hash=s.digest()).count()
            print(site.host, tmp_count, is_scan)
            if is_scan == 0:
                try:
                    sites_list.remove(site.host)
                except Exception as e:
                    print(e)
                db.session.delete(site)
                db.session.commit()
    return redirect('/')
def mac_address_range_find_allocation_counts(context, address=None,
                                             use_forbidden_mac_range=False):
    count = sql_func.count(models.MacAddress.address)
    query = context.session.query(models.MacAddressRange,
                                  count.label("count")).with_lockmode("update")
    query = query.outerjoin(models.MacAddress)
    query = query.group_by(models.MacAddressRange.id)
    query = query.order_by(desc(count))
    if address:
        query = query.filter(models.MacAddressRange.last_address >= address)
        query = query.filter(models.MacAddressRange.first_address <= address)
    query = query.filter(models.MacAddressRange.next_auto_assign_mac != -1)
    if not use_forbidden_mac_range:
        query = query.filter(models.MacAddressRange.do_not_use == '0')  # noqa
    query = query.limit(1)
    return query.first()
def get_match_summary_aggregate(self, match_id):
    return Database.session.query(MatchHero.account_id,
                                  func.sum(text('match_heroes.player_win')).label('player_win'),
                                  func.count(MatchHero.player_win).label('matches')). \
        filter(MatchHero.match_id >= match_id). \
        group_by(MatchHero.account_id). \
        all()
def get_match_hero_summary_aggregate(self, match_id):
    return Database.session.query(MatchHero.account_id,
                                  MatchHero.hero_id,
                                  func.sum(text('match_heroes.player_win')).label('player_win'),
                                  func.count(MatchHero.player_win).label('matches')). \
        filter(MatchHero.match_id >= match_id). \
        group_by(MatchHero.account_id). \
        group_by(MatchHero.hero_id). \
        all()
def get_match_item_summary_aggregate(self, match_id):
    return Database.session.query(MatchItem.account_id,
                                  MatchItem.item_id,
                                  func.sum(text('match_items.player_win')).label('player_win'),
                                  func.count(MatchItem.player_win).label('matches')). \
        filter(MatchItem.match_id >= match_id). \
        group_by(MatchItem.account_id). \
        group_by(MatchItem.item_id). \
        all()
def having(self, criterion):
    """apply a HAVING criterion to the query and return the
    newly resulting :class:`.Query`.

    :meth:`~.Query.having` is used in conjunction with
    :meth:`~.Query.group_by`.

    HAVING criterion makes it possible to use filters on aggregate
    functions like COUNT, SUM, AVG, MAX, and MIN, eg.::

        q = session.query(User.id).\\
                    join(User.addresses).\\
                    group_by(User.id).\\
                    having(func.count(Address.id) > 2)

    """
    criterion = expression._expression_literal_as_text(criterion)

    if criterion is not None and \
            not isinstance(criterion, sql.ClauseElement):
        raise sa_exc.ArgumentError(
            "having() argument must be of type "
            "sqlalchemy.sql.ClauseElement or string")

    criterion = self._adapt_clause(criterion, True, True)

    if self._having is not None:
        self._having = self._having & criterion
    else:
        self._having = criterion
def scalar(self):
    """Return the first element of the first result or None
    if no rows present.  If multiple rows are returned,
    raises MultipleResultsFound.

      >>> session.query(Item).scalar()
      <Item>
      >>> session.query(Item.id).scalar()
      1
      >>> session.query(Item.id).filter(Item.id < 0).scalar()
      None
      >>> session.query(Item.id, Item.name).scalar()
      1
      >>> session.query(func.count(Parent.id)).scalar()
      20

    This results in an execution of the underlying query.

    """
    try:
        ret = self.one()
        if not isinstance(ret, tuple):
            return ret
        return ret[0]
    except orm_exc.NoResultFound:
        return None
def count(self):
    """Return a count of rows this Query would return.

    This generates the SQL for this Query as follows::

        SELECT count(1) AS count_1 FROM (
            SELECT <rest of query follows...>
        ) AS anon_1

    .. versionchanged:: 0.7
        The above scheme is newly refined as of 0.7b3.

    For fine grained control over specific columns to count, to skip the
    usage of a subquery or otherwise control of the FROM clause, or to use
    other aggregate functions, use :attr:`~sqlalchemy.sql.expression.func`
    expressions in conjunction with :meth:`~.Session.query`, i.e.::

        from sqlalchemy import func

        # count User records, without
        # using a subquery.
        session.query(func.count(User.id))

        # return count of user "id" grouped
        # by "name"
        session.query(func.count(User.id)).\\
                group_by(User.name)

        from sqlalchemy import distinct

        # count distinct "name" values
        session.query(func.count(distinct(User.name)))

    """
    col = sql.func.count(sql.literal_column('*'))
    return self.from_self(col).scalar()
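A side-by-side sketch of the distinction this docstring draws, again reusing the throwaway User model and session from the opening sketch; Query.count() wraps the whole statement in a subquery, while querying func.count() directly stays a plain aggregate:

# SELECT count(1) AS count_1 FROM (SELECT ... FROM users WHERE ...) AS anon_1
n = session.query(User).filter(User.name == 'ann').count()

# SELECT count(users.id) FROM users WHERE users.name = ?  -- no subquery
n = session.query(func.count(User.id)).filter(User.name == 'ann').scalar()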
def _create_distinct(cls, expr):
    """Produce a column-expression-level unary ``DISTINCT`` clause.

    This applies the ``DISTINCT`` keyword to an individual column
    expression, and is typically contained within an aggregate function,
    as in::

        from sqlalchemy import distinct, func
        stmt = select([func.count(distinct(users_table.c.name))])

    The above would produce an expression resembling::

        SELECT COUNT(DISTINCT name) FROM user

    The :func:`.distinct` function is also available as a column-level
    method, e.g. :meth:`.ColumnElement.distinct`, as in::

        stmt = select([func.count(users_table.c.name.distinct())])

    The :func:`.distinct` operator is different from the
    :meth:`.Select.distinct` method of :class:`.Select`,
    which produces a ``SELECT`` statement with ``DISTINCT``
    applied to the result set as a whole, e.g. a
    ``SELECT DISTINCT`` expression.  See that method for
    further information.

    .. seealso::

        :meth:`.ColumnElement.distinct`

        :meth:`.Select.distinct`

        :data:`.func`

    """
    expr = _literal_as_binds(expr)
    return UnaryExpression(
        expr, operator=operators.distinct_op,
        type_=expr.type, wraps_column_expression=False)
def __init__(self, func, *criterion):
    """Produce a :class:`.FunctionFilter` object against a function.

    Used against aggregate and window functions,
    for database backends that support the "FILTER" clause.

    E.g.::

        from sqlalchemy import funcfilter
        funcfilter(func.count(1), MyClass.name == 'some name')

    Would produce "COUNT(1) FILTER (WHERE myclass.name = 'some name')".

    This function is also available from the :data:`~.expression.func`
    construct itself via the :meth:`.FunctionElement.filter` method.

    .. versionadded:: 1.0.0

    .. seealso::

        :meth:`.FunctionElement.filter`

    """
    self.func = func
    self.filter(*criterion)
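A self-contained sketch of the same FILTER clause in Core form; the orders table is a hypothetical illustration, and the statement only executes on backends that support FILTER (e.g. PostgreSQL 9.4+):

from sqlalchemy import Table, Column, Integer, MetaData, select, func, funcfilter

metadata = MetaData()
orders = Table('orders', metadata,
               Column('id', Integer, primary_key=True),
               Column('amount', Integer))

stmt = select([
    # unfiltered total
    func.count(orders.c.id).label('total'),
    # COUNT(orders.id) FILTER (WHERE orders.amount > 100)
    funcfilter(func.count(orders.c.id), orders.c.amount > 100).label('large'),
    # the equivalent method form on the function element itself
    func.count(orders.c.id).filter(orders.c.amount > 100).label('large_too'),
])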
def db_state(args):
    engine.execute('vacuum')
    statuses = db_session.query(WordList.language, WordList.state,
                                func.count(WordList.word)) \
        .group_by(WordList.language, WordList.state) \
        .order_by(WordList.language, WordList.state).all()

    # spacing: shitty, but good enough
    print(colorize('Language\tState\t\tCount', 'white', True))
    for status in statuses:
        print('{lang}\t\t{state} \t{count}'.format(lang=status[0],
                                                   state=status[1],
                                                   count=status[2]))
def query_matches(sample_1, sample_2, match_type):
    """
    Return true if there is an existing match of type "match_type"
    between the two samples.
    """
    query = SampleMatch.query.filter(
        SampleMatch.sid_1.in_([sample_1.id, sample_2.id]),
        SampleMatch.sid_2.in_([sample_1.id, sample_2.id]),
        SampleMatch.match_type == match_type)
    if query.count() != 0:
        return True
    return False
def match_by_machoc80(cls, sample):
    """
    Match samples by machoc hash.
    """
    if sample.functions.count() == 0:
        return True
    for sample_2 in Sample.query.filter(Sample.id != sample.id).all():
        if cls.query_matches(sample, sample_2, "machoc80"):
            continue
        elif cls.machoc_diff_samples(sample, sample_2) >= 0.8:
            app.logger.debug("Add machoc match %d %d", sample.id, sample_2.id)
            cls.add_sample_match(sample, sample_2, "machoc80")
            cls.add_sample_match(sample_2, sample, "machoc80")
    return True
def machoc_diff_with_all_samples(cls, sample, level=0.8):
    """
    Diff a sample with all other samples. Class method.
    """
    if sample.functions.count() == 0:
        return []
    hits = []
    for sample_2 in Sample.query.all():
        if sample_2.functions.count() == 0 or sample_2.id == sample.id:
            continue
        hit_rate = cls.machoc_diff_samples(sample, sample_2)
        if hit_rate >= level:
            hits.append((sample_2, hit_rate))
    return hits
def get_user_uncategorized_samples(user, limit=15):
    """
    By user with no family.
    """
    samples = []
    for sample in user.samples:
        if sample.families.count() == 0:
            samples.append(sample)
            limit = limit - 1
            if limit == 0:
                break
    return samples
def current(group):
    group = SQL.session.query(GroupModel).filter(GroupModel.name == group).first()
    if group is None:
        return False

    ret_data = []
    group_data = {}
    group_data['name'] = group.name
    group_data['heat'] = 0.0

    for node in group.nodes:
        if not node.icpe:
            continue
        node_data = {}
        node_data['name'] = node.name
        min_ago = (datetime.now() - timedelta(hours=0.5))
        latest_heat = SQL.session.query(HeatModel,
                                        label('sum', func.sum(HeatModel.average)),
                                        label('count', func.count(HeatModel.average))).\
            join(HeatModel.icpe).\
            filter(iCPEModel.mac_address == node.icpe.mac_address).\
            filter(HeatModel.date > min_ago).first()

        if latest_heat.count:
            node_data['heat'] = latest_heat.sum / latest_heat.count
            group_data['heat'] += node_data['heat']
        else:
            node_data['heat'] = 0.0

        ret_data.append(node_data)

    ret_data.append(group_data)
    return ret_data
def current(group):
    group = SQL.session.query(GroupModel).filter(GroupModel.name == group).first()
    if group is None:
        return False

    ret_data = []
    group_data = {}
    group_data['name'] = group.name
    group_data['power'] = 0.0

    for node in group.nodes:
        if not node.icpe:
            continue
        node_data = {}
        node_data['name'] = node.name
        min_ago = (datetime.now() - timedelta(hours=0.5))
        latest_power = SQL.session.query(PowerModel,
                                         label('sum', func.sum(PowerModel.average)),
                                         label('count', func.count(PowerModel.average))).\
            join(PowerModel.icpe).\
            filter(iCPEModel.mac_address == node.icpe.mac_address).\
            filter(PowerModel.date > min_ago).first()

        if latest_power.count:
            node_data['power'] = latest_power.sum / latest_power.count
            group_data['power'] += node_data['power']
        else:
            node_data['power'] = 0.0

        ret_data.append(node_data)

    ret_data.append(group_data)
    return ret_data
def current(icpe, sensor):
    # join through the sensor's own iCPE relation to resolve the sensor row
    sensor = SQL.session.query(SensorModel).\
        join(SensorModel.icpe).\
        filter(iCPEModel.mac_address == icpe).\
        filter(SensorModel.sensor_id == sensor).first()

    if sensor is None or sensor.heat is None:
        return False

    sensor_data = {}
    sensor_data['name'] = sensor.name
    sensor_data['sensor'] = sensor.sensor_id
    sensor_data['icpe'] = sensor.icpe.mac_address

    min_ago = (datetime.now() - timedelta(hours=0.5))
    latest_heat = SQL.session.query(HeatModel,
                                    label('sum', func.sum(HeatModel.average)),
                                    label('count', func.count(HeatModel.average))).\
        join(HeatModel.icpe).\
        join(HeatModel.sensor).\
        filter(iCPEModel.mac_address == sensor.icpe.mac_address).\
        filter(SensorModel.sensor_id == sensor.sensor_id).\
        filter(HeatModel.date > min_ago).first()

    if latest_heat.count:
        sensor_data['heat'] = latest_heat.sum / latest_heat.count
    else:
        sensor_data['heat'] = 0.0

    return sensor_data