The following 50 code examples, extracted from open-source Python projects, illustrate how to use sqlalchemy.sql.func.count().
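As a quick orientation before the extracted examples, here is a minimal, self-contained sketch of the two most common patterns: a plain count and a grouped count. The User model, its columns, and the in-memory SQLite engine are illustrative assumptions, not taken from any of the projects below.

# Minimal sketch; the User model and the in-memory SQLite database are
# hypothetical, chosen only to demonstrate func.count().
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import func

Base = declarative_base()


class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    team = Column(String)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([User(team='a'), User(team='a'), User(team='b')])
session.commit()

# Plain count: SELECT count(users.id) FROM users
total = session.query(func.count(User.id)).scalar()
print(total)  # 3

# Grouped count: SELECT users.team, count(users.id) FROM users GROUP BY users.team
per_team = session.query(User.team, func.count(User.id)).group_by(User.team).all()
print(per_team)  # [('a', 2), ('b', 1)]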
def materials_in_generation(run_id, generation):
    """Count number of materials in a generation.

    Args:
        run_id (str): identification string for run.
        generation (int): iteration in overall bin-mutate-simulate routine.

    Returns:
        Number (int) of materials in a particular generation that are present
        in the database (the final step in bin-mutate-simulate routine).

    """
    return session.query(Material).filter(
        Material.run_id == run_id,
        Material.generation == generation
    ).count()
def fetch_buy_or_sell_item_market_stats(item_id, is_buy_order):
    stats = MarketOrder \
        .query \
        .filter(MarketOrder.expire_time > datetime.utcnow()) \
        .filter_by(item_id=item_id, is_buy_order=is_buy_order) \
        .with_entities(
            func.sum(MarketOrder.amount).label('total_volume'),
            func.avg(MarketOrder.price).label('price_average'),
            func.min(MarketOrder.price).label('price_minimum'),
            func.max(MarketOrder.price).label('price_maximum'),
            func.count(MarketOrder.id).label('order_count'),
        ).one()

    return {
        'total_volume': stats.total_volume if stats.total_volume else 0,
        'price_average': round(float(stats.price_average), 2) if stats.price_average else 0,
        'price_minimum': stats.price_minimum if stats.price_minimum else 0,
        'price_maximum': stats.price_maximum if stats.price_maximum else 0,
        'order_count': stats.order_count,
    }
def payment(service_provider):
    info = db.session.query(Info).first()
    service_providers = db.session.query(
        Payment.service_provider).group_by(Payment.service_provider)
    if service_provider.lower() == 'all':
        payments = db.session.query(Payment).all()
        complete = db.session.query(
            Payment.service_provider,
            func.count(Payment.amount),
            func.sum(Payment.amount)).filter(
            Payment.status == "COMPLETE").group_by(Payment.service_provider)
        cancel = db.session.query(
            Payment.service_provider,
            func.count(Payment.amount),
            func.sum(Payment.amount)).filter(
            Payment.status == "CANCELED").group_by(Payment.service_provider)
    else:
        payments = db.session.query(Payment).filter_by(
            service_provider=service_provider.upper())
        complete = db.session.query(
            Payment.service_name,
            func.count(Payment.amount),
            func.sum(Payment.amount)).filter(
            Payment.status == "COMPLETE").filter_by(
            service_provider=service_provider.upper()).group_by(Payment.service_name)
        cancel = db.session.query(
            Payment.service_name,
            func.count(Payment.amount),
            func.sum(Payment.amount)).filter(
            Payment.status == "CANCELED").filter_by(
            service_provider=service_provider.upper()).group_by(Payment.service_name)
    return render_template('payment.html', payments=payments,
                           service_providers=service_providers,
                           complete=complete, cancel=cancel, info=info)
def _record_test_failures(self, test_list):
    create_or_update(ItemStat, where={
        'item_id': self.job.id,
        'name': 'tests.failures',
    }, values={
        'value': db.session.query(func.count(TestCase.id)).filter(
            TestCase.job_id == self.job.id,
            TestCase.result == Result.failed,
        ).as_scalar(),
    })
    db.session.flush()
def _get_project_user_quota_usages(context, project_id, user_id):
    rows = model_query(context, models.QuotaUsage,
                       read_deleted="no").\
        filter_by(project_id=project_id).\
        order_by(models.QuotaUsage.id.asc()).\
        with_lockmode('update').\
        all()
    proj_result = dict()
    user_result = dict()
    # Get the total count of in_use, reserved
    for row in rows:
        proj_result.setdefault(row.resource,
                               dict(in_use=0, reserved=0, total=0))
        proj_result[row.resource]['in_use'] += row.in_use
        proj_result[row.resource]['reserved'] += row.reserved
        proj_result[row.resource]['total'] += (row.in_use + row.reserved)
        if row.user_id is None or row.user_id == user_id:
            user_result[row.resource] = row
    return proj_result, user_result
def _is_quota_refresh_needed(quota_usage, max_age):
    """Determines if a quota usage refresh is needed.

    :param quota_usage: A QuotaUsage object for a given resource.
    :param max_age: Number of seconds between subsequent usage refreshes.
    :return: True if a refresh is needed, False otherwise.
    """
    refresh = False
    if quota_usage.in_use < 0:
        # Negative in_use count indicates a desync, so try to
        # heal from that...
        LOG.debug('in_use has dropped below 0; forcing refresh for '
                  'QuotaUsage: %s', dict(quota_usage))
        refresh = True
    elif quota_usage.until_refresh is not None:
        quota_usage.until_refresh -= 1
        if quota_usage.until_refresh <= 0:
            refresh = True
    elif max_age and (timeutils.utcnow() -
                      quota_usage.updated_at).seconds >= max_age:
        refresh = True

    return refresh
def security_group_in_use(context, group_id):
    # Are there any instances that haven't been deleted
    # that include this group?
    inst_assoc = model_query(context,
                             models.SecurityGroupInstanceAssociation,
                             read_deleted="no").\
        filter_by(security_group_id=group_id).\
        all()
    for ia in inst_assoc:
        num_instances = model_query(context, models.Instance,
                                    read_deleted="no").\
            filter_by(uuid=ia.instance_uuid).\
            count()
        if num_instances:
            return True

    return False
def update_bangumi_status(self, bangumi):
    session = SessionManager.Session
    try:
        # if a bangumi has no not-yet-downloaded episodes, we consider it finished.
        episode_count = session.query(func.count(Episode.id)). \
            filter(Episode.bangumi_id == bangumi.id). \
            filter(Episode.status == Episode.STATUS_NOT_DOWNLOADED). \
            scalar()
        logger.debug('bangumi %s has %d un-downloaded episodes',
                     bangumi.name, episode_count)
        if (bangumi.status == Bangumi.STATUS_ON_AIR) and (episode_count == 0):
            session.add(bangumi)
            bangumi.status = Bangumi.STATUS_FINISHED
            session.commit()
    except Exception as error:
        logger.error(error, exc_info=True)
    finally:
        SessionManager.Session.remove()
def get_all_announce(self, offset, count):
    session = SessionManager.Session()
    try:
        announce_list = session.query(Announce).\
            offset(offset).\
            limit(count).\
            all()

        total = session.query(func.count(Announce.id)).scalar()

        announce_dict_list = []
        for announce in announce_list:
            announce_dict = row2dict(announce)
            announce_dict_list.append(announce_dict)

        return json_resp({'data': announce_dict_list, 'total': total})
    finally:
        SessionManager.Session.remove()
def evaluate_convergence(run_id, generation):
    '''Determines convergence by calculating variance of bin-counts.

    Args:
        run_id (str): identification string for run.
        generation (int): iteration in bin-mutate-simulate routine.

    Returns:
        bool: True if variance is less than or equal to cut-off criteria
            (so method will continue running).

    '''
    simulations = config['simulations']
    query_group = []
    if 'gas_adsorption' in simulations:
        query_group.append(getattr(Material, 'gas_adsorption_bin'))
    if 'surface_area' in simulations:
        query_group.append(getattr(Material, 'surface_area_bin'))
    if 'helium_void_fraction' in simulations:
        query_group.append(getattr(Material, 'void_fraction_bin'))

    bin_counts = session \
        .query(func.count(Material.id)) \
        .filter(
            Material.run_id == run_id,
            Material.generation < generation,
            Material.generation_index < config['children_per_generation']
        ) \
        .group_by(*query_group).all()
    # convert SQLAlchemy result to a plain list of counts
    bin_counts = [i[0] for i in bin_counts]
    variance = sqrt(
        sum([(i - (sum(bin_counts) / len(bin_counts)))**2 for i in bin_counts])
        / len(bin_counts))
    print('\nCONVERGENCE:\t%s\n' % variance)
    sys.stdout.flush()
    return variance <= config['convergence_cutoff_criteria']
def indicator_count(self):
    return self.indicators.count()
def indicator_count(cls):
    return (select([func.count(Indicator.id)]).
            where(Indicator.event_id == cls.id).
            label("indicator_count"))
def count(session, query):
    """Returns the count of the specified `query`.

    This function employs an optimization that bypasses the
    :meth:`sqlalchemy.orm.Query.count` method, which can be very slow for
    large queries.

    """
    counts = query.selectable.with_only_columns([func.count()])
    num_results = session.execute(counts.order_by(None)).scalar()
    if num_results is None or query._limit:
        return query.count()
    return num_results
def mls_acquisition_census(self):
    """
    Return census of MLS acquisition paths of new players for self.year.

    :return: list of tuples (acquisition path, number of players)
    """
    return self.session.query(
        AcquisitionPaths.path,
        func.count(AcquisitionPaths.player_id)).filter(
        AcquisitionPaths.year == self.year).group_by(AcquisitionPaths.path)
def is_initial_draft(self, player_id, current_year):
    """
    Return boolean that indicates whether a player was drafted for the
    first time in a given year.

    :param player_id: Unique player ID
    :param current_year: Draft year (int)
    :return: bool
    """
    draft_history = self.history(player_id)
    if draft_history.count() > 1:
        initial_draft_year = self.session.query(Years).get(
            draft_history.first().year_id).yr
        return initial_draft_year == current_year
    else:
        return True
def run_query_limited(q):
    return q.count(), q.limit(current_app.config.get('MAX_INDEX_SIZE', 100)).all()
def filter_ebooks_by_genres(q, genres):
    return q.join(model.Ebook.genres).filter(model.Genre.id.in_(genres))\
        .group_by(model.Ebook.id)\
        .having(func.count(model.Ebook.id) == len(genres))
def calc_avg_ebook_rating(ebook_id):
    return db.session.query(func.avg(model.EbookRating.rating),
                            func.count(model.EbookRating.id))\
        .filter(model.EbookRating.ebook_id == ebook_id).one()
def get_metadata(self, docname, moderator):
    session = Session()
    subquery = session.query(
        Comment.node_id,
        func.count('*').label('comment_count')).group_by(
        Comment.node_id).subquery()
    nodes = session.query(Node.id, subquery.c.comment_count).outerjoin(
        (subquery, Node.id == subquery.c.node_id)).filter(
        Node.document == docname)
    session.close()
    session.commit()
    return dict([(k, v or 0) for k, v in nodes])
def cashback(service_provider='all'):
    info = db.session.query(Info).first()
    service_providers = db.session.query(
        Cashback.service_provider).group_by(Cashback.service_provider)
    if service_provider.lower() == 'all' or service_provider is None:
        cashbacks = db.session.query(Cashback).all()
        total = db.session.query(
            Cashback.service_provider,
            func.count(Cashback.amount),
            func.sum(Cashback.amount)).group_by(Cashback.service_provider)
    else:
        cashbacks = db.session.query(Cashback).filter_by(
            service_provider=service_provider.upper())
        total = db.session.query(
            Cashback.service_name,
            func.count(Cashback.amount),
            func.sum(Cashback.amount)).filter_by(
            service_provider=service_provider.upper()).group_by(Cashback.service_name)
    return render_template('cashback.html', cashbacks=cashbacks, total=total,
                           info=info, service_providers=service_providers)
def transfer(service_provider='all'):
    if service_provider.lower() == 'all' or service_provider is None:
        transfers = db.session.query(Transfer).all()
    else:
        transfers = db.session.query(Transfer).filter_by(
            service_provider=service_provider.upper())
    received = db.session.query(
        Transfer.name,
        func.count(Transfer.amount),
        func.sum(Transfer.amount)).filter_by(
        service_name='received').group_by(Transfer.name)
    transferred = db.session.query(
        Transfer.name,
        func.count(Transfer.amount),
        func.sum(Transfer.amount)).filter_by(
        service_name='transferred').group_by(Transfer.name)
    info = db.session.query(Info).first()
    return render_template('transfer.html', transfers=transfers,
                           service_provider=service_provider,
                           received=received, transferred=transferred,
                           info=info)
def missing(service_provider):
    info = db.session.query(Info).first()
    service_providers = db.session.query(
        Missing.service_provider).group_by(Missing.service_provider)
    if service_provider.lower() == 'all' or service_provider is None:
        missings = db.session.query(Missing).all()
        total = db.session.query(
            Missing.service_provider,
            func.count(Missing.amount),
            func.sum(Missing.amount)).group_by(Missing.service_provider)
    else:
        missings = db.session.query(Missing).filter_by(
            service_provider=service_provider.upper())
        total = db.session.query(
            Missing.service_name,
            func.count(Missing.amount),
            func.sum(Missing.amount)).filter_by(
            service_provider=service_provider.upper()).group_by(Missing.service_name)
    return render_template('missing.html', missings=missings, total=total,
                           info=info, service_providers=service_providers)
def single(table, k, v):
    cnt = settings.engine.execute(
        "select count(*) from " + table + " where " + k + "='" + str(v) + "'"
    ).fetchone()
    return cnt[0] == 0
def stat_playlist():
    data = {}
    data["gdType"] = settings.Session.query(
        func.substring(Playlist163.dsc, 4, 2).label('type'),
        func.count('*').label('count')).group_by("type").all()
    data["gdOver"] = settings.Session.query(
        Playlist163.over.label('over'),
        func.count('*').label('count')).group_by("over").all()
    return data
def stat_music():
    data = {"author-comment-count": []}
    cd = settings.Session.query(
        Music163.author.label('author'),
        func.sum(Music163.comment).label('count')).group_by("author").order_by(
        func.sum(Music163.comment).label('count').desc()).limit(30).all()
    for m in cd:
        data["author-comment-count"].append([m[0], int(m[1])])
    data["music-comment-count"] = settings.Session.query(
        Music163.song_name,
        Music163.comment.label("count")).order_by(
        Music163.comment.label("count").desc()).limit(30).all()
    return data
def stat_data():
    data = {}
    data["countPlaylist"] = int(settings.engine.execute(
        "select(select count(*) from playlist163 where over = 'Y')*100 / count(*) from playlist163"
    ).fetchone()[0])
    data["countComment"] = int(settings.engine.execute(
        "select(select count(*) from music163 where over = 'Y')*100 / count(*) from music163"
    ).fetchone()[0])
    data["countLyric"] = int(settings.engine.execute(
        "select(select count(*) from music163 where has_lyric = 'Y')*100 / count(*) from music163"
    ).fetchone()[0])
    return data
def _record_test_counts(self, test_list):
    create_or_update(ItemStat, where={
        'item_id': self.job.id,
        'name': 'tests.count',
    }, values={
        'value': db.session.query(func.count(TestCase.id)).filter(
            TestCase.job_id == self.job.id,
        ).as_scalar(),
    })
    db.session.flush()
def record_style_violation_stats(job_id: UUID):
    create_or_update(ItemStat, where={
        'item_id': job_id,
        'name': 'style_violations.count',
    }, values={
        'value': db.session.query(func.count(StyleViolation.id)).filter(
            StyleViolation.job_id == job_id,
        ).as_scalar(),
    })
    db.session.flush()
def re_evaluate_did(scope, name, rule_evaluation_action, session=None):
    """Re-Evaluates a did.

    :param scope: The scope of the did to be re-evaluated.
    :param name: The name of the did to be re-evaluated.
    :param rule_evaluation_action: The Rule evaluation action.
    :param session: The database session in use.
    :raises: DataIdentifierNotFound
    """
    try:
        did = session.query(models.DataIdentifier).filter(
            models.DataIdentifier.scope == scope,
            models.DataIdentifier.name == name).one()
    except NoResultFound:
        raise DataIdentifierNotFound()

    if rule_evaluation_action == DIDReEvaluation.ATTACH:
        __evaluate_did_attach(did, session=session)
    else:
        __evaluate_did_detach(did, session=session)

    # Update size and length of did
    if session.bind.dialect.name == 'oracle':
        stmt = session.query(func.sum(models.DataIdentifierAssociation.bytes),
                             func.count(1)).\
            with_hint(models.DataIdentifierAssociation,
                      "index(CONTENTS CONTENTS_PK)", 'oracle').\
            filter(models.DataIdentifierAssociation.scope == scope,
                   models.DataIdentifierAssociation.name == name)
        for bytes, length in stmt:
            did.bytes = bytes
            did.length = length

    # Add an updated_col_rep
    if did.did_type == DIDType.DATASET:
        models.UpdatedCollectionReplica(scope=scope,
                                        name=name,
                                        did_type=did.did_type).save(session=session)
def failover_segment_delete(context, segment_uuid):
    count = model_query(context, models.FailoverSegment
                        ).filter_by(uuid=segment_uuid
                                    ).soft_delete(synchronize_session=False)

    if count == 0:
        raise exception.FailoverSegmentNotFound(id=segment_uuid)

    model_query(context, models.Host).filter_by(
        failover_segment_id=segment_uuid).soft_delete(
        synchronize_session=False)
def is_failover_segment_under_recovery(context, failover_segment_id,
                                       filters=None):
    filters = filters or {}

    # get all hosts against the failover_segment
    inner_select = model_query(
        context, models.Host, (models.Host.uuid,)).filter(
        models.Host.failover_segment_id == failover_segment_id)

    # check if any host has notification status as new, running or error
    query = model_query(context, models.Notification,
                        (func.count(models.Notification.id),))

    if 'status' in filters:
        status = filters['status']
        if isinstance(status, (list, tuple, set, frozenset)):
            column_attr = getattr(models.Notification, 'status')
            query = query.filter(column_attr.in_(status))
        else:
            query = query.filter(models.Notification.status == status)

    query = query.filter(
        models.Notification.source_host_uuid.in_(inner_select.subquery()))

    return query.first()[0] > 0


# db apis for host
def host_delete(context, host_uuid):
    count = model_query(context, models.Host
                        ).filter_by(uuid=host_uuid
                                    ).soft_delete(synchronize_session=False)

    if count == 0:
        raise exception.HostNotFound(id=host_uuid)


# db apis for notifications
def notification_delete(context, notification_uuid):
    count = model_query(context, models.Notification
                        ).filter_by(notification_uuid=notification_uuid
                                    ).soft_delete(synchronize_session=False)

    if count == 0:
        raise exception.NotificationNotFound(id=notification_uuid)
def show_stats():
    dow = extract('dow', Message.timestamp)
    stats = {
        i: session.query(Message).filter(dow == i).count()
        for i in range(7)
    }
    return render_template('stats.html', stats=stats)
def yearly_stats():
    day = datetime.datetime.utcnow().date()
    # FIXME: Use dateutil.relativedelta or something
    last_year = day + datetime.timedelta(-365)
    doy = extract('doy', Message.timestamp)
    messages = session.query(Message.timestamp, func.count(doy))\
        .filter(last_year < Message.timestamp).group_by(doy)
    response = {date.strftime('%Y-%m-%d'): count
                for date, count in messages.all()}
    return jsonify(response)


# Helpers ####
async def find_many(conn, params):
    filters = []
    if 'name' in params:
        filters.append(artist.c.name.ilike('%{}%'.format(params['name'])))
    if 'first_play_gt' in params:
        filters.append(artist.c.first_play >= params['first_play_gt'])
    if 'first_play_lt' in params:
        filters.append(artist.c.first_play <= params['first_play_lt'])
    if 'last_play_gt' in params:
        filters.append(artist.c.last_play >= params['last_play_gt'])
    if 'last_play_lt' in params:
        filters.append(artist.c.last_play <= params['last_play_lt'])

    order = params.get('order')
    if order:
        order_clause = getattr(
            artist.c[order['column']], order.get('direction', 'asc'))()
    else:
        order_clause = artist.c.name.asc()

    stmt = select([artist])
    if filters:
        stmt = stmt.where(and_(*filters))

    total = await conn.scalar(
        stmt.with_only_columns([func.count(artist.c.id)]))

    stmt = stmt.offset(params['offset']).limit(params['limit']).order_by(order_clause)
    result = await conn.execute(stmt)
    items = await result.fetchall()
    return {'items': items, 'total': total}
async def count_total(conn):
    return await conn.scalar(artist.count())
async def count_new(conn, since):
    return await conn.scalar(
        select([func.count()]).where(artist.c.first_play >= since))
async def count_total(conn):
    return await conn.scalar(track.count())
async def find_many(conn, params):
    artist_name = artist.c.name.label('artist')

    filters = []
    if 'artist' in params:
        filters.append(artist_name.ilike('%{}%'.format(params['artist'])))
    if 'name' in params:
        filters.append(album.c.name.ilike('%{}%'.format(params['name'])))
    if 'first_play_gt' in params:
        filters.append(album.c.first_play >= params['first_play_gt'])
    if 'first_play_lt' in params:
        filters.append(album.c.first_play <= params['first_play_lt'])
    if 'last_play_gt' in params:
        filters.append(album.c.last_play >= params['last_play_gt'])
    if 'last_play_lt' in params:
        filters.append(album.c.last_play <= params['last_play_lt'])

    order = params.get('order')
    order_field = order['column'] if order else 'artist'
    order_direction = order['direction'] if order else 'asc'
    order_clause = artist_name if order_field == 'artist' else album.c[order_field]
    order_clause = getattr(order_clause, order_direction)()

    stmt = select([album, artist_name]).select_from(album.join(artist))
    if filters:
        stmt = stmt.where(and_(*filters))

    total = await conn.scalar(
        stmt.with_only_columns([func.count(album.c.id)]))

    stmt = stmt.offset(params['offset']).limit(params['limit']).order_by(order_clause)
    result = await conn.execute(stmt)
    items = await result.fetchall()
    return {'items': items, 'total': total}
async def count_total(conn):
    return await conn.scalar(album.count())
async def count_new(conn, since):
    return await conn.scalar(
        select([func.count()]).where(album.c.first_play >= since))
async def is_date_exists(conn, date):
    return await conn.scalar(
        select([func.count()]).where(play.c.date == date)) > 0
async def count_total(conn):
    return await conn.scalar(play.count())
async def count_for_period(conn, params):
    period = params.get('period')
    if not period:
        label_edge = 'year'
    else:
        label_edge = {
            'year': 'month',
            'month': 'day',
            'day': 'hour',
        }[period['kind']]

    label = func.date_trunc(label_edge, play.c.date).label('label')
    stmt = select([label, func.count().label('value')])
    if period:
        stmt = stmt.where(
            func.date_trunc(period['kind'], play.c.date) == period['value'])
    stmt = stmt.group_by(label).order_by(label)

    filter_kind = params.get('filter_kind')
    if filter_kind == 'artist':
        filter_column = artist.c.id
        from_clause = play.join(track).join(album).join(artist)
    elif filter_kind == 'album':
        filter_column = album.c.id
        from_clause = play.join(track).join(album)
    elif filter_kind == 'track':
        filter_column = track.c.id
        from_clause = play.join(track)
    else:
        filter_column = None
        from_clause = None

    if filter_column is not None:
        filter_value = params.get('filter_value')
        if not filter_value:
            raise ValidationError({'filter_value': ['This field is required.']})
        stmt = stmt.where(filter_column == filter_value)
    if from_clause is not None:
        stmt = stmt.select_from(from_clause)

    result = await conn.execute(stmt)
    return await result.fetchall()
def compute_node_statistics(context):
    """Compute statistics over all compute nodes."""
    # TODO(sbauza): Remove the service_id filter in a later release
    # once we are sure that all compute nodes report the host field
    _filter = or_(models.Service.host == models.ComputeNode.host,
                  models.Service.id == models.ComputeNode.service_id)

    result = model_query(context,
                         models.ComputeNode, (
                             func.count(models.ComputeNode.id),
                             func.sum(models.ComputeNode.vcpus),
                             func.sum(models.ComputeNode.memory_mb),
                             func.sum(models.ComputeNode.local_gb),
                             func.sum(models.ComputeNode.vcpus_used),
                             func.sum(models.ComputeNode.memory_mb_used),
                             func.sum(models.ComputeNode.local_gb_used),
                             func.sum(models.ComputeNode.free_ram_mb),
                             func.sum(models.ComputeNode.free_disk_gb),
                             func.sum(models.ComputeNode.current_workload),
                             func.sum(models.ComputeNode.running_vms),
                             func.sum(models.ComputeNode.disk_available_least),
                         ), read_deleted="no").\
        filter(models.Service.disabled == false()).\
        filter(models.Service.binary == "nova-compute").\
        filter(_filter).\
        first()

    # Build a dict of the info--making no assumptions about result
    fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
              'memory_mb_used', 'local_gb_used', 'free_ram_mb',
              'free_disk_gb', 'current_workload', 'running_vms',
              'disk_available_least')
    return {field: int(result[idx] or 0)
            for idx, field in enumerate(fields)}


###################
def _ip_range_splitter(ips, block_size=256):
    """Yields blocks of IPs no more than block_size elements long."""
    out = []
    count = 0
    for ip in ips:
        out.append(ip['address'])
        count += 1

        if count > block_size - 1:
            yield out
            out = []
            count = 0

    if out:
        yield out
def floating_ip_bulk_destroy(context, ips):
    project_id_to_quota_count = collections.defaultdict(int)
    for ip_block in _ip_range_splitter(ips):
        # Find any floating IPs that were not auto_assigned and
        # thus need quota released.
        query = model_query(context, models.FloatingIp).\
            filter(models.FloatingIp.address.in_(ip_block)).\
            filter_by(auto_assigned=False)
        for row in query.all():
            # The count is negative since we release quota by
            # reserving negative quota.
            project_id_to_quota_count[row['project_id']] -= 1
        # Delete the floating IPs.
        model_query(context, models.FloatingIp).\
            filter(models.FloatingIp.address.in_(ip_block)).\
            soft_delete(synchronize_session='fetch')

    # Delete the quotas, if needed.
    # Quota update happens in a separate transaction, so previous must have
    # been committed first.
    for project_id, count in project_id_to_quota_count.items():
        try:
            reservations = quota.QUOTAS.reserve(context,
                                                project_id=project_id,
                                                floating_ips=count)
            quota.QUOTAS.commit(context, reservations,
                                project_id=project_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to update usages bulk "
                                  "deallocating floating IP"))
def _floating_ip_count_by_project(context, project_id):
    nova.context.authorize_project_context(context, project_id)
    # TODO(tr3buchet): why leave auto_assigned floating IPs out?
    return model_query(context, models.FloatingIp, read_deleted="no").\
        filter_by(project_id=project_id).\
        filter_by(auto_assigned=False).\
        count()