The following 25 code examples, extracted from open-source Python projects, illustrate how to use sqlalchemy.sql.func.sum().
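Before the project examples, here is a minimal, self-contained sketch of the basic pattern, assuming SQLAlchemy 1.4+. The Order model, the sample rows, and the in-memory SQLite engine are hypothetical and exist only for illustration:

from sqlalchemy import Column, Integer, String, create_engine, func
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Order(Base):
    # Hypothetical model, not taken from any of the projects below.
    __tablename__ = 'orders'
    id = Column(Integer, primary_key=True)
    customer = Column(String)
    amount = Column(Integer)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Order(customer='a', amount=10),
                     Order(customer='a', amount=5),
                     Order(customer='b', amount=7)])
    session.commit()
    # Emits: SELECT customer, SUM(amount) AS total FROM orders
    #        GROUP BY customer ORDER BY SUM(amount) DESC
    totals = (session.query(Order.customer,
                            func.sum(Order.amount).label('total'))
              .group_by(Order.customer)
              .order_by(func.sum(Order.amount).desc())
              .all())
    print(totals)  # [('a', 15), ('b', 7)]

Labeling the aggregate lets later clauses and result rows refer to the computed column by name; most of the examples below rely on some variant of this pattern.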
def service_get_all_learning_sorted(context):
    session = get_session()
    with session.begin():
        topic = CONF.learning_topic
        label = 'learning_gigabytes'
        subq = model_query(context, models.Share,
                           func.sum(models.Share.size).label(label),
                           session=session,
                           read_deleted="no").\
            join(models.ShareInstance,
                 models.ShareInstance.learning_id == models.Share.id).\
            group_by(models.ShareInstance.host).\
            subquery()
        return _service_get_all_topic_subquery(context, session, topic,
                                               subq, label)
def season_payrolls(self, comp_season=None):
    """
    Calculate base salary totals for all MLS clubs participating in competition and season.

    If comp_season is None, calculate clubs in competition/season defined in class.

    :param comp_season: CompetitionSeason object of specific competition/season (or None)
    :return: list of dictionaries with team name and total payroll
    """
    clubs = self.session.query(Clubs).filter(Clubs.symbol.isnot(None)).all()
    payroll_list = []
    for club in clubs:
        team_base = self.club_roster(club, comp_season)
        if team_base.count() > 0:
            payroll_list.append(
                dict(team=club.name,
                     payroll=sum([player.base_salary for player in team_base]) / 100.00))
    return payroll_list
def player_weeks(self, club, player_on_roster, comp_season=None):
    """
    Calculate number of weeks that player spent on club roster during competition and season.

    If comp_season is None, use the competition/season defined in class.

    :param club: Clubs object
    :param player_on_roster: PlayerSalaries object, player on team roster
    :param comp_season: CompetitionSeasons object or None
    :return: integer
    """
    comp_season = comp_season or self.comp_season
    tenure = self.session.query(PartialTenures.start_week,
                                PartialTenures.end_week).\
        filter(PartialTenures.club == club,
               PartialTenures.comp_season == comp_season,
               PartialTenures.player_id == player_on_roster.player_id)
    if tenure.count() == 0:
        tenure = [(1, comp_season.weeks)]
    duration = sum([(end_week - start_week) + 1
                    for (start_week, end_week) in tenure])
    return duration
def league_efficiency(self, comp_season=None):
    """
    Calculate average front-office efficiency of league competition season.

    If comp_season is None, calculate clubs in competition/season defined in class.

    Returns payroll cost per point per game, in cents.

    :param comp_season: CompetitionSeasons object for competition/season or None
    :return: float
    """
    comp_season = comp_season or self.comp_season
    avail_payroll, util_factor = self.league_utilization(comp_season)
    points = self.session.query(func.sum(LeaguePoints.points)).filter(
        LeaguePoints.comp_season == comp_season).scalar()
    matches_played = self.session.query(func.sum(LeaguePoints.played)).filter(
        LeaguePoints.comp_season == comp_season).scalar()
    number_clubs = self.number_clubs(comp_season)
    points_per_game = float(points) / matches_played
    return (avail_payroll / number_clubs * util_factor) / points_per_game
def fetch_buy_or_sell_item_market_stats(item_id, is_buy_order):
    stats = MarketOrder \
        .query \
        .filter(MarketOrder.expire_time > datetime.utcnow()) \
        .filter_by(item_id=item_id, is_buy_order=is_buy_order) \
        .with_entities(
            func.sum(MarketOrder.amount).label('total_volume'),
            func.avg(MarketOrder.price).label('price_average'),
            func.min(MarketOrder.price).label('price_minimum'),
            func.max(MarketOrder.price).label('price_maximum'),
            func.count(MarketOrder.id).label('order_count'),
        ).one()

    return {
        'total_volume': stats.total_volume if stats.total_volume else 0,
        'price_average': round(float(stats.price_average), 2) if stats.price_average else 0,
        'price_minimum': stats.price_minimum if stats.price_minimum else 0,
        'price_maximum': stats.price_maximum if stats.price_maximum else 0,
        'order_count': stats.order_count,
    }
def get_event_count_in_date_range(username, start_time, end_time):
    """
    Returns list of tuples (date, count1, count2) ordered by date ASC
    e.g. [(datetime(), 1, 2), (datetime(), 2, 0)]
    """
    user = user_manager.get_or_create_user(username)
    seizure_count_case_when = (case(
        [(Event.event_type == SEIZURE_EVENT_TYPE, 1), ],
        else_=0)
        .label(SEIZURE_EVENT_TYPE))
    aura_count_case_when = (case(
        [(Event.event_type == AURA_EVENT_TYPE, 1), ],
        else_=0).label(AURA_EVENT_TYPE))

    result = (db.query(func.date(Event.event_time),
                       func.sum(seizure_count_case_when),
                       func.sum(aura_count_case_when))
              .filter_by(user_id=user.id)
              .filter_by(event_tracking_status_name="COMPLETE")
              .filter(Event.event_time.between(start_time, end_time))
              .group_by(func.date(Event.event_time))
              .order_by(func.date(Event.event_time).asc())
              .all())
    print("Returning query result " + str(result))
    return result
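The example above demonstrates conditional aggregation: summing over a CASE expression, i.e. SUM(CASE WHEN ... THEN 1 ELSE 0 END), tallies several conditions in a single pass over the table. A minimal sketch of the same idea, reusing the hypothetical Order model from the introduction (note the example above uses the pre-1.4 list form of case(), whereas SQLAlchemy 1.4+ takes positional WHEN tuples):

from sqlalchemy import case

# Count large and small orders in one query instead of two.
large, small = session.query(
    func.sum(case((Order.amount >= 10, 1), else_=0)),
    func.sum(case((Order.amount < 10, 1), else_=0)),
).one()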
def payment(service_provider):
    info = db.session.query(Info).first()
    service_providers = db.session.query(
        Payment.service_provider).group_by(Payment.service_provider)
    if service_provider.lower() == 'all':
        payments = db.session.query(Payment).all()
        complete = db.session.query(
            Payment.service_provider,
            func.count(Payment.amount),
            func.sum(Payment.amount)).filter(
                Payment.status == "COMPLETE").group_by(Payment.service_provider)
        cancel = db.session.query(
            Payment.service_provider,
            func.count(Payment.amount),
            func.sum(Payment.amount)).filter(
                Payment.status == "CANCELED").group_by(Payment.service_provider)
    else:
        payments = db.session.query(Payment).filter_by(
            service_provider=service_provider.upper())
        complete = db.session.query(
            Payment.service_name,
            func.count(Payment.amount),
            func.sum(Payment.amount)).filter(
                Payment.status == "COMPLETE").filter_by(
                service_provider=service_provider.upper()).group_by(Payment.service_name)
        cancel = db.session.query(
            Payment.service_name,
            func.count(Payment.amount),
            func.sum(Payment.amount)).filter(
                Payment.status == "CANCELED").filter_by(
                service_provider=service_provider.upper()).group_by(Payment.service_name)
    return render_template('payment.html', payments=payments,
                           service_providers=service_providers,
                           complete=complete, cancel=cancel, info=info)
def aggregate_stat_for_build(build: Build, name: str, func_=func.sum):
    """
    Aggregates a single stat for all jobs of the given build.
    """
    value = db.session.query(
        func.coalesce(func_(ItemStat.value), 0),
    ).filter(
        ItemStat.item_id.in_(db.session.query(Job.id).filter(
            Job.build_id == build.id,
        )),
        ItemStat.name == name,
    ).as_scalar()

    create_or_update(
        model=ItemStat,
        where={
            'item_id': build.id,
            'name': name,
        },
        values={'value': value},
    )
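A detail worth noting in the example above: SQL's SUM() returns NULL over an empty set, so the aggregate is wrapped in func.coalesce(..., 0) to guarantee a numeric result. The same guard, sketched against the hypothetical Order model from the introduction:

# scalar() would otherwise return None when no rows match the filter.
total = session.query(
    func.coalesce(func.sum(Order.amount), 0)
).filter(Order.customer == 'nobody').scalar()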
def getHostsByImageId(id):
    result = []
    hosts = session.query(Host).\
        filter(Host.ip == LSP.to).\
        filter(LSP.image_id == id).\
        distinct(LSP.to).all()
    for host in hosts:
        H = copy(host)
        H.lsplist = session.query(LSP).\
            filter(LSP.image_id == id).\
            filter(LSP.to == host.ip).\
            order_by(LSP.output.desc()).all()
        H.sumoutput = sum([x.output for x in H.lsplist if x.output is not None])
        H.sumbandwidth = sum([x.bandwidth for x in H.lsplist if x.bandwidth is not None])
        rbandwidth = np.mean([x.rbandwidth for x in H.lsplist if x.rbandwidth is not None])
        if np.isnan(rbandwidth):
            rbandwidth = None
        H.rbandwidth = rbandwidth
        result.append(H)
    result_sorted = sorted(result, key=lambda x: x.sumoutput, reverse=True)
    return result_sorted
def getInterfacesByImageId(id):
    result = []
    interfaces = session.query(Interface).\
        filter(Interface.image_id == id).all()
    for interface in interfaces:
        I = copy(interface)
        I.lsplist = session.query(LSP).\
            filter(LSP.image_id == id).\
            filter(LSP.interface_id == interface.id).\
            order_by(LSP.output.desc()).all()
        I.rsvpout = sum([x.output for x in I.lsplist if x.output is not None])
        if I.output and I.rsvpout:
            I.ldpout = I.output - I.rsvpout
        else:
            I.ldpout = I.output
        if I.ldpout < 100:
            I.ldpout = 0
        result.append(I)
    result_sorted = sorted(result, key=lambda x: x.output, reverse=True)
    return result_sorted
def get_breadth_first_task(project_id, user_id=None, user_ip=None,
                           external_uid=None, offset=0, limit=1,
                           orderby='id', desc=False):
    """Get a new task which has the least number of task runs."""
    project_query = session.query(Task.id).filter(Task.project_id == project_id,
                                                  Task.state != 'completed')
    if user_id and not user_ip and not external_uid:
        subquery = session.query(TaskRun.task_id).filter_by(project_id=project_id,
                                                            user_id=user_id)
    else:
        if not user_ip:  # pragma: no cover
            user_ip = '127.0.0.1'
        if user_ip and not external_uid:
            subquery = session.query(TaskRun.task_id).filter_by(project_id=project_id,
                                                                user_ip=user_ip)
        else:
            subquery = session.query(TaskRun.task_id).filter_by(project_id=project_id,
                                                                external_uid=external_uid)

    tmp = project_query.except_(subquery)
    query = session.query(Task, func.sum(Counter.n_task_runs).label('n_task_runs'))\
        .filter(Task.id == Counter.task_id)\
        .filter(Counter.task_id.in_(tmp))\
        .group_by(Task.id)\
        .order_by('n_task_runs ASC')
    query = _set_orderby_desc(query, orderby, desc)
    print(offset)
    print("breadth_first")
    data = query.limit(limit).offset(offset).all()
    return data
def get_top_terms(self, init_date, final_date, num_limit):
    pterm = PopularTerm
    query = (db.session.query(pterm.term, func.sum(pterm.times).label('total'))
             .filter(pterm.date_search.between(init_date, final_date))
             .group_by(pterm.term)
             .order_by(desc('total'))
             .limit(num_limit))
    terms = []
    for q in query:
        term = {
            'name': q[0],
            'times': q[1]
        }
        terms.append(term)
    return terms
def cashback(service_provider='all'):
    info = db.session.query(Info).first()
    service_providers = db.session.query(
        Cashback.service_provider).group_by(Cashback.service_provider)
    if service_provider.lower() == 'all' or service_provider is None:
        cashbacks = db.session.query(Cashback).all()
        total = db.session.query(
            Cashback.service_provider,
            func.count(Cashback.amount),
            func.sum(Cashback.amount)).group_by(Cashback.service_provider)
    else:
        cashbacks = db.session.query(Cashback).filter_by(
            service_provider=service_provider.upper())
        total = db.session.query(
            Cashback.service_name,
            func.count(Cashback.amount),
            func.sum(Cashback.amount)).filter_by(
                service_provider=service_provider.upper()).group_by(Cashback.service_name)
    return render_template('cashback.html', cashbacks=cashbacks, total=total,
                           info=info, service_providers=service_providers)
def transfer(service_provider='all'):
    if service_provider.lower() == 'all' or service_provider is None:
        transfers = db.session.query(Transfer).all()
    else:
        transfers = db.session.query(Transfer).filter_by(
            service_provider=service_provider.upper())
    received = db.session.query(
        Transfer.name,
        func.count(Transfer.amount),
        func.sum(Transfer.amount)).filter_by(
            service_name='received').group_by(Transfer.name)
    transferred = db.session.query(
        Transfer.name,
        func.count(Transfer.amount),
        func.sum(Transfer.amount)).filter_by(
            service_name='transferred').group_by(Transfer.name)
    info = db.session.query(Info).first()
    return render_template('transfer.html', transfers=transfers,
                           service_provider=service_provider,
                           received=received, transferred=transferred,
                           info=info)
def missing(service_provider):
    info = db.session.query(Info).first()
    service_providers = db.session.query(
        Missing.service_provider).group_by(Missing.service_provider)
    if service_provider.lower() == 'all' or service_provider is None:
        missings = db.session.query(Missing).all()
        total = db.session.query(
            Missing.service_provider,
            func.count(Missing.amount),
            func.sum(Missing.amount)).group_by(Missing.service_provider)
    else:
        missings = db.session.query(Missing).filter_by(
            service_provider=service_provider.upper())
        total = db.session.query(
            Missing.service_name,
            func.count(Missing.amount),
            func.sum(Missing.amount)).filter_by(
                service_provider=service_provider.upper()).group_by(Missing.service_name)
    return render_template('missing.html', missings=missings, total=total,
                           info=info, service_providers=service_providers)
def stat_music():
    data = {"author-comment-count": []}
    cd = (settings.Session.query(Music163.author.label('author'),
                                 func.sum(Music163.comment).label('count'))
          .group_by("author")
          .order_by(func.sum(Music163.comment).desc())
          .limit(30).all())
    for m in cd:
        data["author-comment-count"].append([m[0], int(m[1])])
    data["music-comment-count"] = (settings.Session.query(Music163.song_name,
                                                          Music163.comment.label("count"))
                                   .order_by(Music163.comment.desc())
                                   .limit(30).all())
    return data
def record_coverage_stats(build_id: UUID):
    """
    Aggregates all FileCoverage stats for the given build.
    """
    coverage_stats = db.session.query(
        func.sum(FileCoverage.lines_covered).label('coverage.lines_covered'),
        func.sum(FileCoverage.lines_uncovered).label('coverage.lines_uncovered'),
        func.sum(FileCoverage.diff_lines_covered).label('coverage.diff_lines_covered'),
        func.sum(FileCoverage.diff_lines_uncovered).label('coverage.diff_lines_uncovered'),
    ).filter(
        FileCoverage.build_id == build_id,
    ).group_by(
        FileCoverage.build_id,
    ).first()

    # TODO(dcramer): it'd be safer if we did this query within SQL
    stat_list = (
        'coverage.lines_covered',
        'coverage.lines_uncovered',
        'coverage.diff_lines_covered',
        'coverage.diff_lines_uncovered',
    )
    if not any(getattr(coverage_stats, n, None) is not None for n in stat_list):
        ItemStat.query.filter(
            ItemStat.item_id == build_id,
            ItemStat.name.in_(stat_list)
        ).delete(synchronize_session=False)
    else:
        for name in stat_list:
            create_or_update(
                model=ItemStat,
                where={
                    'item_id': build_id,
                    'name': name,
                },
                values={
                    'value': getattr(coverage_stats, name, 0) or 0,
                },
            )
def re_evaluate_did(scope, name, rule_evaluation_action, session=None):
    """
    Re-Evaluates a did.

    :param scope:                   The scope of the did to be re-evaluated.
    :param name:                    The name of the did to be re-evaluated.
    :param rule_evaluation_action:  The Rule evaluation action.
    :param session:                 The database session in use.
    :raises:                        DataIdentifierNotFound
    """
    try:
        did = session.query(models.DataIdentifier).filter(models.DataIdentifier.scope == scope,
                                                          models.DataIdentifier.name == name).one()
    except NoResultFound:
        raise DataIdentifierNotFound()

    if rule_evaluation_action == DIDReEvaluation.ATTACH:
        __evaluate_did_attach(did, session=session)
    else:
        __evaluate_did_detach(did, session=session)

    # Update size and length of did
    if session.bind.dialect.name == 'oracle':
        stmt = session.query(func.sum(models.DataIdentifierAssociation.bytes),
                             func.count(1)).\
            with_hint(models.DataIdentifierAssociation,
                      "index(CONTENTS CONTENTS_PK)", 'oracle').\
            filter(models.DataIdentifierAssociation.scope == scope,
                   models.DataIdentifierAssociation.name == name)
        for bytes, length in stmt:
            did.bytes = bytes
            did.length = length

    # Add an updated_col_rep
    if did.did_type == DIDType.DATASET:
        models.UpdatedCollectionReplica(scope=scope,
                                        name=name,
                                        did_type=did.did_type).save(session=session)
def compute_node_statistics(context):
    """Compute statistics over all compute nodes."""
    # TODO(sbauza): Remove the service_id filter in a later release
    # once we are sure that all compute nodes report the host field
    _filter = or_(models.Service.host == models.ComputeNode.host,
                  models.Service.id == models.ComputeNode.service_id)

    result = model_query(context,
                         models.ComputeNode, (
                             func.count(models.ComputeNode.id),
                             func.sum(models.ComputeNode.vcpus),
                             func.sum(models.ComputeNode.memory_mb),
                             func.sum(models.ComputeNode.local_gb),
                             func.sum(models.ComputeNode.vcpus_used),
                             func.sum(models.ComputeNode.memory_mb_used),
                             func.sum(models.ComputeNode.local_gb_used),
                             func.sum(models.ComputeNode.free_ram_mb),
                             func.sum(models.ComputeNode.free_disk_gb),
                             func.sum(models.ComputeNode.current_workload),
                             func.sum(models.ComputeNode.running_vms),
                             func.sum(models.ComputeNode.disk_available_least),
                         ), read_deleted="no").\
        filter(models.Service.disabled == false()).\
        filter(models.Service.binary == "nova-compute").\
        filter(_filter).\
        first()

    # Build a dict of the info--making no assumptions about result
    fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
              'memory_mb_used', 'local_gb_used', 'free_ram_mb',
              'free_disk_gb', 'current_workload', 'running_vms',
              'disk_available_least')
    return {field: int(result[idx] or 0)
            for idx, field in enumerate(fields)}
def _instance_data_get_for_user(context, project_id, user_id):
    result = model_query(context, models.Instance, (
        func.count(models.Instance.id),
        func.sum(models.Instance.vcpus),
        func.sum(models.Instance.memory_mb))).\
        filter_by(project_id=project_id)
    if user_id:
        result = result.filter_by(user_id=user_id).first()
    else:
        result = result.first()
    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0, result[2] or 0)
def plot_host(router, key):
    images = session.query(Image).\
        filter(Image.router == router).\
        filter(Image.time > datetime.now() - timedelta(days=1, hours=3)).all()
    x = [k.time for k in images]
    HostOutput = []
    for image in images:
        output = session.query(func.sum(LSP.output)).\
            filter(LSP.image_id == image.id).\
            filter(LSP.to == key).scalar()
        if output is None:
            output = 0
        HostOutput.append(output)
    y = HostOutput
    return getGraph(x, y)
def record_test_stats(job_id: UUID):
    create_or_update(
        ItemStat,
        where={
            'item_id': job_id,
            'name': 'tests.count',
        },
        values={
            'value': db.session.query(func.count(TestCase.id)).filter(
                TestCase.job_id == job_id,
            ).as_scalar(),
        }
    )
    create_or_update(
        ItemStat,
        where={
            'item_id': job_id,
            'name': 'tests.failures',
        },
        values={
            'value': db.session.query(func.count(TestCase.id)).filter(
                TestCase.job_id == job_id,
                TestCase.result == Result.failed,
            ).as_scalar(),
        }
    )
    create_or_update(
        ItemStat,
        where={
            'item_id': job_id,
            'name': 'tests.duration',
        },
        values={
            'value': db.session.query(
                func.coalesce(func.sum(TestCase.duration), 0)).filter(
                TestCase.job_id == job_id,
            ).as_scalar(),
        }
    )
    db.session.flush()
def __find_stuck_locks_and_repair_them(datasetfiles, locks, replicas, source_replicas,
                                       rseselector, rule, source_rses, session=None):
    """
    Find stuck locks for a rule and repair them.

    :param datasetfiles:     Dict holding all datasets and files.
    :param locks:            Dict holding locks.
    :param replicas:         Dict holding replicas.
    :param source_replicas:  Dict holding source replicas.
    :param rseselector:      The RSESelector to be used.
    :param rule:             The rule.
    :param source_rses:      RSE ids of eligible source RSEs.
    :param session:          Session of the db.
    :raises:                 InsufficientAccountLimit, IntegrityError, InsufficientTargetRSEs
    :attention:              This method modifies the contents of the locks and replicas input parameters.
    """
    logging.debug("Finding and repairing stuck locks for rule %s [%d/%d/%d]" % (
        str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt))

    replicas_to_create, locks_to_create, transfers_to_create,\
        locks_to_delete = repair_stuck_locks_and_apply_rule_grouping(datasetfiles=datasetfiles,
                                                                     locks=locks,
                                                                     replicas=replicas,
                                                                     source_replicas=source_replicas,
                                                                     rseselector=rseselector,
                                                                     rule=rule,
                                                                     source_rses=source_rses,
                                                                     session=session)
    # Add the replicas
    session.add_all([item for sublist in replicas_to_create.values() for item in sublist])
    session.flush()

    # Add the locks
    session.add_all([item for sublist in locks_to_create.values() for item in sublist])
    session.flush()

    # Increase rse_counters
    for rse_id in replicas_to_create.keys():
        rse_counter.increase(rse_id=rse_id,
                             files=len(replicas_to_create[rse_id]),
                             bytes=sum([replica.bytes for replica in replicas_to_create[rse_id]]),
                             session=session)

    # Increase account_counters
    for rse_id in locks_to_create.keys():
        account_counter.increase(rse_id=rse_id,
                                 account=rule.account,
                                 files=len(locks_to_create[rse_id]),
                                 bytes=sum([lock.bytes for lock in locks_to_create[rse_id]]),
                                 session=session)

    # Decrease account_counters
    for rse_id in locks_to_delete:
        account_counter.decrease(rse_id=rse_id,
                                 account=rule.account,
                                 files=len(locks_to_delete[rse_id]),
                                 bytes=sum([lock.bytes for lock in locks_to_delete[rse_id]]),
                                 session=session)

    # Delete the locks
    for lock in [item for sublist in locks_to_delete.values() for item in sublist]:
        session.delete(lock)

    # Add the transfers
    request_core.queue_requests(requests=transfers_to_create, session=session)
    session.flush()
    logging.debug("Finished finding and repairing stuck locks for rule %s [%d/%d/%d]" % (
        str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt))
def __create_locks_replicas_transfers(datasetfiles, locks, replicas, source_replicas,
                                      rseselector, rule, preferred_rse_ids=[],
                                      source_rses=[], session=None):
    """
    Apply a created replication rule to a set of files

    :param datasetfiles:       Dict holding all datasets and files.
    :param locks:              Dict holding locks.
    :param replicas:           Dict holding replicas.
    :param source_replicas:    Dict holding source replicas.
    :param rseselector:        The RSESelector to be used.
    :param rule:               The rule.
    :param preferred_rse_ids:  Preferred RSE's to select.
    :param source_rses:        RSE ids of eligible source replicas.
    :param session:            Session of the db.
    :raises:                   InsufficientAccountLimit, IntegrityError, InsufficientTargetRSEs
    :attention:                This method modifies the contents of the locks and replicas input parameters.
    """
    logging.debug("Creating locks and replicas for rule %s [%d/%d/%d]" % (
        str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt))

    replicas_to_create, locks_to_create, transfers_to_create = apply_rule_grouping(
        datasetfiles=datasetfiles,
        locks=locks,
        replicas=replicas,
        source_replicas=source_replicas,
        rseselector=rseselector,
        rule=rule,
        preferred_rse_ids=preferred_rse_ids,
        source_rses=source_rses,
        session=session)

    # Add the replicas
    session.add_all([item for sublist in replicas_to_create.values() for item in sublist])
    session.flush()

    # Add the locks
    session.add_all([item for sublist in locks_to_create.values() for item in sublist])
    session.flush()

    # Increase rse_counters
    for rse_id in replicas_to_create.keys():
        rse_counter.increase(rse_id=rse_id,
                             files=len(replicas_to_create[rse_id]),
                             bytes=sum([replica.bytes for replica in replicas_to_create[rse_id]]),
                             session=session)

    # Increase account_counters
    for rse_id in locks_to_create.keys():
        account_counter.increase(rse_id=rse_id,
                                 account=rule.account,
                                 files=len(locks_to_create[rse_id]),
                                 bytes=sum([lock.bytes for lock in locks_to_create[rse_id]]),
                                 session=session)

    # Add the transfers
    logging.debug("Rule %s [%d/%d/%d] queued %d transfers" % (
        str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt,
        rule.locks_stuck_cnt, len(transfers_to_create)))
    request_core.queue_requests(requests=transfers_to_create, session=session)
    session.flush()
    logging.debug("Finished creating locks and replicas for rule %s [%d/%d/%d]" % (
        str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt))
def __resolve_bytes_length_events_did(scope, name, session):
    """
    Resolve bytes, length and events of a did

    :param scope:   The scope of the DID.
    :param name:    The name of the DID.
    :param session: The database session in use.
    """
    try:
        did = session.query(models.DataIdentifier).filter_by(scope=scope, name=name).one()
    except NoResultFound:
        raise exception.DataIdentifierNotFound("Data identifier '%s:%s' not found" % (scope, name))

    bytes, length, events = 0, 0, 0
    if did.did_type == DIDType.FILE:
        bytes, length, events = did.bytes, 1, did.events
    elif did.did_type == DIDType.DATASET:
        try:
            length, bytes, events = session.query(func.count(models.DataIdentifierAssociation.scope),
                                                  func.sum(models.DataIdentifierAssociation.bytes),
                                                  func.sum(models.DataIdentifierAssociation.events)).\
                filter_by(scope=scope, name=name).\
                one()
        except NoResultFound:
            length, bytes, events = 0, 0, 0
    elif did.did_type == DIDType.CONTAINER:
        for dataset in list_child_datasets(scope=scope, name=name, session=session):
            try:
                tmp_length, tmp_bytes, tmp_events = session.query(func.count(models.DataIdentifierAssociation.scope),
                                                                  func.sum(models.DataIdentifierAssociation.bytes),
                                                                  func.sum(models.DataIdentifierAssociation.events)).\
                    filter_by(scope=dataset['scope'], name=dataset['name']).\
                    one()
            except NoResultFound:
                tmp_length, tmp_bytes, tmp_events = 0, 0, 0
            bytes += tmp_bytes or 0
            length += tmp_length or 0
            events += tmp_events or 0
    return (bytes, length, events)