我们从Python开源项目中,提取了以下20个代码示例,用于说明如何使用sqlalchemy.func.coalesce()。
def get_device_tokens(login_id):
    '''
    Return the (platform_id, device_token) pairs for a login, with duplicate
    tokens collapsed per push provider.

    The newest token wins: ``device_token_new`` is preferred over
    ``device_token``. Devices that have been unregistered are skipped.
    '''
    with session_scope() as session:
        token_col = func.coalesce(model.Device.device_token_new,
                                  model.Device.device_token).label('device_token')
        rows = session.query(model.Device.platform_id, token_col).\
            filter(model.Device.login_id == login_id).\
            filter(model.Device.unregistered_ts.is_(None)).all()

        unique_devices = set()
        seen_provider_tokens = set()
        # sorting to make unit tests easier
        for row in sorted(rows):
            platform_id, token = row
            # NOTE: Use unique tokens per *provider* only for known providers,
            # and unique tokens per *platform* in other cases, since
            # it is hard to verify providers for custom senders
            provider_id = (constants.PLATFORM_BY_PROVIDER.get(platform_id, 0)
                           or platform_id)
            key = (provider_id, token)
            if key not in seen_provider_tokens:
                seen_provider_tokens.add(key)
                unique_devices.add(row)
        return list(unique_devices)
def _set_orderby_desc(self, query, model, limit, last_id, offset,
                      descending, orderby):
    """Return an updated query with the proper orderby and desc."""
    if orderby == 'fav_user_ids':
        # Expose the number of favourites as a selectable column so the
        # ORDER BY can reference it by label.
        n_favs = func.coalesce(
            func.array_length(model.fav_user_ids, 1), 0).label('n_favs')
        query = query.add_column(n_favs)

    # Pick the expression to sort on.
    if orderby in ('created', 'updated', 'finish_time'):
        # These columns hold timestamps as text; cast for correct ordering.
        sort_expr = cast(getattr(model, orderby), TIMESTAMP)
    elif orderby == 'fav_user_ids':
        sort_expr = "n_favs"
    else:
        sort_expr = getattr(model, orderby)

    query = query.order_by(desc(sort_expr) if descending else sort_expr)

    # Keyset pagination (last_id) needs no offset; classic pagination does.
    if last_id:
        return query.limit(limit)
    return query.limit(limit).offset(offset)
def update_canonicals(canonicals):
    '''
    Update canonical data for android devices.

    Rewrites ``device_token_new`` for each (login_id, old_token) pair, then
    trims the number of users per new token via the
    ``keep_max_users_per_device`` stored procedure for both Android platforms.
    '''
    global ENGINE
    params = [{"p_{}".format(key): value for key, value in entry.items()}
              for entry in canonicals]
    device_table = model.metadata.tables['device']
    current_token = func.coalesce(device_table.c.device_token_new,
                                  device_table.c.device_token)
    stmt = update(device_table).\
        values(device_token_new=bindparam('p_new_token')).\
        where(and_(device_table.c.login_id == bindparam('p_login_id'),
                   current_token == bindparam('p_old_token')))
    ENGINE.execute(stmt, params)

    with session_scope() as session:
        query = text('SELECT keep_max_users_per_device( \
            (:platform_id)::int2, :device_token, (:max_users_per_device)::int2)')
        for canonical in canonicals:
            # Apply the cap for both Android phone and tablet platforms.
            for platform_id in (constants.PLATFORM_ANDROID,
                                constants.PLATFORM_ANDROID_TABLET):
                session.execute(query, {
                    'platform_id': platform_id,
                    'device_token': canonical['new_token'],
                    'max_users_per_device': config.max_users_per_device,
                })
        session.commit()
def update_unregistered_devices(unregistered):
    '''
    Update data for unregistered Android devices.

    Unregistered device will not receive notifications and will be deleted
    when number of devices exceeds maximum.
    '''
    global ENGINE
    params = [{"p_{}".format(key): value for key, value in item.items()}
              for item in unregistered]
    device_table = model.metadata.tables['device']
    current_token = func.coalesce(device_table.c.device_token_new,
                                  device_table.c.device_token)
    stmt = update(device_table).\
        values(unregistered_ts=func.now()).\
        where(and_(device_table.c.login_id == bindparam('p_login_id'),
                   current_token == bindparam('p_device_token')))
    ENGINE.execute(stmt, params)
def upsert_device(login_id, platform_id, device_token, application_version,
                  unregistered_ts=None):
    '''
    Add or update a device entity.

    Matches an existing device by login, platform and current token
    (``device_token_new`` falling back to ``device_token``). Returns the new
    or updated device with its relation to login preloaded.
    '''
    with session_scope() as session:
        login = session.query(model.Login).filter(
            model.Login.id == login_id).one()
        current_token = func.coalesce(model.Device.device_token_new,
                                      model.Device.device_token)
        device = (session.query(model.Device)
                  .filter(model.Device.login == login)
                  .filter(model.Device.platform_id == platform_id)
                  .filter(current_token == device_token)
                  .one_or_none())
        if device is None:
            device = model.Device(login=login,
                                  platform_id=platform_id,
                                  device_token=device_token,
                                  application_version=application_version,
                                  unregistered_ts=unregistered_ts)
            session.add(device)
        else:
            device.application_version = application_version
            device.unregistered_ts = unregistered_ts
        session.commit()
        # Refresh so the returned objects are bound to current DB state.
        session.refresh(device)
        session.refresh(device.login)
        return device
def status(self):
    """SQL expression for the status, treating a NULL ``_status`` as 0."""
    default_status = 0
    return func.coalesce(self._status, default_status)
def is_published(self):
    """SQL expression: true when the effective status (NULL → 0) is PUBLISHED."""
    effective_status = func.coalesce(self._status, 0)
    return effective_status == current_repo.PostStatus.PUBLISHED.value
def available_stock(self):
    """Units on hand: purchased minus sold, over unsold and unexpired stock."""
    total_purchased = func.coalesce(func.Sum(Stock.units_purchased), 0)
    total_sold = func.coalesce(func.Sum(Stock.units_sold), 0)
    active_stock = self.stocks.filter(Stock.is_sold != True,
                                      Stock.expired == False)
    return active_stock.with_entities(total_purchased - total_sold).scalar()
def available_stock(cls):
    """Class-level SQL expression: purchased minus sold units for this product,
    counting only unsold stock rows."""
    total_purchased = func.coalesce(func.Sum(Stock.units_purchased), 0)
    total_sold = func.coalesce(func.Sum(Stock.units_sold), 0)
    return select([total_purchased - total_sold])\
        .where(and_(or_(Stock.is_sold != True),
                    Stock.product_id == cls.id))\
        .as_scalar()
def units_sold(cls):
    """Class-level SQL expression: total item quantity sold from this stock row."""
    total_quantity = func.coalesce(func.Sum(Item.quantity), 0)
    return select([total_quantity]).where(Item.stock_id == cls.id).as_scalar()
def expired(self):
    """SQL expression labelled 'expired': unsold stock past its expiry date.

    A NULL expiry date is coalesced to today's date, so such rows never
    compare as expired.
    """
    not_sold = or_(self.is_sold != True)
    past_expiry = (func.coalesce(self.expiry_date, datetime.now().date())
                   < datetime.now().date())
    return and_(not_sold, past_expiry).label('expired')
def total_orders(self):
    """Number of orders on this relationship (0 when there are none)."""
    order_count = func.coalesce(func.Count(Order.id), 0)
    return self.orders.with_entities(order_count).scalar()
def amount_due(self):
    """Outstanding balance: order totals minus amounts paid, less any
    separate customer transactions."""
    order_balance = self.orders.with_entities(
        func.coalesce(func.Sum(Order.total), 0) -
        func.coalesce(func.Sum(Order.amount_paid), 0)).scalar()
    transaction_total = self.transactions.with_entities(
        func.coalesce(func.Sum(CustomerTransaction.amount), 0)).scalar()
    return order_balance - transaction_total
def get_exercise_grade_counts(exercise_id):
    """Yield per-response grade statistics for every response of an exercise.

    Each yielded row is (response_id, distinct scores, distinct
    misconceptions, distinct junk flags, total grade rows). The outer join
    keeps responses that have no grades at all.
    """
    rows = (db.session.query(
                Response.id,
                func.count(func.distinct(ResponseGrade.score)),
                func.count(func.distinct(ResponseGrade.misconception)),
                func.count(func.distinct(ResponseGrade.junk)),
                func.count(func.coalesce(ResponseGrade.id, None)))
            .outerjoin(ResponseGrade)
            .filter(Response.exercise_id == exercise_id)
            .group_by(Response.id)
            .all())
    for row in rows:
        yield row
def get_graded_count(exercise_id):
    """Return (response_id, grade_count) for each response of an exercise,
    ordered by response id; ungraded responses count 0 via the outer join."""
    grade_count = func.count(func.coalesce(ResponseGrade.id, None))
    query = (db.session.query(Response.id, grade_count)
             .outerjoin(ResponseGrade)
             .filter(Response.exercise_id == exercise_id)
             .group_by(Response.id)
             .order_by(Response.id))
    return query.all()
def grade_counts(self):
    """Grade statistics for this response: (id, distinct scores, distinct
    misconceptions, distinct junk flags, total grade rows)."""
    stats_query = (db.session.query(
                       Response.id,
                       func.count(func.distinct(ResponseGrade.score)),
                       func.count(func.distinct(ResponseGrade.misconception)),
                       func.count(func.distinct(ResponseGrade.junk)),
                       func.count(func.coalesce(ResponseGrade.id, None)))
                   .outerjoin(ResponseGrade)
                   .filter(Response.id == self.id)
                   .group_by(Response.id))
    return stats_query.first()
def get_object_list(self):
    """Return each form together with its entry count (0 when unfilled),
    optionally filtered by patient, group, or form type."""
    args = parse_args(FormCountListRequestSerializer)
    group = args['group']
    patient = args['patient']
    form_type = args['type']

    entry_counts = db.session.query(
        Entry.form_id.label('form_id'),
        func.count().label('entry_count')
    ).select_from(Entry)
    if patient is not None:
        # Only include entries that belong to this patient
        entry_counts = entry_counts.filter(Entry.patient == patient)
    entry_counts = entry_counts.group_by(Entry.form_id).subquery()

    # Get forms with their entry counts (set entry count to zero if their
    # form hasn't been filled in yet).
    forms_q = db.session.query(Form,
                               func.coalesce(entry_counts.c.entry_count, 0))
    forms_q = forms_q.outerjoin(entry_counts,
                                Form.id == entry_counts.c.form_id)

    if group is not None:
        # Filter by forms for group
        forms_q = query_by_group(forms_q, group, form_type)
    elif form_type is not None:
        # Filter by form type
        forms_q = query_by_type(forms_q, form_type)
    else:
        forms_q = forms_q.order_by(Form.id)

    return [dict(form=form, count=count) for form, count in forms_q]
def _get_dep_statuses(self, ti, session, dep_context):
    """Yield dependency statuses for task instance *ti*.

    Counts the states of all upstream task instances in a single aggregate
    query, then delegates the trigger-rule decision to
    ``_evaluate_trigger_rule``. Short-circuits with a passing status when
    there are no upstream tasks or the rule is DUMMY.
    """
    TI = airflow.models.TaskInstance
    TR = airflow.models.TriggerRule

    # Checking that all upstream dependencies have succeeded
    if not ti.task.upstream_list:
        yield self._passing_status(
            reason="The task instance did not have any upstream tasks.")
        return

    if ti.task.trigger_rule == TR.DUMMY:
        yield self._passing_status(reason="The task had a dummy trigger rule set.")
        return

    # TODO(unknown): this query becomes quite expensive with dags that have many
    # tasks. It should be refactored to let the task report to the dag run and get the
    # aggregates from there.
    qry = (
        session
        .query(
            # One conditional sum per state; coalesce maps the no-rows
            # case (SUM over zero rows is NULL) to 0.
            func.coalesce(func.sum(
                case([(TI.state == State.SUCCESS, 1)], else_=0)), 0),
            func.coalesce(func.sum(
                case([(TI.state == State.SKIPPED, 1)], else_=0)), 0),
            func.coalesce(func.sum(
                case([(TI.state == State.FAILED, 1)], else_=0)), 0),
            func.coalesce(func.sum(
                case([(TI.state == State.UPSTREAM_FAILED, 1)], else_=0)), 0),
            func.count(TI.task_id),
        )
        .filter(
            TI.dag_id == ti.dag_id,
            TI.task_id.in_(ti.task.upstream_task_ids),
            TI.execution_date == ti.execution_date,
            # Only finished states participate in the rule evaluation.
            TI.state.in_([
                State.SUCCESS, State.FAILED,
                State.UPSTREAM_FAILED, State.SKIPPED]),
        )
    )
    successes, skipped, failed, upstream_failed, done = qry.first()
    for dep_status in self._evaluate_trigger_rule(
            ti=ti,
            successes=successes,
            skipped=skipped,
            failed=failed,
            upstream_failed=upstream_failed,
            done=done,
            flag_upstream_failed=dep_context.flag_upstream_failed,
            session=session):
        yield dep_status
def _get_dep_statuses(self, ti, session, dep_context):
    """Yield dependency statuses for task instance *ti*.

    Counts the states of all upstream task instances in a single aggregate
    query, then delegates the trigger-rule decision to
    ``_evaluate_trigger_rule``. Short-circuits with a passing status when
    there are no upstream tasks or the rule is DUMMY.
    """
    TI = airflow.models.TaskInstance
    TR = airflow.models.TriggerRule

    # Checking that all upstream dependencies have succeeded
    if not ti.task.upstream_list:
        yield self._passing_status(
            reason="The task instance did not have any upstream tasks.")
        # BUGFIX: 'raise StopIteration' inside a generator is converted to a
        # RuntimeError under PEP 479 (Python >= 3.7); a plain 'return' is the
        # correct way to end a generator early.
        return

    if ti.task.trigger_rule == TR.DUMMY:
        yield self._passing_status(reason="The task had a dummy trigger rule set.")
        return

    # TODO(unknown): this query becomes quite expensive with dags that have many
    # tasks. It should be refactored to let the task report to the dag run and get the
    # aggregates from there.
    qry = (
        session
        .query(
            # One conditional sum per state; coalesce maps the no-rows
            # case (SUM over zero rows is NULL) to 0.
            func.coalesce(func.sum(
                case([(TI.state == State.SUCCESS, 1)], else_=0)), 0),
            func.coalesce(func.sum(
                case([(TI.state == State.SKIPPED, 1)], else_=0)), 0),
            func.coalesce(func.sum(
                case([(TI.state == State.FAILED, 1)], else_=0)), 0),
            func.coalesce(func.sum(
                case([(TI.state == State.UPSTREAM_FAILED, 1)], else_=0)), 0),
            func.count(TI.task_id),
        )
        .filter(
            TI.dag_id == ti.dag_id,
            TI.task_id.in_(ti.task.upstream_task_ids),
            TI.execution_date == ti.execution_date,
            # Only finished states participate in the rule evaluation.
            TI.state.in_([
                State.SUCCESS, State.FAILED,
                State.UPSTREAM_FAILED, State.SKIPPED]),
        )
    )
    successes, skipped, failed, upstream_failed, done = qry.first()
    for dep_status in self._evaluate_trigger_rule(
            ti=ti,
            successes=successes,
            skipped=skipped,
            failed=failed,
            upstream_failed=upstream_failed,
            done=done,
            flag_upstream_failed=dep_context.flag_upstream_failed,
            session=session):
        yield dep_status
def _get_results(query, interval='month'):
    """Build per-group timelines of new and cumulative patient counts.

    Aggregates *query* into monthly counts, outer-joins them onto the full
    set of (group, date) buckets so empty periods appear with a count of 0,
    computes a running total per group with a window function, and shapes
    the rows into a list of ``{'group': ..., 'counts': [...]}`` dicts.
    """
    buckets_q = _get_buckets(query, interval)

    counts_q = (db.session.query(
                    query.c.group_id,
                    _to_month(query.c.date).label('date'),
                    func.count(query.c.patient_id).label('count'))
                .group_by(query.c.group_id, _to_month(query.c.date))
                .cte())

    # Missing buckets (no patients that month) become 0 via coalesce.
    new_c = func.coalesce(counts_q.c.count, 0).label('new')
    total_c = func.coalesce(
        func.sum(counts_q.c.count).over(
            partition_by=buckets_q.c.group_id,
            order_by=buckets_q.c.date),
        0).label('total')

    timeline_q = (db.session.query(buckets_q.c.group_id, buckets_q.c.date,
                                   new_c, total_c)
                  .select_from(buckets_q)
                  .outerjoin(counts_q,
                             and_(buckets_q.c.group_id == counts_q.c.group_id,
                                  buckets_q.c.date == counts_q.c.date))
                  .cte())

    results_q = (db.session.query(Group, timeline_q.c.date,
                                  timeline_q.c.new, timeline_q.c.total)
                 .join(timeline_q, Group.id == timeline_q.c.group_id)
                 .order_by(Group.id, timeline_q.c.date))

    results = []
    by_group = {}
    for group, date, new_patients, total_patients in results_q.all():
        entry = by_group.get(group)
        if entry is None:
            entry = {'group': group, 'counts': []}
            by_group[group] = entry
            results.append(entry)
        entry['counts'].append({
            'date': date.date(),
            'new_patients': new_patients,
            'total_patients': total_patients,
        })
    return results