The following 17 code examples, extracted from open-source Python projects, illustrate how to use sqlalchemy.sql.expression.asc().
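Before the project examples, a minimal self-contained sketch of the interchangeable spellings of an ascending ORDER BY may help. The User model and the in-memory SQLite engine here are illustrative assumptions, not taken from any of the projects below.

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.sql.expression import asc

Base = declarative_base()


class User(Base):
    # Hypothetical model, used only for this illustration.
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)


engine = create_engine('sqlite://')  # throwaway in-memory database
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([User(name='carol'), User(name='alice'), User(name='bob')])
    session.commit()

    # asc() as a standalone function over a mapped column attribute:
    names = [u.name for u in session.query(User).order_by(asc(User.name))]
    print(names)  # ['alice', 'bob', 'carol']

    # The equivalent method form, used by some examples below:
    names = [u.name for u in session.query(User).order_by(User.name.asc())]

    # Older SQLAlchemy versions also accepted a bare column-name string,
    # e.g. order_by(asc("created_at")) as in some examples below; newer
    # releases prefer the column attribute or text('created_at').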
def fixed_ip_get_by_instance(context, instance_uuid):
    if not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    vif_and = and_(models.VirtualInterface.id ==
                   models.FixedIp.virtual_interface_id,
                   models.VirtualInterface.deleted == 0)
    result = model_query(context, models.FixedIp, read_deleted="no").\
        filter_by(instance_uuid=instance_uuid).\
        outerjoin(models.VirtualInterface, vif_and).\
        options(contains_eager("virtual_interface")).\
        options(joinedload('network')).\
        options(joinedload('floating_ips')).\
        order_by(asc(models.VirtualInterface.created_at),
                 asc(models.VirtualInterface.id)).\
        all()

    if not result:
        raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)

    return result
def _get_project_user_quota_usages(context, project_id, user_id):
    rows = model_query(context, models.QuotaUsage,
                       read_deleted="no").\
        filter_by(project_id=project_id).\
        order_by(models.QuotaUsage.id.asc()).\
        with_lockmode('update').\
        all()
    proj_result = dict()
    user_result = dict()
    # Get the total count of in_use, reserved
    for row in rows:
        proj_result.setdefault(row.resource,
                               dict(in_use=0, reserved=0, total=0))
        proj_result[row.resource]['in_use'] += row.in_use
        proj_result[row.resource]['reserved'] += row.reserved
        proj_result[row.resource]['total'] += (row.in_use + row.reserved)
        if row.user_id is None or row.user_id == user_id:
            user_result[row.resource] = row
    return proj_result, user_result
def __query__(self):
    if self.order == self.DESCENDANT:
        query = desc(self.attribute)
    elif self.order == self.ASCENDANT:
        query = asc(self.attribute)
    else:
        # Fail fast on an unrecognized order; otherwise `query` would be
        # unbound when returned.
        raise ValueError('unknown sort order: {0!r}'.format(self.order))
    return query
def release_waiting_requests(rse, activity=None, rse_id=None, count=None,
                             account=None, session=None):
    """
    Release waiting requests.

    :param rse: The RSE name.
    :param activity: The activity.
    :param rse_id: The RSE id.
    :param count: The count to be released. If None, release all waiting
                  requests.
    :param account: The account whose requests to release.
    :param session: The database session in use.
    """
    try:
        if not rse_id:
            rse_id = get_rse_id(rse=rse, session=session)

        rowcount = 0
        if count is None:
            query = session.query(models.Request).\
                filter_by(dest_rse_id=rse_id, state=RequestState.WAITING)
            if activity:
                query = query.filter_by(activity=activity)
            if account:
                query = query.filter_by(account=account)
            rowcount = query.update({'state': RequestState.QUEUED},
                                    synchronize_session=False)
        elif count > 0:
            subquery = session.query(models.Request.id)\
                .filter(models.Request.dest_rse_id == rse_id)\
                .filter(models.Request.state == RequestState.WAITING)\
                .order_by(asc(models.Request.requested_at))
            if activity:
                subquery = subquery.filter(models.Request.activity == activity)
            if account:
                subquery = subquery.filter(models.Request.account == account)
            subquery = subquery.limit(count).with_for_update()

            rowcount = session.query(models.Request)\
                .filter(models.Request.id.in_(subquery))\
                .update({'state': RequestState.QUEUED},
                        synchronize_session=False)
        return rowcount
    except IntegrityError as error:
        raise RucioException(error.args)
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
    """
    Override
    TODO: Check if we need PRIMARY Indices or PRIMARY KEY Indices
    TODO: Check for border cases (No PK Indices)
    """
    if schema is None:
        schema = self.default_schema_name

    stmt = select([column('ColumnName'), column('IndexName')],
                  from_obj=[text('dbc.Indices')]).where(
                      and_(text('DatabaseName = :schema'),
                           text('TableName=:table'),
                           text('IndexType=:indextype'))
                  ).order_by(asc(column('IndexNumber')))

    # K for Primary Key
    res = connection.execute(stmt, schema=schema, table=table_name,
                             indextype='K').fetchall()

    index_columns = list()
    index_name = None

    for index_column in res:
        index_columns.append(self.normalize_name(index_column['ColumnName']))
        # There should be just one IndexName
        index_name = self.normalize_name(index_column['IndexName'])

    return {
        "constrained_columns": index_columns,
        "name": index_name
    }
def get_unique_constraints(self, connection, table_name, schema=None, **kw):
    """
    Overrides base class method
    """
    if schema is None:
        schema = self.default_schema_name

    stmt = select([column('ColumnName'), column('IndexName')],
                  from_obj=[text('dbc.Indices')]) \
        .where(and_(text('DatabaseName = :schema'),
                    text('TableName=:table'),
                    text('IndexType=:indextype'))) \
        .order_by(asc(column('IndexName')))

    # U for Unique
    res = connection.execute(stmt, schema=schema, table=table_name,
                             indextype='U').fetchall()

    def grouper(fk_row):
        return {
            'name': self.normalize_name(fk_row['IndexName']),
        }

    unique_constraints = list()
    for constraint_info, constraint_cols in groupby(res, grouper):
        unique_constraint = {
            'name': self.normalize_name(constraint_info['name']),
            'column_names': list()
        }

        for constraint_col in constraint_cols:
            unique_constraint['column_names'].append(
                self.normalize_name(constraint_col['ColumnName']))

        unique_constraints.append(unique_constraint)

    return unique_constraints
def get_indexes(self, connection, table_name, schema=None, **kw):
    """
    Overrides base class method
    """
    if schema is None:
        schema = self.default_schema_name

    stmt = select(["*"],
                  from_obj=[text('dbc.Indices')]) \
        .where(and_(text('DatabaseName = :schema'),
                    text('TableName=:table'))) \
        .order_by(asc(column('IndexName')))

    res = connection.execute(stmt, schema=schema, table=table_name).fetchall()

    def grouper(fk_row):
        return {
            # If IndexName is None TODO: Check what to do
            'name': fk_row.IndexName or fk_row.IndexNumber,
            'unique': True if fk_row.UniqueFlag == 'Y' else False
        }

    # TODO: Check if there's a better way
    indices = list()
    for index_info, index_cols in groupby(res, grouper):
        index_dict = {
            'name': index_info['name'],
            'column_names': list(),
            'unique': index_info['unique']
        }

        for index_col in index_cols:
            index_dict['column_names'].append(
                self.normalize_name(index_col['ColumnName']))

        indices.append(index_dict)

    return indices
def test_activation_mixin(self):
    activated_student = Student()
    activated_student.name = 'activated-student'
    activated_student.activated_at = datetime.now()
    DBSession.add(activated_student)

    deactivated_student = Student()
    deactivated_student.name = 'deactivated-student'
    deactivated_student.activated_at = None
    DBSession.add(deactivated_student)

    DBSession.commit()

    # Test ordering:
    student_list = Student.query.order_by(desc(Student.is_active)).all()
    self.assertIsNotNone(student_list[0].activated_at)
    self.assertIsNone(student_list[-1].activated_at)

    student_list = Student.query.order_by(asc(Student.is_active)).all()
    self.assertIsNotNone(student_list[-1].activated_at)
    self.assertIsNone(student_list[0].activated_at)

    # Test filtering:
    student_list = Student.query.filter(Student.is_active).all()
    for student in student_list:
        self.assertIsNotNone(student.activated_at)

    student_list = Student.query.filter(not_(Student.is_active)).all()
    for student in student_list:
        self.assertIsNone(student.activated_at)
def virtual_interface_get_by_instance(context, instance_uuid):
    """Gets all virtual interfaces for instance.

    :param instance_uuid: UUID of the instance to retrieve vifs for
    """
    vif_refs = _virtual_interface_query(context).\
        filter_by(instance_uuid=instance_uuid).\
        order_by(asc("created_at"), asc("id")).\
        all()
    return vif_refs
def _flavor_get_by_flavor_id_from_db(context, flavor_id):
    """Returns a dict describing specific flavor_id."""
    result = Flavor._flavor_get_query_from_db(context).\
        filter_by(flavorid=flavor_id).\
        order_by(asc(api_models.Flavors.id)).\
        first()
    if not result:
        raise exception.FlavorNotFound(flavor_id=flavor_id)
    return db_api._dict_with_extra_specs(result)
def get_all(cls, context, inactive=False, filters=None,
            sort_key='flavorid', sort_dir='asc', limit=None, marker=None):
    try:
        api_db_flavors = _flavor_get_all_from_db(context,
                                                 inactive=inactive,
                                                 filters=filters,
                                                 sort_key=sort_key,
                                                 sort_dir=sort_dir,
                                                 limit=limit,
                                                 marker=marker)
        # NOTE(danms): If we were asked for a marker and found it in
        # results from the API DB, we must continue our pagination with
        # just the limit (if any) to the main DB.
        marker = None
    except exception.MarkerNotFound:
        api_db_flavors = []

    if limit is not None:
        limit_more = limit - len(api_db_flavors)
    else:
        limit_more = None

    if limit_more is None or limit_more > 0:
        db_flavors = db.flavor_get_all(context, inactive=inactive,
                                       filters=filters, sort_key=sort_key,
                                       sort_dir=sort_dir, limit=limit_more,
                                       marker=marker)
    else:
        db_flavors = []

    return base.obj_make_list(context, cls(context), objects.Flavor,
                              api_db_flavors + db_flavors,
                              expected_attrs=['extra_specs'])
def list_episode(self, page, count, sort_field, sort_order, status):
    try:
        session = SessionManager.Session()
        query_object = session.query(Episode).\
            filter(Episode.delete_mark == None)

        if status is not None:
            query_object = query_object.filter(Episode.status == status)
            # count total rows
            total = session.query(func.count(Episode.id)).\
                filter(Episode.status == status).\
                scalar()
        else:
            total = session.query(func.count(Episode.id)).scalar()

        offset = (page - 1) * count

        if sort_order == 'desc':
            episode_list = query_object.\
                order_by(desc(getattr(Episode, sort_field))).\
                offset(offset).\
                limit(count).\
                all()
        else:
            episode_list = query_object.\
                order_by(asc(getattr(Episode, sort_field))).\
                offset(offset).limit(count).\
                all()

        episode_dict_list = [row2dict(episode) for episode in episode_list]

        return json_resp({'data': episode_dict_list, 'total': total})
    finally:
        SessionManager.Session.remove()
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
    """
    Overrides base class method
    """
    if schema is None:
        schema = self.default_schema_name

    stmt = select([column('IndexID'), column('IndexName'),
                   column('ChildKeyColumn'), column('ParentDB'),
                   column('ParentTable'), column('ParentKeyColumn')],
                  from_obj=[text('DBC.All_RI_ChildrenV')]) \
        .where(and_(text('ChildTable = :table'),
                    text('ChildDB = :schema'))) \
        .order_by(asc(column('IndexID')))

    res = connection.execute(stmt, schema=schema, table=table_name).fetchall()

    def grouper(fk_row):
        return {
            'name': fk_row.IndexName or fk_row.IndexID,  # ID if IndexName is None
            'schema': fk_row.ParentDB,
            'table': fk_row.ParentTable
        }

    # TODO: Check if there's a better way
    fk_dicts = list()
    for constraint_info, constraint_cols in groupby(res, grouper):
        fk_dict = {
            'name': constraint_info['name'],
            'constrained_columns': list(),
            'referred_table': constraint_info['table'],
            'referred_schema': constraint_info['schema'],
            'referred_columns': list()
        }

        for constraint_col in constraint_cols:
            fk_dict['constrained_columns'].append(
                self.normalize_name(constraint_col['ChildKeyColumn']))
            fk_dict['referred_columns'].append(
                self.normalize_name(constraint_col['ParentKeyColumn']))

        fk_dicts.append(fk_dict)

    return fk_dicts
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
                            host=None, virtual_interface_id=None):
    """allocate a fixed ip out of a fixed ip network pool.

    This allocates an unallocated fixed ip out of a specified
    network. We sort by updated_at to hand out the oldest address in
    the list.
    """
    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    network_or_none = or_(models.FixedIp.network_id == network_id,
                          models.FixedIp.network_id == null())
    fixed_ip_ref = model_query(context, models.FixedIp, read_deleted="no").\
        filter(network_or_none).\
        filter_by(reserved=False).\
        filter_by(instance_uuid=None).\
        filter_by(host=None).\
        filter_by(leased=False).\
        order_by(asc(models.FixedIp.updated_at)).\
        first()

    if not fixed_ip_ref:
        raise exception.NoMoreFixedIps(net=network_id)

    params = {'allocated': virtual_interface_id is not None}
    if fixed_ip_ref['network_id'] is None:
        params['network_id'] = network_id
    if instance_uuid:
        params['instance_uuid'] = instance_uuid
    if host:
        params['host'] = host
    if virtual_interface_id:
        params['virtual_interface_id'] = virtual_interface_id

    rows_updated = model_query(context, models.FixedIp, read_deleted="no").\
        filter_by(id=fixed_ip_ref['id']).\
        filter_by(network_id=fixed_ip_ref['network_id']).\
        filter_by(reserved=False).\
        filter_by(instance_uuid=None).\
        filter_by(host=None).\
        filter_by(leased=False).\
        filter_by(address=fixed_ip_ref['address']).\
        update(params, synchronize_session='evaluate')

    if not rows_updated:
        LOG.debug('The row was updated in a concurrent transaction, '
                  'we will fetch another row')
        raise db_exc.RetryRequest(
            exception.FixedIpAssociateFailed(net=network_id))

    return fixed_ip_ref
def flavor_get_all(context, inactive=False, filters=None,
                   sort_key='flavorid', sort_dir='asc', limit=None,
                   marker=None):
    """Returns all flavors."""
    filters = filters or {}

    # FIXME(sirp): now that we have the `disabled` field for flavors, we
    # should probably remove the use of `deleted` to mark inactive. `deleted`
    # should mean truly deleted, e.g. we can safely purge the record out of
    # the database.
    read_deleted = "yes" if inactive else "no"

    query = _flavor_get_query(context, read_deleted=read_deleted)

    if 'min_memory_mb' in filters:
        query = query.filter(
            models.InstanceTypes.memory_mb >= filters['min_memory_mb'])

    if 'min_root_gb' in filters:
        query = query.filter(
            models.InstanceTypes.root_gb >= filters['min_root_gb'])

    if 'disabled' in filters:
        query = query.filter(
            models.InstanceTypes.disabled == filters['disabled'])

    if 'is_public' in filters and filters['is_public'] is not None:
        the_filter = [models.InstanceTypes.is_public == filters['is_public']]
        if filters['is_public'] and context.project_id is not None:
            the_filter.extend([
                models.InstanceTypes.projects.any(
                    project_id=context.project_id, deleted=0)
            ])
        if len(the_filter) > 1:
            query = query.filter(or_(*the_filter))
        else:
            query = query.filter(the_filter[0])

    marker_row = None
    if marker is not None:
        marker_row = _flavor_get_query(context, read_deleted=read_deleted).\
            filter_by(flavorid=marker).\
            first()
        if not marker_row:
            raise exception.MarkerNotFound(marker)

    query = sqlalchemyutils.paginate_query(query, models.InstanceTypes,
                                           limit,
                                           [sort_key, 'id'],
                                           marker=marker_row,
                                           sort_dir=sort_dir)
    inst_types = query.all()

    return [_dict_with_extra_specs(i) for i in inst_types]
def list_bangumi(self, page, count, sort_field, sort_order, name,
                 user_id, bangumi_type):
    try:
        session = SessionManager.Session()

        query_object = session.query(Bangumi).\
            options(joinedload(Bangumi.cover_image)).\
            filter(Bangumi.delete_mark == None)

        if bangumi_type != -1:
            query_object = query_object.filter(Bangumi.type == bangumi_type)

        if name is not None:
            name_pattern = '%{0}%'.format(name.encode('utf-8'),)
            logger.debug(name_pattern)
            query_object = query_object.\
                filter(or_(Bangumi.name.ilike(name_pattern),
                           Bangumi.name_cn.ilike(name_pattern)))
            # count total rows
            total = session.query(func.count(Bangumi.id)).\
                filter(or_(Bangumi.name.ilike(name_pattern),
                           Bangumi.name_cn.ilike(name_pattern))).\
                scalar()
        else:
            total = session.query(func.count(Bangumi.id)).scalar()

        if sort_order == 'desc':
            query_object = query_object.\
                order_by(desc(getattr(Bangumi, sort_field)))
        else:
            query_object = query_object.\
                order_by(asc(getattr(Bangumi, sort_field)))

        if count == -1:
            bangumi_list = query_object.all()
        else:
            offset = (page - 1) * count
            bangumi_list = query_object.offset(offset).limit(count).all()

        bangumi_id_list = [bgm.id for bgm in bangumi_list]
        favorites = session.query(Favorites).\
            filter(Favorites.bangumi_id.in_(bangumi_id_list)).\
            filter(Favorites.user_id == user_id).\
            all()

        bangumi_dict_list = []
        for bgm in bangumi_list:
            bangumi = row2dict(bgm)
            bangumi['cover'] = utils.generate_cover_link(bgm)
            utils.process_bangumi_dict(bgm, bangumi)
            for fav in favorites:
                if fav.bangumi_id == bgm.id:
                    bangumi['favorite_status'] = fav.status
            bangumi_dict_list.append(bangumi)

        return json_resp({'data': bangumi_dict_list, 'total': total})
    finally:
        SessionManager.Session.remove()
def list_bangumi(self, page, count, sort_field, sort_order, name,
                 bangumi_type):
    try:
        session = SessionManager.Session()

        query_object = session.query(Bangumi).\
            options(joinedload(Bangumi.cover_image)).\
            options(joinedload(Bangumi.created_by)).\
            options(joinedload(Bangumi.maintained_by)).\
            filter(Bangumi.delete_mark == None)

        if bangumi_type != -1:
            query_object = query_object.filter(Bangumi.type == bangumi_type)

        if name is not None:
            name_pattern = '%{0}%'.format(name.encode('utf-8'),)
            logger.debug(name_pattern)
            query_object = query_object.\
                filter(or_(Bangumi.name.ilike(name_pattern),
                           Bangumi.name_cn.ilike(name_pattern)))
            # count total rows
            total = session.query(func.count(Bangumi.id)).\
                filter(or_(Bangumi.name.ilike(name_pattern),
                           Bangumi.name_cn.ilike(name_pattern))).\
                scalar()
        else:
            total = session.query(func.count(Bangumi.id)).scalar()

        if sort_order == 'desc':
            query_object = query_object.\
                order_by(desc(getattr(Bangumi, sort_field)))
        else:
            query_object = query_object.\
                order_by(asc(getattr(Bangumi, sort_field)))

        # querying all rows is supported by passing count = -1
        if count == -1:
            bangumi_list = query_object.all()
        else:
            offset = (page - 1) * count
            bangumi_list = query_object.offset(offset).limit(count).all()

        bangumi_dict_list = []
        for bgm in bangumi_list:
            bangumi = row2dict(bgm)
            bangumi['cover'] = utils.generate_cover_link(bgm)
            utils.process_bangumi_dict(bgm, bangumi)
            self.__process_user_obj_in_bangumi(bgm, bangumi)
            bangumi_dict_list.append(bangumi)

        return json_resp({'data': bangumi_dict_list, 'total': total})
        # raise ClientError('something happened')
    finally:
        SessionManager.Session.remove()