The following 18 code examples, extracted from open source Python projects, illustrate how to use sqlalchemy.not_().
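For orientation before the extracted project code, here is a minimal, self-contained sketch of what not_() does. The users table, the demo-email pattern, and the in-memory SQLite engine are illustrative assumptions (written against the 1.x-style select([...]) API that the examples on this page use), not taken from any of the projects below.

import sqlalchemy as sa

# A throwaway table purely for illustration (not from the projects below).
metadata = sa.MetaData()
users = sa.Table(
    'users', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('email', sa.String),
)

engine = sa.create_engine('sqlite://')

with engine.connect() as conn:
    metadata.create_all(conn)
    conn.execute(users.insert(), [{'email': 'a@example.com'},
                                  {'email': 'b@demo.test'}])
    # sa.not_() negates any SQL expression; here it excludes the demo pattern,
    # mirroring the "filter out emails" usage several examples below rely on.
    query = sa.select([users.c.email]).where(
        sa.not_(users.c.email.like('%@demo.test')))
    print([row[0] for row in conn.execute(query)])  # ['a@example.com']

The same inversion can also be written with the ~ operator, e.g. ~users.c.email.like('%@demo.test'), which not_() is equivalent to.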
def worker_count(self):
    """ Return the number of workers in the account """
    query = select([func.count(distinct(
        user_model.User.id))])\
        .where(Organization.id == self.id)\
        .where(RoleToUser.archived == False)\
        .where(Organization.id == Location.organization_id)\
        .where(Location.id == Role.location_id)\
        .where(Role.id == RoleToUser.role_id)\
        .where(RoleToUser.user_id == user_model.User.id)\
        .select_from(RoleToUser)\
        .select_from(Role)\
        .select_from(Location)\
        .select_from(Organization)\
        .select_from(user_model.User)

    # Filter out demo account and Staffjoy emails
    for email in current_app.config.get("KPI_EMAILS_TO_EXCLUDE"):
        query = query.where(not_(user_model.User.email.like(email)))

    workers = db.session.execute(query).fetchone()[0]
    return workers
def get_all_batches(self):
    query = sqlalchemy.select([
        self._batches,
        sqlalchemy.not_(sqlalchemy.exists(
            sqlalchemy.select([1]).where(sqlalchemy.and_(
                self._batches.c.id == self._batch_entries.c.batch_id,
                self._batch_entries.c.crtsh_id == self._certs.c.crtsh_id,
                self._certs.c.expiration_date > datetime.datetime.utcnow(),
                self._certs.c.revoked_at.is_(None),
            ))
        ))
    ])
    rows = self._engine.execute(query)
    return [
        Batch(
            id=id,
            description=description,
            completed=completed,
        )
        for id, description, completed in rows
    ]
def resolve(self):
    """Create filter for a particular node of the filter tree"""
    if 'or' not in self.filter_ and 'and' not in self.filter_ and 'not' not in self.filter_:
        value = self.value

        if isinstance(value, dict):
            value = Node(self.related_model, value, self.resource, self.related_schema).resolve()

        if '__' in self.filter_.get('name', ''):
            value = {self.filter_['name'].split('__')[1]: value}

        if isinstance(value, dict):
            return getattr(self.column, self.operator)(**value)
        else:
            return getattr(self.column, self.operator)(value)

    if 'or' in self.filter_:
        return or_(Node(self.model, filt, self.resource, self.schema).resolve() for filt in self.filter_['or'])
    if 'and' in self.filter_:
        return and_(Node(self.model, filt, self.resource, self.schema).resolve() for filt in self.filter_['and'])
    if 'not' in self.filter_:
        return not_(Node(self.model, self.filter_['not'], self.resource, self.schema).resolve())
def get_count_not_in_view_types(cls, view_types):
    '''Returns the count of ResourceView not in the view types list'''
    query = meta.Session.query(ResourceView.view_type,
                               sa.func.count(ResourceView.id)) \
        .group_by(ResourceView.view_type) \
        .filter(sa.not_(ResourceView.view_type.in_(view_types)))

    return query.all()
def delete_not_in_view_types(cls, view_types):
    '''Delete the Resource Views not in the received view types list'''
    query = meta.Session.query(ResourceView) \
        .filter(sa.not_(ResourceView.view_type.in_(view_types)))

    return query.delete(synchronize_session='fetch')
def get(self):
    resources = Item.query.filter(not_(Item.id.like('%_LEVEL%'))).filter_by(category_id='resources')

    results = []
    for item in resources:
        stats = {
            'stats': fetch_item_market_stats(item.id),
            'item': {
                'id': item.id,
                'name': item.name,
                'category_id': item.category_id,
                'category_name': item.category.name,
                'sub_category_id': item.sub_category_id,
                'sub_category_name': item.sub_category.name,
                'tier': item.tier,
            },
        }
        results.append(stats)

    data = {
        'resources': results
    }

    return data, 200
def _network_find(context, limit, sorts, marker, page_reverse, fields,
                  defaults=None, provider_query=False, **filters):
    query = context.session.query(models.Network)
    model_filters = _model_query(context, models.Network, filters, query)

    if defaults:
        invert_defaults = False
        if INVERT_DEFAULTS in defaults:
            invert_defaults = True
            defaults.pop(0)
        if filters and invert_defaults:
            query = query.filter(and_(not_(models.Network.id.in_(defaults)),
                                      and_(*model_filters)))
        elif not provider_query and filters and not invert_defaults:
            query = query.filter(or_(models.Network.id.in_(defaults),
                                     and_(*model_filters)))
        elif not invert_defaults:
            query = query.filter(models.Network.id.in_(defaults))
    else:
        query = query.filter(*model_filters)

    if "join_subnets" in filters:
        query = query.options(orm.joinedload(models.Network.subnets))

    return paginate_query(query, models.Network, limit, sorts, marker)
def _find_addresses_to_be_unlocked(context, network_ids, addresses):
    addresses = [_to_int(address) for address in addresses]
    query = context.session.query(models.IPAddress)
    query = query.filter(models.IPAddress.network_id.in_(network_ids))
    if addresses:
        query = query.filter(not_(models.IPAddress.address.in_(addresses)))
    query = query.filter(not_(models.IPAddress.lock_id.is_(None)))
    return query.all()
def _do_none_of(self, clause_name, payload):
    if not isinstance(payload, dict) or not len(payload):
        raise ServerError('can\'t parse "%s" clause: contents must be a dict, '
                          'but got %s', clause_name, payload.__class__.__name__)
    from sqlalchemy import not_, or_
    return not_(or_(*[self._compile_clause(*t) for t in payload.iteritems()]))
def _filter_not_staffjoy(query):
    """Remove known Staffjoy emails"""
    for email in current_app.config.get("KPI_EMAILS_TO_EXCLUDE"):
        query = query.where(not_(User.email.like(email)))
    return query
def test_activation_mixin(self):
    activated_student = Student()
    activated_student.name = 'activated-student'
    activated_student.activated_at = datetime.now()
    DBSession.add(activated_student)

    deactivated_student = Student()
    deactivated_student.name = 'deactivated-student'
    deactivated_student.activated_at = None
    DBSession.add(deactivated_student)

    DBSession.commit()

    # Test ordering:
    student_list = Student.query.order_by(desc(Student.is_active)).all()
    self.assertIsNotNone(student_list[0].activated_at)
    self.assertIsNone(student_list[-1].activated_at)

    student_list = Student.query.order_by(asc(Student.is_active)).all()
    self.assertIsNotNone(student_list[-1].activated_at)
    self.assertIsNone(student_list[0].activated_at)

    # Test filtering:
    student_list = Student.query.filter(Student.is_active).all()
    for student in student_list:
        self.assertIsNotNone(student.activated_at)

    student_list = Student.query.filter(not_(Student.is_active)).all()
    for student in student_list:
        self.assertIsNone(student.activated_at)
def _addFilter(self, filterList, filter):
    """
    Add a filter to a list of SQLAlchemy filters.

    :param filterList: a list of SQLAlchemy filters which is modified.
    :param filter: information on the filter.
    :return: the modified list.
    """
    if 'group' in filter:
        sublist = []
        for subfilter in filter['value']:
            sublist = self._addFilter(sublist, subfilter)
        if filter['group'] == 'and':
            filterList.append(sqlalchemy.and_(*sublist))
        elif filter['group'] == 'or':
            filterList.append(sqlalchemy.or_(*sublist))
        return filterList
    operator = filter['operator']
    operator = base.FilterOperators.get(operator, operator)
    operator = self.databaseOperators.get(operator, operator)
    field = self._convertFieldOrFunction(filter['field'])
    negate = False
    if operator.startswith('not_'):
        negate = True
        operator = operator.split('not_', 1)[1]
    if operator == 'in':
        values = filter['value']
        if not isinstance(values, (list, tuple)):
            values = [values]
        values = [self._convertFieldOrFunction(value, True) for value in values]
        opfunc = field.in_(values)
    elif operator == 'is':
        value = self._convertFieldOrFunction(filter['value'], True)
        opfunc = field.is_(value)
    else:
        value = self._convertFieldOrFunction(filter['value'], True)
        opfunc = field.op(operator)(value)
    if negate:
        opfunc = sqlalchemy.not_(opfunc)
    filterList.append(opfunc)
    return filterList
def _subnet_find(context, limit, sorts, marker, page_reverse, fields,
                 defaults=None, provider_query=False, **filters):
    query = context.session.query(models.Subnet)
    model_filters = _model_query(context, models.Subnet, filters, query)

    if defaults:
        invert_defaults = False
        if INVERT_DEFAULTS in defaults:
            invert_defaults = True
            defaults.pop(0)

        # when 'invert_defaults' were the only entry in defaults,
        # defaults will be empty now. The next 4 lines optimize
        # performance by avoiding running the in_ filter on an empty set:
        # like so: models.Subnet.id.in_([])
        if defaults:
            subnet_filter = models.Subnet.id.in_(defaults)
        else:
            # if defaults is an empty list, just create a False
            # BinaryExpression
            subnet_filter = models.Subnet.id != models.Subnet.id

        if filters and invert_defaults:
            query = query.filter(and_(not_(subnet_filter),
                                      and_(*model_filters)))
        elif not provider_query and filters and not invert_defaults:
            query = query.filter(or_(subnet_filter,
                                     and_(*model_filters)))
        elif not invert_defaults:
            query = query.filter(subnet_filter)
    else:
        query = query.filter(*model_filters)

    if "join_dns" in filters:
        query = query.options(orm.joinedload(models.Subnet.dns_nameservers))

    if "join_routes" in filters:
        query = query.options(orm.joinedload(models.Subnet.routes))

    if "join_pool" in filters:
        query = query.options(orm.undefer('_allocation_pool_cache'))

    return paginate_query(query, models.Subnet, limit, sorts, marker)
def get_used_ips(session, **kwargs):
    """Returns dictionary with keys segment_id and value used IPs count.

    Used IP address count is determined by:
    - allocated IPs
    - deallocated IPs whose `deallocated_at` is within the `reuse_after`
      window compared to the present time, excluding IPs that are accounted
      for in the current IP policy (because IP policy is mutable and
      deallocated IPs are not checked nor deleted on IP policy creation, thus
      deallocated IPs that don't fit the current IP policy can exist in the
      neutron database).
    """
    LOG.debug("Getting used IPs...")
    with session.begin():
        query = session.query(
            models.Subnet.segment_id,
            func.count(models.IPAddress.address))
        query = query.group_by(models.Subnet.segment_id)
        query = _filter(query, **kwargs)

        reuse_window = timeutils.utcnow() - datetime.timedelta(
            seconds=cfg.CONF.QUARK.ipam_reuse_after)
        # NOTE(asadoughi): This is an outer join instead of a regular join
        # to include subnets with zero IP addresses in the database.
        query = query.outerjoin(
            models.IPAddress,
            and_(models.Subnet.id == models.IPAddress.subnet_id,
                 or_(not_(models.IPAddress.lock_id.is_(None)),
                     models.IPAddress._deallocated.is_(None),
                     models.IPAddress._deallocated == 0,
                     models.IPAddress.deallocated_at > reuse_window)))

        query = query.outerjoin(
            models.IPPolicyCIDR,
            and_(
                models.Subnet.ip_policy_id == models.IPPolicyCIDR.ip_policy_id,
                models.IPAddress.address >= models.IPPolicyCIDR.first_ip,
                models.IPAddress.address <= models.IPPolicyCIDR.last_ip))
        # NOTE(asadoughi): (address is allocated) OR
        # (address is deallocated and not inside subnet's IP policy)
        query = query.filter(or_(
            models.IPAddress._deallocated.is_(None),
            models.IPAddress._deallocated == 0,
            models.IPPolicyCIDR.id.is_(None)))

        ret = ((segment_id, address_count)
               for segment_id, address_count in query.all())
        return dict(ret)
def list_unlocked_replicas(rse, limit, bytes=None, rse_id=None, worker_number=None, total_workers=None, delay_seconds=0, session=None):
    """
    List RSE File replicas with no locks.

    :param rse: the rse name.
    :param bytes: the amount of needed bytes.
    :param session: The database session in use.

    :returns: a list of dictionary replica.
    """
    if not rse_id:
        rse_id = get_rse_id(rse=rse, session=session)

    # filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED).\
    none_value = None  # Hack to get pep8 happy...
    query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path,
                          models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\
        with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\
        filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\
        filter(models.RSEFileAssociation.lock_cnt == 0).\
        filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\
        filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)),
                   and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED,
                        models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\
        order_by(models.RSEFileAssociation.tombstone)

    # do no delete files used as sources
    stmt = exists(select([1]).prefix_with("/*+ INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX) */", dialect='oracle')).\
        where(and_(models.RSEFileAssociation.scope == models.Request.scope,
                   models.RSEFileAssociation.name == models.Request.name))
    query = query.filter(not_(stmt))

    if worker_number and total_workers and total_workers - 1 > 0:
        if session.bind.dialect.name == 'oracle':
            bindparams = [bindparam('worker_number', worker_number - 1),
                          bindparam('total_workers', total_workers - 1)]
            query = query.filter(text('ORA_HASH(name, :total_workers) = :worker_number', bindparams=bindparams))
        elif session.bind.dialect.name == 'mysql':
            query = query.filter(text('mod(md5(name), %s) = %s' % (total_workers - 1, worker_number - 1)))
        elif session.bind.dialect.name == 'postgresql':
            query = query.filter(text('mod(abs((\'x\'||md5(name))::bit(32)::int), %s) = %s' % (total_workers - 1, worker_number - 1)))

    needed_space = bytes
    total_bytes, total_files = 0, 0
    rows = []
    for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000):
        if state != ReplicaState.UNAVAILABLE:
            total_bytes += bytes
            if tombstone != OBSOLETE and needed_space is not None and total_bytes > needed_space:
                break

            total_files += 1
            if total_files > limit:
                break

        rows.append({'scope': scope, 'name': name, 'path': path,
                     'bytes': bytes, 'tombstone': tombstone,
                     'state': state})
    return rows
def update_replicas_states(replicas, nowait=False, session=None):
    """
    Update File replica information and state.

    :param replicas: The list of replicas.
    :param nowait: Nowait parameter for the for_update queries.
    :param session: The database session in use.
    """
    rse_ids = {}
    for replica in replicas:
        if 'rse_id' not in replica:
            if replica['rse'] not in rse_ids:
                rse_ids[replica['rse']] = get_rse_id(rse=replica['rse'], session=session)
            replica['rse_id'] = rse_ids[replica['rse']]

        query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name'])
        try:
            if nowait:
                query.with_for_update(nowait=True).one()
        except NoResultFound:
            # remember scope, name and rse_id
            raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse_id: %s" % (replica['scope'], replica['name'], replica['rse_id']))

        if isinstance(replica['state'], str) or isinstance(replica['state'], unicode):
            replica['state'] = ReplicaState.from_string(replica['state'])

        values = {'state': replica['state']}
        if replica['state'] == ReplicaState.BEING_DELETED:
            query = query.filter_by(lock_cnt=0)
            # Exclude replicas use as sources
            stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope,
                                          models.RSEFileAssociation.name == models.Source.name,
                                          models.RSEFileAssociation.rse_id == models.Source.rse_id))
            query = query.filter(not_(stmt))
            values['tombstone'] = OBSOLETE
        elif replica['state'] == ReplicaState.AVAILABLE:
            rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session)
        elif replica['state'] == ReplicaState.UNAVAILABLE:
            rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'],
                                            error_message=replica.get('error_message', None),
                                            broken_rule_id=replica.get('broken_rule_id', None),
                                            broken_message=replica.get('broken_message', None),
                                            nowait=nowait, session=session)

        if 'path' in replica and replica['path']:
            values['path'] = replica['path']

        if not query.update(values, synchronize_session=False):
            if 'rse' not in replica:
                replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session)
            raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica)
    return True
def list_quarantined_replicas(rse, limit, worker_number=None, total_workers=None, session=None):
    """
    List RSE Quarantined File replicas.

    :param rse: the rse name.
    :param limit: The maximum number of replicas returned.
    :param worker_number: id of the executing worker.
    :param total_workers: Number of total workers.
    :param session: The database session in use.

    :returns: a list of dictionary replica.
    """
    rse_id = get_rse_id(rse, session=session)

    query = session.query(models.QuarantinedReplica.path,
                          models.QuarantinedReplica.bytes,
                          models.QuarantinedReplica.scope,
                          models.QuarantinedReplica.name,
                          models.QuarantinedReplica.created_at).\
        filter(models.QuarantinedReplica.rse_id == rse_id)

    # do no delete valid replicas
    stmt = exists(select([1]).prefix_with("/*+ index(REPLICAS REPLICAS_PK) */", dialect='oracle')).\
        where(and_(models.RSEFileAssociation.scope == models.QuarantinedReplica.scope,
                   models.RSEFileAssociation.name == models.QuarantinedReplica.name,
                   models.RSEFileAssociation.rse_id == models.QuarantinedReplica.rse_id))
    query = query.filter(not_(stmt))

    if worker_number and total_workers and total_workers - 1 > 0:
        if session.bind.dialect.name == 'oracle':
            bindparams = [bindparam('worker_number', worker_number - 1),
                          bindparam('total_workers', total_workers - 1)]
            query = query.filter(text('ORA_HASH(path, :total_workers) = :worker_number', bindparams=bindparams))
        elif session.bind.dialect.name == 'mysql':
            query = query.filter('mod(md5(path), %s) = %s' % (total_workers - 1, worker_number - 1))
        elif session.bind.dialect.name == 'postgresql':
            query = query.filter('mod(abs((\'x\'||md5(path))::bit(32)::int), %s) = %s' % (total_workers - 1, worker_number - 1))

    return [{'path': path,
             'rse': rse,
             'rse_id': rse_id,
             'created_at': created_at,
             'scope': scope,
             'name': name,
             'bytes': bytes}
            for path, bytes, scope, name, created_at in query.limit(limit)]
def get_uninvited_associated_users(self, question):
    '''
    .. function:: get_uninvited_associated_users()

    Get all users who participated in OTHER questions
    also participated in by this user.

    :param question: question
    :type question: Question
    :rtype: list
    '''
    invited_uids = set()
    current_invites = question.invites.all()
    for invite in current_invites:
        invited_uids.add(invite.receiver_id)
    new_invites = question.invites_sent.all()
    for invite in new_invites:
        invited_uids.add(invite.receiver_id)
    invited_uids = list(invited_uids)

    questions_participated = db_session.query(Invite.question_id)\
        .filter(Invite.receiver_id == self.id)\
        .all()
    qids = set()
    for item in questions_participated:
        qids.add(item[0])
    qids = list(qids)

    associates = db_session.query(Invite.receiver_id)\
        .filter(Invite.question_id.in_(qids))\
        .filter(Invite.receiver_id != self.id)\
        .filter(not_(Invite.receiver_id.in_(invited_uids)))\
        .all()
    uids = set()
    for user in associates:
        uids.add(user[0])
    uids = list(uids)

    users = db_session.query(User)\
        .filter(User.id.in_(uids))\
        .all()

    uninvited_associates = list()
    for user in users:
        uninvited_associates.append({'username': user.username, 'user_id': user.id})

    return uninvited_associates