Here are 50 code examples extracted from open source Python projects, illustrating how to use sqlalchemy.or_().
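Before the extracted examples, a quick orientation: or_() accepts any number of SQL expressions and joins them with OR inside filter(). This sketch is ours, not one of the examples below, and the User model and its columns are hypothetical:

from sqlalchemy import Column, Integer, String, create_engine, or_
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class User(Base):  # hypothetical model, for illustration only
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    login = Column(String)
    email = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    name_or_mail = 'alice'
    # WHERE users.login = :login_1 OR users.email = :email_1
    users = session.query(User).filter(
        or_(User.login == name_or_mail, User.email == name_or_mail)
    ).all()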
def get_profile(user_id):
    session = db.session
    user, results = get_profile_data(session, user_id)
    user_games = session.query(Game).filter(
        sqla.or_(
            Game.winner_id == user_id,
            Game.loser_id == user_id,
        ),
        Game.deleted_at.is_(None)
    ).order_by(Game.created_at).all()
    return flask.render_template(
        'user.html',
        title=user.name,
        user=user,
        results=results,
        auth_url=get_google_authorization_url(),
        user_games=[game_dict(session, game) for game in user_games],
        active_users=get_active_users(session)  # used in base for 'Add Game'
    )
def logged_in_user(self):
    """Returns the logged in user as JSON data
    """
    from pyramid.security import authenticated_userid
    login_name = authenticated_userid(self.request)

    from stalker import User
    from stalker.db.session import DBSession
    from sqlalchemy import or_
    user_id = DBSession.query(User.id) \
        .filter(or_(User.login == login_name, User.email == login_name)) \
        .first()

    if not user_id:
        from pyramid.exceptions import HTTPForbidden
        raise HTTPForbidden(self.request)
    else:
        self.entity_id = user_id
        return self.get_entity()
def add_backnode():
    if request.method == 'POST':
        node_name = request.form['node_name']
        ftp_ip = request.form['ftp_ip']
        ftp_port = request.form['ftp_port']
        ftp_user = request.form['ftp_user']
        ftp_pass = request.form['ftp_pass']
        back_node = backhosts.query.filter(
            or_(backhosts.host_node == node_name,
                backhosts.ftp_ip == ftp_ip)).all()
        if back_node:
            # original message was mojibake ('?????'); intent restored from context
            flash(u'%s already exists!' % node_name)
            return render_template('addbacknode.html')
        backhost = backhosts(host_node=node_name, ftp_ip=ftp_ip, ftp_port=ftp_port,
                             ftp_user=ftp_user, ftp_pass=ftp_pass)
        db.session.add(backhost)
        db.session.commit()
        # original message was mojibake ('?????'); intent restored from context
        flash(u'%s added successfully!' % node_name)
        return render_template('addbacknode.html')
    else:
        return render_template('addbacknode.html')
def __or__(self, other):
    """Implement the ``|`` operator.

    When used with SQL expressions, results in an
    OR operation, equivalent to
    :func:`~.expression.or_`, that is::

        a | b

    is equivalent to::

        from sqlalchemy import or_
        or_(a, b)

    Care should be taken when using ``|`` regarding
    operator precedence; the ``|`` operator has the highest
    precedence.  The operands should be enclosed in
    parentheses if they contain further sub expressions::

        (a == 2) | (b == 4)

    """
    return self.operate(or_, other)
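The equivalence the docstring describes is easy to check. A minimal sketch (not part of the original listing) using free-standing column() objects shows both spellings compile to the same SQL:

from sqlalchemy import column, or_

a = column('a')
b = column('b')

# Both print "a = :a_1 OR b = :b_1".
print((a == 2) | (b == 4))
print(or_(a == 2, b == 4))

The parentheses around each comparison matter: in Python, | binds tighter than ==, so a == 2 | b == 4 would not parse as intended.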
def validate(self):
    rv = BaseForm.validate(self)
    if not rv:
        return False
    if self.username.data in self.password.data:
        self.password.errors.append('Password must not contain the username.')
        return False
    if self.edit:
        return True
    user = User.query.filter(or_(User.name == self.username.data,
                                 User.email == self.email.data)).first()
    if not user:
        return True
    if user.name == self.username.data:
        self.username.errors.append(ERROR_USERNAME_EXISTS)
    if user.email == self.email.data:
        self.email.errors.append(ERROR_EMAIL_EXISTS)
    return False
def select_words(args):
    query = db_session.query(WordList).filter(WordList.language == args.language)
    if args.include_skipped:
        query = query.filter(or_(WordList.state == 'pending',
                                 WordList.state == 'skipped'))
    else:
        query = query.filter(WordList.state == 'pending')
    query = query.order_by(func.length(WordList.word))

    word = query.first()
    while word is not None:
        _cli_input_for_word(word)
        word = query.first()

    if args.include_skipped:
        print("DB has no more words in 'pending' or 'skipped' state")
    else:
        print("DB has no more words in 'pending' state")
def material(uuid):
    cols = [materials.c.uuid, materials.c.parent_id,
            materials.c.ga_absolute_volumetric_loading,
            materials.c.sa_volumetric_surface_area,
            materials.c.vf_helium_void_fraction,
            materials.c.generation, materials.c.run_id]
    rows = and_(materials.c.uuid == uuid,
                or_(materials.c.retest_passed == None,
                    materials.c.retest_passed == True))
    s = select(cols, rows)
    print(
        '\nuuid\t\t\t\t\tparent\tgas adsorption (cc/cc)\tsurface area (m2/cc)' +
        '\tvoid fraction\tgeneration\trun'
    )
    result = engine.execute(s)
    for row in result:
        print(
            '%s\t%s\t%s\t\t%s\t\t\t' % (row[0], row[1], row[2], row[3]) +
            '%s\t%s\t\t%s' % (row[4], row[5], row[6])
        )
    result.close()
def find_children(uuid):
    cols = [materials.c.id]
    rows = [materials.c.uuid == uuid]
    result = engine.execute(select(cols, *rows))
    for row in result:
        parent_id = row[0]
    result.close()

    cols = [materials.c.uuid]
    rows = and_(materials.c.parent_id == parent_id,
                or_(materials.c.retest_passed == None,
                    materials.c.retest_passed == True))
    print('\nchildren of %s :' % uuid)
    result = engine.execute(select(cols, rows))
    for row in result:
        print('\t%s' % row[0])
    result.close()
def msearch(self, m, query, fields=None, limit=None, or_=False):
    if fields is None:
        fields = m.__searchable__
    f = []
    if self.analyzer is not None:
        keywords = self.analyzer(query)
    else:
        keywords = query.split(' ')
    for field in fields:
        query = [getattr(m, field).contains(keyword)
                 for keyword in keywords if keyword]
        if not or_:
            f.append(_and(*query))
        else:
            f.append(_or(*query))
    results = m.query.filter(_or(*f))
    if limit is not None:
        results = results.limit(limit)
    return results
def search(keyword):
    Blog.update()
    pt = request.args.get('page')
    if pt is None:
        page = 1
    else:
        page = int(pt)
    try:
        keywords = keyword.split()
        from sqlalchemy import and_, or_
        rules = and_(*[or_(Post.title.ilike('%%%s%%' % k),
                           Post.summary.ilike('%%%s%%' % k),
                           Post.content.ilike('%%%s%%' % k))
                       for k in keywords])
        pagination = Post.query.filter(rules).order_by(Post.date.desc()).paginate(
            page=page, per_page=current_app.config['FMBLOG_PER_PAGE'])
    except Exception:
        return render_template('404.html', e='Error: Empty Keyword',
                               site=current_app.config['FMBLOG_SITE'],
                               value={}), 404
    return render_template('search.html', value={'keyword': keyword},
                           pagination=pagination, endpoint='main.search',
                           page_list=get_page_list(pagination, page),
                           tags=Tag.query.order_by(Tag.count.desc()).all(),
                           cats=Category.query.order_by(Category.count.desc()).all(),
                           site=current_app.config['FMBLOG_SITE'])
def get_recent_pastes(page_num, num_per_page):
    """
    Get recently posted pastes that are active and not expired. This query is
    intended to be used in chunks, indexed by page: e.g., results 0-4 appear on
    page 0, 5-9 appear on page 1, etc.

    :param page_num: The page number. Indexes from 0.
    :param num_per_page: The number of results to query for in this chunk
                         (e.g., to display on this page).
    :return: A list of models.Paste objects sorted by post time (descending)
             that are active and not expired.
    """
    return models.Paste.query.filter_by(
        is_active=True,
    ).filter(
        or_(models.Paste.expiry_time.is_(None),
            models.Paste.expiry_time > time.time()),
    ).order_by(
        models.Paste.post_time.desc(),
    ).offset(
        page_num * num_per_page,
    ).limit(
        num_per_page,
    ).all()
def get_top_pastes(page_num, num_per_page):
    """
    Get the top (most viewed) pastes that are active and not expired. This
    query is intended to be used in chunks, indexed by page: e.g., results 0-4
    appear on page 0, 5-9 appear on page 1, etc.

    :param page_num: The page number. Indexes from 0.
    :param num_per_page: The number of results to query for in this chunk
                         (e.g., to display on this page).
    :return: A list of models.Paste objects sorted by number of views
             (descending) that are active and not expired.
    """
    return models.Paste.query.filter_by(
        is_active=True,
    ).filter(
        or_(models.Paste.expiry_time.is_(None),
            models.Paste.expiry_time > time.time()),
    ).order_by(
        models.Paste.views.desc(),
    ).offset(
        page_num * num_per_page,
    ).limit(
        num_per_page,
    ).all()
def upgrade():
    op.alter_column('kubes', 'name', existing_type=sa.VARCHAR(length=64),
                    nullable=False)
    op.create_index('one_default', 'kubes', ['is_default'], unique=True,
                    postgresql_where=sa.text(u'kubes.is_default IS true'))
    op.drop_constraint(u'kubes_is_default_key', 'kubes', type_='unique')

    op.alter_column('packages', 'name', existing_type=sa.VARCHAR(length=64),
                    nullable=False)
    op.alter_column('packages', 'prefix', existing_type=sa.VARCHAR(),
                    nullable=False)
    op.alter_column('packages', 'suffix', existing_type=sa.VARCHAR(),
                    nullable=False)

    session = Session(bind=op.get_bind())
    session.query(PackageKube).filter(sa.or_(
        PackageKube.package_id.is_(None),
        PackageKube.kube_id.is_(None),
    )).delete()
    session.commit()

    op.alter_column('package_kube', 'kube_id', existing_type=sa.INTEGER(),
                    nullable=False)
    op.alter_column('package_kube', 'package_id', existing_type=sa.INTEGER(),
                    nullable=False)
def _secure_query(model, *columns):
    query = db_base.model_query(model, columns)

    if not issubclass(model, model_base.QinlingSecureModelBase):
        return query

    if model == models.Runtime:
        query_criterion = sa.or_(
            model.project_id == context.get_ctx().projectid,
            model.is_public
        )
    else:
        query_criterion = model.project_id == context.get_ctx().projectid

    query = query.filter(query_criterion)

    return query
def run(self, query):
    assert isinstance(query, dict)
    # no support for faceting atm
    self.facets = {}
    limit = min(1000, int(query.get('rows', 10)))

    q = query.get('q')
    ourq = model.Session.query(model.Package.id).filter_by(state='active')

    def makelike(field):
        _attr = getattr(model.Package, field)
        return _attr.ilike('%' + term + '%')

    if q and q not in ('""', "''", '*:*'):
        terms = q.split()
        # TODO: tags ...?
        fields = ['name', 'title', 'notes']
        for term in terms:
            args = [makelike(field) for field in fields]
            subq = or_(*args)
            ourq = ourq.filter(subq)
    self.count = ourq.count()
    ourq = ourq.limit(limit)
    self.results = [{'id': r[0]} for r in ourq.all()]
    return {'results': self.results, 'count': self.count}
def get_num_task_instances(dag_id, task_ids, states=None, session=None):
    """
    Returns the number of task instances in the given DAG.

    :param session: ORM session
    :param dag_id: ID of the DAG to get the task concurrency of
    :type dag_id: unicode
    :param task_ids: A list of valid task IDs for the given DAG
    :type task_ids: list[unicode]
    :param states: A list of states to filter by if supplied
    :type states: list[state]
    :return: The number of running tasks
    :rtype: int
    """
    qry = session.query(func.count(TaskInstance.task_id)).filter(
        TaskInstance.dag_id == dag_id,
        TaskInstance.task_id.in_(task_ids))
    if states is not None:
        if None in states:
            qry = qry.filter(or_(
                TaskInstance.state.in_(states),
                TaskInstance.state.is_(None)))
        else:
            qry = qry.filter(TaskInstance.state.in_(states))
    return qry.scalar()
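The None branch above exists because SQL's IN never matches NULL: a states list containing None must be caught with an explicit IS NULL test. A minimal sketch of the rendered predicate, using a free-standing column() rather than the Airflow model (an assumption for illustration):

from sqlalchemy import column, or_

state = column('state')
states = ['running', None]

# The NULL inside IN (...) is inert; the is_(None) term is what
# actually matches rows whose state is NULL.
predicate = or_(state.in_(states), state.is_(None))
print(predicate)  # roughly: state IN (...) OR state IS NULL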
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    query_tbl = sa.sql.table('query', sa.sql.column('platform', sa.String))
    pack_tbl = sa.sql.table('pack', sa.sql.column('platform', sa.String))

    op.execute(
        query_tbl.update() \
            .where(
                sa.or_(
                    query_tbl.c.platform == op.inline_literal('redhat,centos'),
                    query_tbl.c.platform == op.inline_literal('ubuntu'),
                )
            ).values({'platform': op.inline_literal('linux')})
    )

    op.execute(
        pack_tbl.update() \
            .where(
                sa.or_(
                    # NOTE: the original referenced query_tbl.c.platform here,
                    # a copy-paste bug that filters on the wrong table;
                    # pack_tbl is clearly intended.
                    pack_tbl.c.platform == op.inline_literal('redhat,centos'),
                    pack_tbl.c.platform == op.inline_literal('ubuntu'),
                )
            ).values({'platform': op.inline_literal('linux')})
    )

    op.add_column('query', sa.Column('shard', sa.Integer(), nullable=True))
    ### end Alembic commands ###
def api_private_blockpage_detected():
    q = current_app.db_session.query(
        distinct(Report.probe_cc).label('probe_cc'),
    ).join(Measurement, Measurement.report_no == Report.report_no) \
        .filter(Measurement.confirmed == True) \
        .filter(or_(
            Report.test_name == 'http_requests',
            Report.test_name == 'web_connectivity'
        ))
    results = []
    for row in q:
        results.append({
            'probe_cc': row.probe_cc
        })
    return jsonify({
        'results': results
    })
def search_biblio():
    """Search entries in biblio."""
    # Get the form corresponding to the query:
    form = ExtendedSearchForm()
    if form.validate_on_submit():
        if form.source.data == "local":
            s = "%" + form.name.data + "%"
            # Send request to database:
            bibdat = convert_rows_to_dict(
                db.session.query(BiblioEntry)
                .filter(or_(BiblioEntry.authors.like(s),
                            BiblioEntry.title.like(s))))
            # Format bibdat and sort by years:
            templateVars = format_bibdatabase(bibdat)
            if len(bibdat) == 0:
                flash("No entry found")
            return render_template("references.html", **templateVars)
        elif form.source.data == "hal":
            # NOTE: the original omitted the return, silently discarding
            # the redirect.
            return redirect("/hal/" + form.name.data)
        else:
            flash("Not implemented yet")
    return redirect("/biblio")
def _get_new_resource(self):
    committee_table = self.db_meta.tables.get("kns_committee")
    committeesession_table = self.db_meta.tables.get("kns_committeesession")
    documentcommitteesession_table = self.db_meta.tables.get("kns_documentcommitteesession")
    if committee_table is None or committeesession_table is None \
            or documentcommitteesession_table is None:
        raise Exception("processor requires kns committee tables to exist")
    override_meeting_ids = os.environ.get("OVERRIDE_COMMITTEE_MEETING_IDS")
    for db_row in self.db_session \
            .query(committee_table, committeesession_table,
                   documentcommitteesession_table) \
            .filter(committeesession_table.c.CommitteeID ==
                    committee_table.c.CommitteeID) \
            .filter(committeesession_table.c.CommitteeSessionID ==
                    documentcommitteesession_table.c.CommitteeSessionID) \
            .filter(or_(*(documentcommitteesession_table.c.FilePath.like("%.{}".format(e))
                          for e in SUPPORTED_EXTENSIONS))) \
            .all():
        row = db_row._asdict()
        if str(row["GroupTypeID"]) == "23":
            if not override_meeting_ids \
                    or str(row["CommitteeSessionID"]) in override_meeting_ids.split(","):
                yield {"url": row["FilePath"],
                       "kns_committee_id": row["CommitteeID"],
                       "kns_session_id": row["CommitteeSessionID"]}
def async_transaction_find(context, lock_mode=False, **filters):
    query = context.session.query(models.AsyncTransactions)
    mod = models.AsyncTransactions
    if lock_mode:
        query = query.with_lockmode("update")

    model_filters = _model_query(context, mod, filters)

    if 'transaction_id' in filters:
        query = query.filter(or_(
            mod.id == filters['transaction_id'],
            and_(*model_filters)))
    else:
        query = query.filter(*model_filters)

    return query
def build_full_day_ips(query, period_start, period_end):
    """Method to build an IP list for the case 1

    when the IP was allocated before the period start
    and is still allocated after the period end.
    This method only looks at public IPv4 addresses.
    """
    # NOTE: the original used Python identity tests
    # (`used_by_tenant_id is not None`, `_deallocated is False`), which
    # evaluate to constant booleans instead of SQL expressions; the
    # SQL-level comparisons below are what was intended.
    # Filter out only IPv4 that have not been deallocated
    ip_list = query.\
        filter(models.IPAddress.version == 4).\
        filter(models.IPAddress.network_id == PUBLIC_NETWORK_ID).\
        filter(models.IPAddress.used_by_tenant_id != null()).\
        filter(models.IPAddress.allocated_at != null()).\
        filter(models.IPAddress.allocated_at < period_start).\
        filter(or_(models.IPAddress._deallocated == False,  # noqa: E712
                   models.IPAddress.deallocated_at == null(),
                   models.IPAddress.deallocated_at >= period_end)).all()
    return ip_list
def build_partial_day_ips(query, period_start, period_end):
    """Method to build an IP list for the case 2

    when the IP was allocated after the period start and
    is still allocated after the period end.
    This method only looks at public IPv4 addresses.
    """
    # Filter out only IPv4 that were allocated after the period start
    # and have not been deallocated before the period end.
    # allocated_at will be set to a date
    # (Python identity tests replaced with SQL comparisons, as above.)
    ip_list = query.\
        filter(models.IPAddress.version == 4).\
        filter(models.IPAddress.network_id == PUBLIC_NETWORK_ID).\
        filter(models.IPAddress.used_by_tenant_id != null()).\
        filter(and_(models.IPAddress.allocated_at != null(),
                    models.IPAddress.allocated_at >= period_start,
                    models.IPAddress.allocated_at < period_end)).\
        filter(or_(models.IPAddress._deallocated == False,  # noqa: E712
                   models.IPAddress.deallocated_at == null(),
                   models.IPAddress.deallocated_at >= period_end)).all()
    return ip_list
def register(ctx, request):
    try:
        payload = request.json()
        nickname = payload['nickname']
        mail = payload['mail']
        password = payload['password']
    except KeyError as e:
        raise HTTPBadRequest('{} is required'.format(e))
    except Exception as e:
        raise HTTPBadRequest(e)

    user = User.query.filter(or_(User.nickname == nickname,
                                 User.mail == mail)).first()
    if user is not None:
        return jsonify(code=400, message='user exist')

    catalog = Catalog(name='notes')
    user = User(nickname=nickname, mail=mail, catalogs=[catalog],
                password=bcrypt.hashpw(password.encode(), bcrypt.gensalt()))
    db.session.add(user)
    try:
        db.session.commit()
        return jsonify(code=200)
    except Exception as e:
        logging.error(e)
        db.session.rollback()
        raise HTTPInternalServerError(e)
def _many_primary_key_filter(items, model_class):
    """Return filter criteria for models with many primary keys."""
    pk_cols = mapper_primary_key(model_class)
    pk_criteria = []

    def obj_pk_index(idx, col):
        return col.name

    def idx_pk_index(idx, col):
        return idx

    for item in items:
        # AND each primary key value together to filter for that record
        # uniquely.
        pk_index = (idx_pk_index if isinstance(item, tuple) else obj_pk_index)
        pk_criteria.append(
            sa.and_(*(col == pyd.get(item, pk_index(idx, col))
                      for idx, col in enumerate(pk_cols))))

    # Our final filter is an OR filter that ANDs each of the primary keys
    # from each model.
    return sa.or_(*pk_criteria)
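This OR-of-ANDs shape is the standard way to match a batch of composite keys: one and_() per key tuple, all joined by or_(). A minimal sketch with free-standing column() objects (the column names are illustrative, not from the original project):

import sqlalchemy as sa

order_id = sa.column('order_id')
line_no = sa.column('line_no')
keys = [(1, 1), (1, 2), (7, 3)]

# Renders roughly as:
# (order_id = 1 AND line_no = 1) OR (order_id = 1 AND line_no = 2) OR ...
criteria = sa.or_(*(sa.and_(order_id == k[0], line_no == k[1]) for k in keys))
print(criteria)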
def _do_num_not_in_range(self, attr_getter, clause_name, payload):
    if (not isinstance(payload, list) or len(payload) != 2
            or not isinstance(payload[0], (int, float))
            or not isinstance(payload[1], (int, float))):
        raise ServerError('can\'t parse "%s" clause: contents must be a list '
                          'of two numbers, but got %s',
                          clause_name, payload.__class__.__name__)
    v1, v2 = payload
    if v1 > v2:
        v1, v2 = v2, v1
    from sqlalchemy import or_
    value = attr_getter()
    return or_(value < v1, value > v2)


# Custom, generic clauses.
def gitless_drafts():
    """ Render the gitless posts that a user has created in table form.
        Editors can see all the posts created via Gitless_Editing.
    """
    prefixes = current_app.config.get('WEB_EDITOR_PREFIXES', [])
    if prefixes == []:
        raise Exception("Web editing is not configured")

    query = (db_session.query(Post))
    if prefixes is not None:
        query = query.filter(or_(*[Post.path.like(p + '%') for p in prefixes]))
    if current_user.identifier not in current_repo.config.editors:
        query = (query.outerjoin(PostAuthorAssoc,
                                 PostAuthorAssoc.post_id == Post.id)
                 .filter(PostAuthorAssoc.user_id == current_user.id))
    return render_template("web_posts.html", posts=query.all())
def _SslDomain_inject_exipring_days(ctx, q, expiring_days, order=False):
    """helper function for the count/paginated queries"""
    SslServerCertificateMulti = sqlalchemy.orm.aliased(SslServerCertificate)
    SslServerCertificateSingle = sqlalchemy.orm.aliased(SslServerCertificate)
    _until = ctx.timestamp + datetime.timedelta(days=expiring_days)
    q = q.outerjoin(SslServerCertificateMulti,
                    SslDomain.ssl_server_certificate_id__latest_multi == SslServerCertificateMulti.id
                    )\
        .outerjoin(SslServerCertificateSingle,
                   SslDomain.ssl_server_certificate_id__latest_single == SslServerCertificateSingle.id
                   )\
        .filter(sqlalchemy.or_(sqlalchemy.and_(SslServerCertificateMulti.is_active == True,  # noqa
                                               SslServerCertificateMulti.timestamp_expires <= _until,
                                               ),
                               sqlalchemy.and_(SslServerCertificateSingle.is_active == True,  # noqa
                                               SslServerCertificateSingle.timestamp_expires <= _until,
                                               ),
                               )
                )
    if order:
        q = q.order_by(min_date(SslServerCertificateMulti.timestamp_expires,
                                SslServerCertificateSingle.timestamp_expires,
                                ).asc(),
                       )
    return q
def get__SslDomain__paginated(ctx, expiring_days=None, eagerload_web=False,
                              limit=None, offset=0, active_only=False):
    q = ctx.dbSession.query(SslDomain)
    if active_only and not expiring_days:
        q = q.filter(sqlalchemy.or_(SslDomain.ssl_server_certificate_id__latest_single.op('IS NOT')(None),
                                    SslDomain.ssl_server_certificate_id__latest_multi.op('IS NOT')(None),
                                    ),
                     )
    if eagerload_web:
        q = q.options(sqlalchemy.orm.joinedload('server_certificate__latest_single'),
                      sqlalchemy.orm.joinedload('server_certificate__latest_multi'),
                      )
    if expiring_days:
        q = _SslDomain_inject_exipring_days(ctx, q, expiring_days, order=True)
    else:
        # NOTE: the original wrote `sa.func.lower` although this module
        # imports the package as `sqlalchemy`; normalized for consistency.
        q = q.order_by(sqlalchemy.func.lower(SslDomain.domain_name).asc())
    q = q.limit(limit)\
        .offset(offset)
    items_paged = q.all()
    return items_paged


# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_online_admins(self, session=None) -> list:
    admins = session.query(GlobalRoles).filter(or_(
        GlobalRoles.roles.ilike('%{}%'.format(RoleKeys.SUPER_USER)),
        GlobalRoles.roles.ilike('%{}%'.format(RoleKeys.GLOBAL_MODERATOR))
    )).all()
    admin_ids = [admin.user_id for admin in admins]
    if len(admin_ids) == 0:
        return []

    online_admins = session.query(UserStatus)\
        .filter(UserStatus.status.in_([
            UserKeys.STATUS_INVISIBLE,
            UserKeys.STATUS_AVAILABLE,
            UserKeys.STATUS_CHAT]))\
        .filter(UserStatus.uuid.in_(admin_ids)).all()

    return [admin.uuid for admin in online_admins]
def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None,
                               session=None):
    """
    Get file replicas for a specific scope:name.

    :param scope:          The scope of the did.
    :param name:           The name of the did.
    :param nowait:         Nowait parameter for the FOR UPDATE statement
    :param restrict_rses:  Possible RSE_ids to filter on.
    :param session:        The db session in use.

    :returns: List of SQLAlchemy Replica Objects
    """
    query = session.query(models.RSEFileAssociation).\
        filter_by(scope=scope, name=name).\
        filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)

    if restrict_rses is not None:
        if len(restrict_rses) < 10:
            rse_clause = []
            for rse_id in restrict_rses:
                rse_clause.append(models.RSEFileAssociation.rse_id == rse_id)
            if rse_clause:
                query = query.filter(or_(*rse_clause))

    return query.with_for_update(nowait=nowait).all()
def get_source_replicas(scope, name, source_rses=None, session=None):
    """
    Get source replicas for a specific scope:name.

    :param scope:        The scope of the did.
    :param name:         The name of the did.
    :param source_rses:  Possible RSE_ids to filter on.
    :param session:      The db session in use.

    :returns: List of SQLAlchemy Replica Objects
    """
    query = session.query(models.RSEFileAssociation.rse_id).\
        filter_by(scope=scope, name=name).\
        filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)

    if source_rses:
        if len(source_rses) < 10:
            rse_clause = []
            for rse_id in source_rses:
                rse_clause.append(models.RSEFileAssociation.rse_id == rse_id)
            if rse_clause:
                query = query.filter(or_(*rse_clause))

    return [a[0] for a in query.all()]
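Note that both helpers above build an explicit OR chain only for short RSE lists (fewer than 10). For larger lists the usual alternative is a single in_() clause, as in this minimal sketch with a free-standing column (not from the Rucio codebase):

from sqlalchemy import column, or_

rse_id = column('rse_id')
restrict_rses = ['rse_a', 'rse_b', 'rse_c']

# Equivalent predicates: or_ chains one equality per value,
# in_ emits a single IN (...) clause.
via_or = or_(*(rse_id == r for r in restrict_rses))
via_in = rse_id.in_(restrict_rses)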
def delete_volatile_replicas(rse, replicas, session=None):
    """
    Bulk delete volatile replicas.

    :param rse:      the rse name.
    :param replicas: the list of volatile replicas.
    :param session:  The database session in use.
    :returns: True if successful.
    """
    # first check that the rse is a volatile one
    try:
        rse_id = session.query(models.RSE.id).\
            filter_by(rse=rse, volatile=True).one()[0]
    except NoResultFound:
        raise exception.UnsupportedOperation('No volatile rse found for %(rse)s !' % locals())

    conditions = []
    for replica in replicas:
        conditions.append(and_(models.RSEFileAssociation.scope == replica['scope'],
                               models.RSEFileAssociation.name == replica['name']))

    if conditions:
        session.query(models.RSEFileAssociation).\
            filter(models.RSEFileAssociation.rse_id == rse_id).\
            filter(or_(*conditions)).\
            delete(synchronize_session=False)
def delete_temporary_dids(dids, session=None):
    """
    Delete temporary dids.

    :param dids:    the list of dids to delete.
    :param session: The database session in use.
    """
    # NOTE: the original docstring described "file replicas" with rse/files
    # parameters, which did not match this function's signature.
    where_clause = []
    for did in dids:
        where_clause.append(and_(models.TemporaryDataIdentifier.scope == did['scope'],
                                 models.TemporaryDataIdentifier.name == did['name']))

    if where_clause:
        return session.query(models.TemporaryDataIdentifier).\
            with_hint(models.TemporaryDataIdentifier,
                      "INDEX(tmp_dids TMP_DIDS_PK)", 'oracle').\
            filter(or_(*where_clause)).delete(synchronize_session=False)
    return
def delete_messages(messages, session=None):
    """
    Delete all messages with the given IDs, and archive them to the history.

    :param messages: The messages to delete as a list of dictionaries.
    """
    message_condition = []
    for message in messages:
        message_condition.append(Message.id == message['id'])

    try:
        if message_condition:
            session.query(Message).\
                with_hint(Message, "index(messages MESSAGES_ID_PK)", 'oracle').\
                filter(or_(*message_condition)).\
                delete(synchronize_session=False)

        session.bulk_insert_mappings(MessageHistory, messages)
    except IntegrityError as e:  # Python 2 `except X, e` syntax modernized
        raise RucioException(e.args)
def get_previous_game(session, user, game):
    """ given a user and a game, get the user's previous game """
    previous_game = session.query(Game).filter(
        Game.deleted_at.is_(None),
        sqla.or_(
            Game.winner_id == user.id,
            Game.loser_id == user.id
        ),
        Game.id < game.id
    ).order_by(Game.id.desc()).first()
    return previous_game
def get_next_game(session, user_id, last_game, updated_games_ids):
    if not last_game:
        return
    return session.query(Game).filter(
        sqla.or_(
            Game.winner_id == user_id,
            Game.loser_id == user_id
        ),
        Game.id > last_game.id,
        Game.id.notin_(updated_games_ids),
        Game.deleted_at.is_(None)
    ).first()
def get_user_most_recent_game(session, user_id):
    game_id_result = session.query(Game.id).filter(
        sqla.or_(
            Game.winner_id == user_id,
            Game.loser_id == user_id
        )
    ).order_by(Game.id.desc()).first()
    if game_id_result:
        return game_id_result[0]