我们从 Python 开源项目中提取了以下 50 个代码示例,用于说明如何使用 sqlalchemy.and_()。
def add_sighting(session, pokemon):
    """Persist a Sighting row for *pokemon* unless it is already recorded."""
    # Skip anything this process has already written.
    if pokemon in SIGHTING_CACHE:
        return
    # The database may already hold this exact sighting (same encounter
    # and expiry); if so, just remember it locally.
    already_stored = session.query(exists().where(and_(
        Sighting.expire_timestamp == pokemon['expire_timestamp'],
        Sighting.encounter_id == pokemon['encounter_id']))).scalar()
    if already_stored:
        SIGHTING_CACHE.add(pokemon)
        return
    sighting = Sighting(
        pokemon_id=pokemon['pokemon_id'],
        spawn_id=pokemon['spawn_id'],
        encounter_id=pokemon['encounter_id'],
        expire_timestamp=pokemon['expire_timestamp'],
        lat=pokemon['lat'],
        lon=pokemon['lon'],
        atk_iv=pokemon.get('individual_attack'),
        def_iv=pokemon.get('individual_defense'),
        sta_iv=pokemon.get('individual_stamina'),
        move_1=pokemon.get('move_1'),
        move_2=pokemon.get('move_2'),
    )
    session.add(sighting)
    SIGHTING_CACHE.add(pokemon)
def traverse(self, senses: List[Sense], depth: int) -> int:
    """Follow HYPERNYM links upward from *senses* recursively.

    Returns the depth reached when no senses or no hypernym links remain.
    """
    if not senses:
        return depth
    synsets = [sense.synset for sense in senses]
    synlinks = (self.session.query(Synlink)
                .filter(and_(Synlink.synset1.in_(synsets),
                             Synlink.link == HYPERNYM))
                .all())
    if not synlinks:
        return depth
    # Resolve the hypernym synsets back to senses in our language.
    parent_synsets = [link.synset2 for link in synlinks]
    parent_senses = (self.session.query(Sense)
                     .filter(and_(Sense.synset.in_(parent_synsets),
                                  Sense.lang == self.lang))
                     .all())
    return self.traverse(parent_senses, depth + 1)
def revert_hero_points(session, league_id, results): for i, result in enumerate(results): res = result.result_str heroq_all = session.query(Hero).filter(and_(Hero.id == result.hero, Hero.league == league_id)).all() for heroq in heroq_all: print result.match_id print "Hero id: ", result.hero if "p" in res: heroq.picks -= 1 if "w" in res: heroq.wins -= 1 if "b" in res: heroq.bans -= 1 print "Would remove %s to hero points", Result.result_to_value(res) heroq.points -= MULTIPLIER * Result.result_to_value(res)
def revert_league_points(session, league_id, results):
    """Undo league/user point awards previously applied for *results*.

    For each result, every user who owned the hero has the result removed
    from both their overall league record and their record for the league's
    current day (via ``remove_result_to_user``).
    """
    league = session.query(League).filter(League.id == league_id).first()
    for i, result in enumerate(results):
        res = result.result_str
        # All users who had this hero on their team in this league.
        winners = session.query(TeamHero.user_id). \
            filter(and_(TeamHero.hero_id == result.hero, TeamHero.league == league_id)).all()
        for winner in winners:
            userq = session.query(LeagueUser).filter(and_(LeagueUser.user_id == winner[0],
                                                          LeagueUser.league == league_id)).first()
            user_id = userq.user_id
            # Per-day record for the league's current day.
            userq_day = session.query(LeagueUserDay).filter(and_(LeagueUserDay.user_id == user_id,
                                                                 LeagueUserDay.league == userq.league,
                                                                 LeagueUserDay.day == league.current_day
                                                                 )).first()
            # Number of TeamHero rows the user owns; forwarded to the helper.
            hero_count = session.query(func.count(TeamHero)).filter(and_(TeamHero.league == league_id,
                                                                         TeamHero.user_id == user_id)).scalar()
            remove_result_to_user(userq, res, hero_count)
            remove_result_to_user(userq_day, res, hero_count)
def swap_out(session, user_id, hero_id, league_id):
    """Sell *hero_id* out of a user's team, refunding its value as credits."""
    l_user = session.query(LeagueUser).filter(LeagueUser.user_id == user_id).filter(LeagueUser.league == league_id).first()
    user_money = l_user.money
    team_query = session.query(TeamHero).filter(and_(TeamHero.user_id == user_id,
                                                     TeamHero.league == league_id))
    # Guard: the user must actually have a team in this league.
    if not team_query.first():
        return {"success": False, "message": "Erm....you don't appear to be in this league. This is awkward"}
    owned_hero = team_query.filter(TeamHero.hero_id == hero_id).first()
    if not owned_hero:
        return {"success": False, "message": "ERROR: Cannot sell, hero not in your team"}
    # Refund the hero's league value, rounded to one decimal.
    hero_value = session.query(Hero.value).filter(Hero.league == league_id).filter(Hero.id == hero_id).first()[0]
    new_credits = round(user_money + hero_value, 1)
    l_user.money = new_credits
    owned_hero.reserve = 1
    l_user.last_change = int(time.time())
    return {"success": True, "message": "Hero successfully sold", "action": "sell",
            "hero": hero_id, "new_credits": new_credits}
def __and__(self, other):
    """Support ``a & b`` on SQL expressions.

    The result is the AND clause produced by
    :func:`~.expression.and_`; that is, ``a & b`` behaves like
    ``and_(a, b)``.  Because ``&`` binds very tightly, wrap composite
    operands in parentheses, e.g. ``(a == 2) & (b == 4)``.
    """
    return self.operate(and_, other)
def get_candidate_task_ids(project_id, user_id=None, user_ip=None,
                           external_uid=None, limit=1, offset=0,
                           orderby='priority_0', desc=True):
    """Get all available tasks for a given project and user.

    Tasks already answered by the identified user (by id, IP, or external
    uid, in that priority order) and completed tasks are excluded.
    """
    data = None
    # Pick the identity used to exclude tasks this user already answered.
    if user_id and not user_ip and not external_uid:
        subquery = session.query(TaskRun.task_id).filter_by(project_id=project_id, user_id=user_id)
    else:
        if not user_ip:
            user_ip = '127.0.0.1'
        if user_ip and not external_uid:
            subquery = session.query(TaskRun.task_id).filter_by(project_id=project_id, user_ip=user_ip)
        else:
            subquery = session.query(TaskRun.task_id).filter_by(project_id=project_id, external_uid=external_uid)
    query = session.query(Task).filter(and_(~Task.id.in_(subquery.subquery()),
                                            Task.project_id == project_id,
                                            Task.state != 'completed'))
    query = _set_orderby_desc(query, orderby, desc)
    # BUG FIX: removed a leftover debug loop (`for q in query: print q`) that
    # executed the full, unlimited query a second time just to print each row.
    data = query.limit(limit).offset(offset).all()
    return _handle_tuples(data)
def test_secondary_joins_with_custom_primary_join_conditions_are_not_supported(self):
    """A custom primaryjoin on a secondary relation must be rejected."""
    mapper(self.classes.A, self.tables.a)
    custom_join = and_(self.tables.b.c.id == self.tables.a_to_b.c.b_id,
                       self.tables.b.c.id > 10)
    mapper(self.classes.B, self.tables.b, properties={
        'a': relationship(
            self.classes.A,
            secondary=self.tables.a_to_b,
            primaryjoin=custom_join,
            lazy="bulk",
        )
    })
    try:
        configure_mappers()
        exception = None
    except UnsupportedRelationError as e:
        exception = e
    assert exception.args[0] == (
        'BulkLazyLoader B.a: '
        'Only simple relations on 1 primary key and without custom joins are supported'
    )
def test_secondary_joins_with_custom_secondary_join_conditions_are_not_supported(self):
    """A custom secondaryjoin on a secondary relation must be rejected."""
    mapper(self.classes.A, self.tables.a)
    custom_join = and_(self.tables.a.c.id == self.tables.a_to_b.c.a_id,
                       self.tables.a.c.id > 10)
    mapper(self.classes.B, self.tables.b, properties={
        'a': relationship(
            self.classes.A,
            secondary=self.tables.a_to_b,
            secondaryjoin=custom_join,
            lazy="bulk",
        )
    })
    try:
        configure_mappers()
        exception = None
    except UnsupportedRelationError as e:
        exception = e
    assert exception.args[0] == (
        'BulkLazyLoader B.a: '
        'Only simple relations on 1 primary key and without custom joins are supported'
    )
def get(self):
    """Write, as JSON, the log lines for *cluster* between `fr` and `to`."""
    cluster = self.get_argument('cluster')
    fr = self.get_argument('fr', default='1970-01-01T00:00:00.000000')
    to = self.get_argument('to', default='2200-01-01T00:00:00.000000')
    # Parse the ISO-like timestamps into datetimes.
    fr = datetime.strptime(fr, "%Y-%m-%dT%H:%M:%S.%f")
    to = datetime.strptime(to, "%Y-%m-%dT%H:%M:%S.%f")
    rows = DB.session.query(ClusterLog).filter(and_(
        ClusterLog.cluster == cluster,
        ClusterLog.when < to,
        ClusterLog.when > fr)).all()
    logs = [row.to_dict() for row in rows]
    self.set_status(200)
    self.write(json.dumps(logs).encode('utf-8'))
def get_trait_types(self, event_type):
    """Yield the name and data type of each trait of *event_type*.

    :param event_type: the type of the Event
    """
    session = self._engine_facade.get_session()
    with session.begin():
        # One typed table per trait kind; query each in turn.
        trait_models = (models.TraitText, models.TraitInt,
                        models.TraitFloat, models.TraitDatetime)
        for trait_model in trait_models:
            query = (session.query(trait_model.key)
                     .join(models.Event,
                           models.Event.id == trait_model.event_id)
                     .join(models.EventType,
                           sa.and_(models.EventType.id == models.Event.event_type_id,
                                   models.EventType.desc == event_type))
                     .distinct())
            dtype = TRAIT_MODEL_TO_ID.get(trait_model)
            for row in query.all():
                yield {'name': row[0], 'data_type': dtype}
def post(self):
    """Handle a login POST; returns 'true', 'false' or 'database_error'."""
    username = None  # bound up front so the except block can never NameError
    try:
        username = self.get_argument('username_guai')
        password = self.get_argument('password_guai')
        # NOTE(review): passwords appear to be compared verbatim against the
        # stored column -- consider hashing; left unchanged to preserve behaviour.
        user = self.session.query(User).filter(and_(User.username == username,
                                                    User.password == password)).first()
        if user:
            logging.info(str(username) + 'login success')
            return 'true'
        else:
            logging.info(str(username) + 'login false')
            return 'false'
    except Exception:
        # BUG FIX: the original bare `except:` referenced `username`, which was
        # unbound when get_argument() itself raised, masking the real error
        # with a NameError; it also caught SystemExit/KeyboardInterrupt.
        logging.info(str(username) + 'login database_error')
        return 'database_error'
def material(uuid):
    """Print a tabular summary row for material *uuid*.

    Only rows whose retest passed (or has not run yet) are shown.
    """
    cols = [materials.c.uuid, materials.c.parent_id,
            materials.c.ga_absolute_volumetric_loading,
            materials.c.sa_volumetric_surface_area,
            materials.c.vf_helium_void_fraction,
            materials.c.generation, materials.c.run_id]
    where_clause = and_(materials.c.uuid == uuid,
                        or_(materials.c.retest_passed == None,
                            materials.c.retest_passed == True))
    query = select(cols, where_clause)
    print(
        '\nuuid\t\t\t\t\tparent\tgas adsorption (cc/cc)\tsurface area (m2/cc)' +
        '\tvoid fraction\tgeneration\trun'
    )
    result = engine.execute(query)
    for row in result:
        print(
            '%s\t%s\t%s\t\t%s\t\t\t' % (row[0], row[1], row[2], row[3]) +
            '%s\t%s\t\t%s' % (row[4], row[5], row[6])
        )
    result.close()
def find_children(uuid):
    """Print the uuids of all (retest-passing or untested) children of the
    material identified by *uuid*."""
    cols = [materials.c.id]
    rows = [materials.c.uuid == uuid]
    result = engine.execute(select(cols, *rows))
    parent_id = None
    for row in result:
        parent_id = row[0]
    result.close()
    print('\nchildren of %s :' % uuid)
    # BUG FIX: the original raised NameError here when no material matched
    # *uuid*, because parent_id was never assigned in the loop above.
    if parent_id is None:
        return
    cols = [materials.c.uuid]
    rows = and_(materials.c.parent_id == parent_id,
                or_(materials.c.retest_passed == None,
                    materials.c.retest_passed == True))
    result = engine.execute(select(cols, rows))
    for row in result:
        print('\t%s' % row[0])
    result.close()
def msearch(self, m, query, fields=None, limit=None, or_=False):
    """Search model *m* for rows whose *fields* contain the query keywords.

    Keywords within one field are combined with AND (or OR when *or_* is
    true); the per-field clauses are always OR-ed together.
    """
    if fields is None:
        fields = m.__searchable__
    if self.analyzer is not None:
        keywords = self.analyzer(query)
    else:
        keywords = query.split(' ')
    field_clauses = []
    for field in fields:
        # One containment clause per non-empty keyword.
        clauses = [getattr(m, field).contains(kw) for kw in keywords if kw]
        combined = _or(*clauses) if or_ else _and(*clauses)
        field_clauses.append(combined)
    results = m.query.filter(_or(*field_clauses))
    if limit is not None:
        results = results.limit(limit)
    return results
def remove_old_members(self, trans, guild):
    """Remove members still flagged in the DB for *guild* from tracking."""
    # The `== True` below is intentional: on a SQLAlchemy column it builds a
    # SQL equality clause rather than performing a Python comparison.
    # pylint: disable=singleton-comparison
    self.logger.info(f"Deleting old members from guild {guild.name}")
    sel = select([self.tb_guild_membership]).where(and_(
        self.tb_guild_membership.c.guild_id == guild.id,
        self.tb_guild_membership.c.is_member == True,
    ))
    rows = trans.execute(sel).fetchall()
    for row in rows:
        # First column is the user id; skip users who already left Discord.
        member = guild.get_member(row[0])
        if member is not None:
            self.remove_member(trans, member)
def upsert_emoji(self, trans, emoji):
    """Insert or update the emoji row, skipping work when the cache is current."""
    data = EmojiData(emoji)
    values = data.values()
    # Avoid a database round-trip when nothing changed since the last upsert.
    if self.emoji_cache.get(data.cache_id) == values:
        self.logger.debug(f"Emoji lookup for {data} is already up-to-date")
        return
    self.logger.debug(f"Upserting emoji {data}")
    statement = p_insert(self.tb_emojis).values(values).on_conflict_do_update(
        index_elements=['emoji_id', 'emoji_unicode'],
        index_where=and_(
            self.tb_emojis.c.emoji_id == data.id,
            self.tb_emojis.c.emoji_unicode == data.unicode,
        ),
        set_=values,
    )
    trans.execute(statement)
    self.emoji_cache[data.cache_id] = values
    # Audit log
def networks(self, role="all", full="all"):
    """All the networks in the experiment, optionally filtered by role
    and/or fullness."""
    if full not in ["all", True, False]:
        raise ValueError("full must be boolean or all, it cannot be {}"
                         .format(full))
    # Guard-clause form: each case returns directly.
    if full == "all" and role == "all":
        return Network.query.all()
    if full == "all":
        return Network.query.filter_by(role=role).all()
    if role == "all":
        return Network.query.filter_by(full=full).all()
    return Network.query.filter(and_(Network.role == role,
                                     Network.full == full)).all()
def get_namespace_entry(self, url, name):
    """Gets a given NamespaceEntry object.

    :param str url: The url of the namespace source
    :param str name: The value of the namespace from the given url's document
    :rtype: Optional[NamespaceEntry]
    """
    # Serve from the in-memory cache when the url has been cached.
    if self.namespace_object_cache and url in self.namespace_object_cache:
        return self.namespace_object_cache[url][name]
    matches = (self.session.query(NamespaceEntry)
               .join(Namespace)
               .filter(and_(Namespace.url == url, NamespaceEntry.name == name))
               .all())
    if not matches:
        return
    if len(matches) > 1:
        log.warning('result for get_namespace_entry is too long. Returning first of %s',
                    [str(r) for r in matches])
    return matches[0]
def search(keyword):
    """Render paginated search results for *keyword* over posts."""
    Blog.update()
    pt = request.args.get('page')
    page = 1 if pt is None else int(pt)
    try:
        keywords = keyword.split()
        from sqlalchemy import and_, or_
        # Every keyword must match at least one of title/summary/content.
        per_word = [or_(Post.title.ilike('%%%s%%' % k),
                        Post.summary.ilike('%%%s%%' % k),
                        Post.content.ilike('%%%s%%' % k))
                    for k in keywords]
        rules = and_(*per_word)
        pagination = Post.query.filter(rules).order_by(Post.date.desc()).paginate(
            page=page, per_page=current_app.config['FMBLOG_PER_PAGE'])
    except Exception:
        return render_template('404.html', e='Error: Empty Keyword',
                               site=current_app.config['FMBLOG_SITE'],
                               value={}), 404
    return render_template('search.html', value={'keyword': keyword},
                           pagination=pagination, endpoint='main.search',
                           page_list=get_page_list(pagination, page),
                           tags=Tag.query.order_by(Tag.count.desc()).all(),
                           cats=Category.query.order_by(Category.count.desc()).all(),
                           site=current_app.config['FMBLOG_SITE'])
def get_db_object_by_attr(object_, **kwargs):
    """Fetch the first row of *object_* matching the given attribute filters.

    Pass ``ignore=True`` to return None instead of an error result when no
    row matches.
    """
    assert len(kwargs) >= 1, 'function get_db_object_by_attr need argument'
    ignore = kwargs.pop('ignore') if 'ignore' in kwargs else False
    if len(kwargs) > 1:
        # Combine all attribute equality checks with AND.
        filter_ = and_(*[getattr(object_, key) == value
                         for key, value in kwargs.iteritems()])
    else:
        key, value = kwargs.popitem()
        filter_ = getattr(object_, key) == value
    user = yield execute(sqls=[('query', object_), ('filter', filter_), ('first', None)])
    if not user and not ignore:
        raise gen.Return(invalid_argument_error('wrong %s' % key))
    raise gen.Return(user)
def start_new_test(db, repository, delay):
    """ Function to start a new test based on kvm table. """
    from run import log
    # Tests whose progress reached a terminal status are excluded below.
    finished_tests = db.query(TestProgress.test_id).filter(
        TestProgress.status.in_([TestStatus.canceled, TestStatus.completed])
    ).subquery()
    test = Test.query.filter(
        and_(Test.id.notin_(finished_tests))
    ).order_by(Test.id.asc()).first()
    if test is None:
        return
    if test.platform is TestPlatform.windows:
        kvm_processor_windows(db, repository, delay)
    elif test.platform is TestPlatform.linux:
        kvm_processor_linux(db, repository, delay)
    else:
        log.error("Unsupported CI platform: {platform}".format(
            platform=test.platform))
    return
def add_fort_sighting(session, raw_fort):
    """Record a sighting of a fort, creating the Fort row if needed."""
    # Look the fort up by its external identifier.
    fort = session.query(Fort) \
        .filter(Fort.external_id == raw_fort['external_id']) \
        .first()
    if not fort:
        fort = Fort(
            external_id=raw_fort['external_id'],
            lat=raw_fort['lat'],
            lon=raw_fort['lon'],
        )
        session.add(fort)
    # An existing fort may already have this exact sighting recorded.
    if fort.id and session.query(exists().where(and_(
            FortSighting.fort_id == fort.id,
            FortSighting.last_modified == raw_fort['last_modified']))).scalar():
        # Why is it not in the cache? It should be there!
        FORT_CACHE.add(raw_fort)
        return
    sighting = FortSighting(
        fort=fort,
        team=raw_fort['team'],
        prestige=raw_fort['prestige'],
        guard_pokemon_id=raw_fort['guard_pokemon_id'],
        last_modified=raw_fort['last_modified'],
    )
    session.add(sighting)
    FORT_CACHE.add(raw_fort)
def pw():
    """Handle the change-password form: verify the current password and,
    when the two new passwords match, store the new password hash."""
    form = MyForm.MyForm_pw()
    if form.submit.data:
        # Hash the current password for comparison against the stored hash.
        pw = Md5.Md5_make(form.password.data)
        pw1 = form.new_password1.data
        pw2 = form.new_password2.data
        try:
            db = db_op.idc_users
            # Look up the logged-in user by name + current password hash.
            va = db.query.filter(and_(db.name == g.user, db.passwd == pw)).first()
            if pw1 == pw2:
                if va:
                    va.passwd = Md5.Md5_make(pw1)
                    db_op.DB.session.commit()
                    db_op.DB.session.close()
                    # NOTE(review): the flash strings below are mojibake
                    # ('?…'); presumably localized success/error messages --
                    # confirm against the original encoding.
                    flash('??????,?????!')
                    app_resp = make_response(
                        redirect(url_for('index.index')))
                    return app_resp
                else:
                    flash('?????!')
            else:
                flash('??????!')
        except Exception as e:
            # Best-effort: surface any database error to the user via flash.
            flash(e)
    return render_template('password.html',Main_Infos=g.main_infos,form=form)
def largest_groups(cls, limit=10):
    """Return up to *limit* (Group, package-count) pairs, biggest first."""
    member = table('member')
    package = table('package')
    joined = join(member, package, member.c.table_id == package.c.id)
    count_col = func.count(member.c.table_id)
    # Count active, public packages per group.
    s = select([member.c.group_id, count_col]). \
        select_from(joined). \
        group_by(member.c.group_id). \
        where(and_(member.c.group_id != None,
                   member.c.table_name == 'package',
                   package.c.private == False,
                   package.c.state == 'active')). \
        order_by(count_col.desc()). \
        limit(limit)
    res_ids = model.Session.execute(s).fetchall()
    return [(model.Session.query(model.Group).get(unicode(group_id)), val)
            for group_id, val in res_ids]
def _get(cls, follower_id=None, object_id=None):
    """Build the follower/object join query, excluding deleted rows."""
    follower_alias = sqlalchemy.orm.aliased(cls._follower_class())
    object_alias = sqlalchemy.orm.aliased(cls._object_class())
    # Fall back to the class-level columns when no explicit ids are given.
    follower_id = follower_id or cls.follower_id
    object_id = object_id or cls.object_id
    criteria = sqlalchemy.and_(
        follower_alias.id == follower_id,
        cls.follower_id == follower_alias.id,
        cls.object_id == object_alias.id,
        follower_alias.state != core.State.DELETED,
        object_alias.state != core.State.DELETED,
        object_alias.id == object_id)
    return meta.Session.query(cls, follower_alias, object_alias).filter(criteria)
def __add_mob(self, new_mob):
    """Insert *new_mob* into the session, or update the existing MOB row
    with the same (id, group_id); commits and returns the persisted MOB."""
    mob = None
    try:
        # one() raises NoResultFound when the mob is not yet in the DB.
        mob = self.session.query(MOB).filter(and_(MOB.id == new_mob.id, MOB.group_id == new_mob.group_id)).one()
        sg.logger.info("Updating mob %s..." % (mob.id, ))
        mob.update_from_new(new_mob)
        if mob.metamob_id is None:
            # Try to associate the mob with its METAMOB template.
            mob.link_metamob(self.session.query(METAMOB).all())
        self.session.add(mob)
    except orm.exc.NoResultFound:
        mob = new_mob
        sg.logger.info("Creating MOB %s..." % (mob.id, ))
        # Newly seen mobs are flagged for notification.
        mob.sciz_notif = True
        mob.link_metamob(self.session.query(METAMOB).all())
        self.session.add(mob)
    self.session.commit()
    return mob
    # Add a PIEGE
def get_latest_runs(cls, session):
    """Returns the latest DagRun for each DAG."""
    # Newest execution_date per dag_id.
    latest = (session.query(cls.dag_id,
                            func.max(cls.execution_date).label('execution_date'))
              .group_by(cls.dag_id)
              .subquery())
    return (session.query(cls)
            .join(latest, and_(cls.dag_id == latest.c.dag_id,
                               cls.execution_date == latest.c.execution_date))
            .all())
def display_picks(user_id, season):
    """Return a query of a player's picks for *season*, joined with
    conference, division, team and NFL-player details."""
    session = DbSessionFactory.create_session()
    picks_query = (
        session.query(PlayerPicks.pick_type, ConferenceInfo.conference,
                      DivisionInfo.division, TeamInfo.name, PlayerPicks.rank,
                      ActiveNFLPlayers.firstname, ActiveNFLPlayers.lastname,
                      PlayerPicks.multiplier)
        .outerjoin(ConferenceInfo)
        .outerjoin(DivisionInfo)
        .outerjoin(TeamInfo)
        # Player join also matches on season so the right roster year is used.
        .outerjoin(ActiveNFLPlayers,
                   and_(PlayerPicks.player_id == ActiveNFLPlayers.player_id,
                        PlayerPicks.season == ActiveNFLPlayers.season))
        .filter(PlayerPicks.user_id == user_id, PlayerPicks.season == season)
    )
    return picks_query
def assignResults(user, assign_id, sort_id):
    """Render the results page for an assignment, ordered per *sort_id*."""
    assign = session.query(Assignment).filter(
        Assignment.id == assign_id).first()
    if user.admin:
        if sort_id == 0:
            # Admin view, alphabetical by author name.
            posts = session.query(Post).join(Post.user).filter(
                Post.assignment_id == assign_id).order_by(
                User.l_name, User.f_name)
        elif sort_id == 1:
            # Admin view, newest first.
            posts = session.query(Post).filter(
                Post.assignment_id == assign_id).order_by(Post.created.desc())
    else:
        # Non-admin users only see their own posts, newest first.
        posts = session.query(Post).filter(and_(
            Post.assignment_id == assign_id,
            Post.user_id == user.id)).order_by(desc(Post.created)).all()
    return render_template('assignResults.html', user=user, posts=posts,
                           assign=assign, sort_id=sort_id)
def _lock_create(context, target, **kwargs):
    """Atomically create a Lock row and attach it to *target*.

    Returns the new lock id, or None when *target* is already locked
    (its lock_id column is non-NULL), in which case the freshly inserted
    Lock row is deleted again.
    """
    with context.session.begin():
        result = context.session.execute(
            models.Lock.__table__.insert(), {"type": kwargs["type"]})
        lock_id = result.lastrowid
        target_model = target.__class__
        # Conditional UPDATE: only succeeds when no lock is held yet, which
        # makes the lock acquisition race-free at the database level.
        row_count = context.session.query(target_model).filter(
            and_(target_model.id == target.id, target_model.lock_id.is_(None))
        ).update(dict(lock_id=lock_id))
        if row_count == 0:
            # Lost the race: someone else holds the lock; clean up our row.
            context.session.query(models.Lock).filter(
                models.Lock.id == lock_id).delete()
            return None
        return lock_id
def async_transaction_find(context, lock_mode=False, **filters):
    """Query AsyncTransactions rows matching *filters*, optionally locked
    for update."""
    mod = models.AsyncTransactions
    query = context.session.query(mod)
    if lock_mode:
        query = query.with_lockmode("update")
    model_filters = _model_query(context, mod, filters)
    if 'transaction_id' in filters:
        # Match either the explicit transaction id or all the model filters.
        return query.filter(or_(mod.id == filters['transaction_id'],
                                and_(*model_filters)))
    return query.filter(*model_filters)
def build_partial_day_ips(query, period_start, period_end): """Method to build an IP list for the case 2 when the IP was allocated after the period start and is still allocated after the period end. This method only looks at public IPv4 addresses. """ # Filter out only IPv4 that were allocated after the period start # and have not been deallocated before the period end. # allocated_at will be set to a date ip_list = query.\ filter(models.IPAddress.version == 4L).\ filter(models.IPAddress.network_id == PUBLIC_NETWORK_ID).\ filter(models.IPAddress.used_by_tenant_id is not None).\ filter(and_(models.IPAddress.allocated_at != null(), models.IPAddress.allocated_at >= period_start, models.IPAddress.allocated_at < period_end)).\ filter(or_(models.IPAddress._deallocated is False, models.IPAddress.deallocated_at == null(), models.IPAddress.deallocated_at >= period_end)).all() return ip_list
def get_wiggle_hints(genome, speciesnames, seqnames, hints, session):
    """Extracts all wiggle hints for a genome to a BED-like format.

    :param genome: genome (table) to query
    :param speciesnames: speciesnames Table from reflect_hints_db
    :param seqnames: seqnames Table from reflect_hints_db
    :param hints: hints Table from reflect_hints_db
    :param session: Session object from reflect_hints_db
    :return: iterator of BED format lists
    """
    speciesid = session.query(speciesnames.speciesid).filter_by(speciesname=genome)
    # Map sequence number -> sequence name for this species.
    seqs = {s.seqnr: s.seqname for s in
            session.query(seqnames).filter_by(speciesid=speciesid)}
    # Query one sequence at a time to keep memory usage down.
    for nr, name in seqs.iteritems():
        hint_rows = session.query(hints.start, hints.end, hints.score).filter(
            sqlalchemy.and_(hints.speciesid.in_(speciesid),
                            hints.source == 'w2h',
                            hints.seqnr == nr))
        for start, end, score in hint_rows:
            # add 1 to end to convert to half-open interval
            yield name, start, end + 1, score
def _many_primary_key_filter(items, model_class):
    """Return filter criteria for models with many primary keys."""
    pk_cols = mapper_primary_key(model_class)

    def _key_for(item, idx, col):
        # Tuples are addressed positionally; objects/dicts by column name.
        return idx if isinstance(item, tuple) else col.name

    pk_criteria = []
    for item in items:
        # AND each primary key value together to filter for that record
        # uniquely.
        clauses = (col == pyd.get(item, _key_for(item, idx, col))
                   for idx, col in enumerate(pk_cols))
        pk_criteria.append(sa.and_(*clauses))
    # The final filter ORs together the per-record AND clauses.
    return sa.or_(*pk_criteria)
def get_public_translations(cls, page):
    """
    Get a list of public translations for a page
    :param page: the page to query
    :return: a list of languages or an empty list
    """
    # group_id == -1 marks a public (non-group) translation.
    join_cond = and_(cls.page_id == page.id,
                     cls.group_id == -1,
                     lang_models.Language.id == cls.language_id)
    try:
        return (lang_models.Language.query
                .join(cls, join_cond)
                .with_entities(lang_models.Language.id,
                               lang_models.Language.name,
                               lang_models.Language.pubid,
                               cls.group_id)
                .all())
    except exc.NoResultFound:
        return None
def get_page_translations(cls, page):
    """
    Get a list of all translations for a page
    :param page: the page to query
    :return: a list of languages or an empty list
    """
    join_cond = and_(cls.page_id == page.id,
                     lang_models.Language.id == cls.language_id)
    try:
        return (lang_models.Language.query
                .join(cls, join_cond)
                .with_entities(lang_models.Language.id,
                               lang_models.Language.name,
                               lang_models.Language.pubid,
                               cls.group_id)
                .all())
    except exc.NoResultFound:
        return None
def like_post():
    """ Like a post: record a Vote for the current user if none exists. """
    try:
        post_id = int(request.args.get('post_id', '-1'))
        # This will prevent old code from adding invalid post_ids
        if post_id < 0:
            return "No Post Found to like!"
        vote = (db_session.query(Vote)
                .filter(and_(Vote.object_id == post_id,
                             Vote.user_id == current_user.id))
                .first())
        # Only insert a vote when this user hasn't voted on the post yet.
        if not vote:
            vote = Vote(user_id=current_user.id, object_id=post_id)
            db_session.add(vote)
            db_session.commit()
    except Exception:
        # BUG FIX: narrowed the bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt) and log the traceback for debugging.
        logging.exception("ERROR processing request")
    return ""
def unlike_post():
    """ Un-like a post: delete all of the current user's votes for it. """
    try:
        post_id = int(request.args.get('post_id', '-1'))
        # This will prevent old code from adding invalid post_ids
        if post_id < 0:
            return "No Post Found to Unlike!"
        votes = (db_session.query(Vote)
                 .filter(and_(Vote.object_id == post_id,
                              Vote.user_id == current_user.id))
                 .all())
        if votes:
            for vote in votes:
                db_session.delete(vote)
            db_session.commit()
    except Exception:
        # BUG FIX: narrowed the bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt) and log the traceback for debugging.
        logging.exception("ERROR processing request")
    return ""