We have extracted the following 50 code examples from open-source Python projects to illustrate how sqlalchemy.func.now() is used.
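Before the extracted examples, here is a minimal, self-contained sketch of the two most common uses of func.now(): as a server-side column default and inside a query filter. The Item model and the in-memory SQLite engine below are illustrative assumptions for this sketch only, not taken from any of the projects quoted afterwards.

# Minimal sketch of func.now(); the Item model and in-memory SQLite engine
# are illustrative assumptions, not taken from the projects quoted below.
from sqlalchemy import Column, DateTime, Integer, create_engine, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class Item(Base):
    __tablename__ = 'item'
    id = Column(Integer, primary_key=True)
    # func.now() renders as the database's CURRENT_TIMESTAMP, so the value
    # is assigned by the server at INSERT time rather than by Python.
    created_at = Column(DateTime, server_default=func.now())


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.add(Item())
session.commit()

# func.now() can also be compared against columns inside a query, as several
# of the examples below do when filtering on the current server time.
rows = session.query(Item).filter(Item.created_at <= func.now()).all()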
def rename_table(
        cls, operations, old_table_name, new_table_name, schema=None):
    """Emit an ALTER TABLE to rename a table.

    :param old_table_name: old name.
    :param new_table_name: new name.
    :param schema: Optional schema name to operate within.  To control
     quoting of the schema outside of the default behavior, use
     the SQLAlchemy construct
     :class:`~sqlalchemy.sql.elements.quoted_name`.

     .. versionadded:: 0.7.0 'schema' can now accept a
        :class:`~sqlalchemy.sql.elements.quoted_name` construct.

    """
    op = cls(old_table_name, new_table_name, schema=schema)
    return operations.invoke(op)
def last_scrobbles():
    count = get_argument('count', default=app.config['RESULTS_COUNT'])
    scrobbles = (
        db.session.query(Scrobble)
        .filter(Scrobble.user_id == current_user.id)
        .order_by(Scrobble.played_at.desc())
        .limit(count)
        .all()
    )
    nowplaying = (
        db.session.query(NowPlaying)
        .filter(NowPlaying.user_id == current_user.id,
                NowPlaying.played_at + NowPlaying.length >= func.now())
        .order_by(NowPlaying.played_at.desc())
        .first()
    )
    return render_template('latest.html', scrobbles=scrobbles,
                           nowplaying=nowplaying)
def rename_table(self, old_table_name, new_table_name, schema=None):
    """Emit an ALTER TABLE to rename a table.

    :param old_table_name: old name.
    :param new_table_name: new name.
    :param schema: Optional schema name to operate within.  To control
     quoting of the schema outside of the default behavior, use
     the SQLAlchemy construct
     :class:`~sqlalchemy.sql.elements.quoted_name`.

     .. versionadded:: 0.7.0 'schema' can now accept a
        :class:`~sqlalchemy.sql.elements.quoted_name` construct.

    """
    self.impl.rename_table(
        old_table_name,
        new_table_name,
        schema=schema
    )
def drop_index(self, name, table_name=None, schema=None):
    """Issue a "drop index" instruction using the current
    migration context.

    e.g.::

        drop_index("accounts")

    :param name: name of the index.
    :param table_name: name of the owning table.  Some
     backends such as Microsoft SQL Server require this.
    :param schema: Optional schema name to operate within.  To control
     quoting of the schema outside of the default behavior, use
     the SQLAlchemy construct
     :class:`~sqlalchemy.sql.elements.quoted_name`.

     .. versionadded:: 0.7.0 'schema' can now accept a
        :class:`~sqlalchemy.sql.elements.quoted_name` construct.

    """
    # need a dummy column name here since SQLAlchemy
    # 0.7.6 and further raises on Index with no columns
    self.impl.drop_index(
        self._index(name, table_name, ['x'], schema=schema)
    )
def command_history(self, player):
    """View the most recent messages for this channel."""
    if self.allowed(player):
        if player in self.listeners:
            messages = self.messages[-10:]
            if messages:
                player.notify(
                    'Showing the most recent %d %s.',
                    len(messages),
                    pluralise(len(messages), 'message')
                )
                now = datetime.utcnow()
                for m in messages:
                    player.notify(
                        '%s ago, %s transmitted: "%s"',
                        format_timedelta(now - m.sent),
                        m.owner if player.is_admin else m.owner.title(),
                        m.text
                    )
            else:
                player.notify('There are no messages to display.')
        else:
            player.notify('You must connect to this channel first.')
    else:
        player.notify('You cannot view the history for this channel.')
def bag_dag(self, dag, parent_dag, root_dag):
    """
    Adds the DAG into the bag, recurses into sub dags.
    """
    self.dags[dag.dag_id] = dag
    dag.resolve_template_files()
    dag.last_loaded = datetime.now()

    for task in dag.tasks:
        settings.policy(task)

    for subdag in dag.subdags:
        subdag.full_filepath = dag.full_filepath
        subdag.parent_dag = dag
        subdag.fileloc = root_dag.full_filepath
        subdag.is_subdag = True
        self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag)
    self.logger.debug('Loaded DAG {dag}'.format(**locals()))
def __init__(self, event, task_instance, owner=None, extra=None, **kwargs):
    self.dttm = datetime.now()
    self.event = event
    self.extra = extra

    task_owner = None

    if task_instance:
        self.dag_id = task_instance.dag_id
        self.task_id = task_instance.task_id
        self.execution_date = task_instance.execution_date
        task_owner = task_instance.task.owner

    if 'task_id' in kwargs:
        self.task_id = kwargs['task_id']
    if 'dag_id' in kwargs:
        self.dag_id = kwargs['dag_id']
    if 'execution_date' in kwargs:
        if kwargs['execution_date']:
            self.execution_date = kwargs['execution_date']

    self.owner = owner or task_owner
def run(
        self,
        start_date=None,
        end_date=None,
        ignore_first_depends_on_past=False,
        ignore_ti_state=False,
        mark_success=False):
    """
    Run a set of task instances for a date range.
    """
    start_date = start_date or self.start_date
    end_date = end_date or self.end_date or datetime.now()

    for dt in self.dag.date_range(start_date, end_date=end_date):
        TaskInstance(self, dt).run(
            mark_success=mark_success,
            ignore_depends_on_past=(
                dt == start_date and ignore_first_depends_on_past),
            ignore_ti_state=ignore_ti_state)
def get_task_instances(
        self, session, start_date=None, end_date=None, state=None):
    TI = TaskInstance
    if not start_date:
        start_date = (datetime.today() - timedelta(30)).date()
        start_date = datetime.combine(start_date, datetime.min.time())
    end_date = end_date or datetime.now()
    tis = session.query(TI).filter(
        TI.dag_id == self.dag_id,
        TI.execution_date >= start_date,
        TI.execution_date <= end_date,
        TI.task_id.in_([t.task_id for t in self.tasks]),
    )
    if state:
        tis = tis.filter(TI.state == state)
    tis = tis.all()
    return tis
def drop_constraint(
        cls, operations, constraint_name, table_name,
        type_=None, schema=None):
    """Drop a constraint of the given name, typically via DROP CONSTRAINT.

    :param constraint_name: name of the constraint.
    :param table_name: table name.
    :param type_: optional, required on MySQL.  can be
     'foreignkey', 'primary', 'unique', or 'check'.
    :param schema: Optional schema name to operate within.  To control
     quoting of the schema outside of the default behavior, use
     the SQLAlchemy construct
     :class:`~sqlalchemy.sql.elements.quoted_name`.

     .. versionadded:: 0.7.0 'schema' can now accept a
        :class:`~sqlalchemy.sql.elements.quoted_name` construct.

    .. versionchanged:: 0.8.0 The following positional argument names
       have been changed:

       * name -> constraint_name

    """
    op = cls(constraint_name, table_name, type_=type_, schema=schema)
    return operations.invoke(op)
def drop_index(cls, operations, index_name, table_name=None, schema=None):
    """Issue a "drop index" instruction using the current
    migration context.

    e.g.::

        drop_index("accounts")

    :param index_name: name of the index.
    :param table_name: name of the owning table.  Some
     backends such as Microsoft SQL Server require this.
    :param schema: Optional schema name to operate within.  To control
     quoting of the schema outside of the default behavior, use
     the SQLAlchemy construct
     :class:`~sqlalchemy.sql.elements.quoted_name`.

     .. versionadded:: 0.7.0 'schema' can now accept a
        :class:`~sqlalchemy.sql.elements.quoted_name` construct.

    .. versionchanged:: 0.8.0 The following positional argument names
       have been changed:

       * name -> index_name

    """
    op = cls(index_name, table_name=table_name, schema=schema)
    return operations.invoke(op)
def update_unregistered_devices(unregistered):
    '''
    Update data for unregistered Android devices.

    Unregistered devices will not receive notifications and will be deleted
    when the number of devices exceeds the maximum.
    '''
    global ENGINE
    binding = [{"p_{}".format(k): v for k, v in u.items()}
               for u in unregistered]
    device_table = model.metadata.tables['device']
    stmt = update(device_table).\
        values(unregistered_ts=func.now()).\
        where(and_(device_table.c.login_id == bindparam('p_login_id'),
                   func.coalesce(device_table.c.device_token_new,
                                 device_table.c.device_token) ==
                   bindparam('p_device_token')))
    ENGINE.execute(stmt, binding)
def open_changeset(osm_type, osm_id):
    place = Place.get_or_abort(osm_type, osm_id)
    osm_backend, auth = get_backend_and_auth()

    comment = request.form['comment']
    changeset = new_changeset(comment)

    if not really_save:
        return Response(changeset.id, mimetype='text/plain')

    try:
        r = osm_backend.request(osm_api_base + '/changeset/create',
                                method='PUT',
                                data=changeset.encode('utf-8'),
                                auth=auth,
                                headers=user_agent_headers())
    except requests.exceptions.HTTPError as e:
        mail.error_mail('error creating changeset: ' + place.name,
                        changeset, e.response)
        return Response('error', mimetype='text/plain')

    changeset_id = r.text.strip()
    if not changeset_id.isdigit():
        mail.open_changeset_error(place, changeset, r)
        return Response('error', mimetype='text/plain')

    change = Changeset(id=changeset_id,
                       place=place,
                       created=func.now(),
                       comment=comment,
                       update_count=0,
                       user=g.user)

    database.session.add(change)
    database.session.commit()

    return Response(changeset_id, mimetype='text/plain')
def create_index(self, name, table_name, columns, schema=None, **kw):
    """Issue a "create index" instruction using the current
    migration context.

    e.g.::

        from alembic import op
        op.create_index('ik_test', 't1', ['foo', 'bar'])

    :param name: name of the index.
    :param table_name: name of the owning table.

     .. versionchanged:: 0.5.0
        The ``tablename`` parameter is now named ``table_name``.
        As this is a positional argument, the old name is no
        longer present.

    :param columns: a list of string column names in the table.
    :param schema: Optional schema name to operate within.

     .. versionadded:: 0.4.0

    """
    self.impl.create_index(
        self._index(name, table_name, columns, schema=schema, **kw)
    )
def drop_index(self, name, table_name=None, schema=None):
    """Issue a "drop index" instruction using the current
    migration context.

    e.g.::

        drop_index("accounts")

    :param name: name of the index.
    :param table_name: name of the owning table.  Some
     backends such as Microsoft SQL Server require this.

     .. versionchanged:: 0.5.0
        The ``tablename`` parameter is now named ``table_name``.
        The old name will continue to function for backwards
        compatibility.

    :param schema: Optional schema name to operate within.

     .. versionadded:: 0.4.0

    """
    # need a dummy column name here since SQLAlchemy
    # 0.7.6 and further raises on Index with no columns
    self.impl.drop_index(
        self._index(name, table_name, ['x'], schema=schema)
    )
async def test_keys(engine):
    result = await engine.execute(select([func.now().label('time')]))
    assert await result.keys() == ['time']
    await result.close()
def skip(self, dag_run, execution_date, tasks, session=None):
    """
    Sets tasks instances to skipped from the same dag run.

    :param dag_run: the DagRun for which to set the tasks to skipped
    :param execution_date: execution_date
    :param tasks: tasks to skip (not task_ids)
    :param session: db session to use
    """
    if not tasks:
        return

    task_ids = [d.task_id for d in tasks]
    now = timezone.utcnow()

    if dag_run:
        session.query(TaskInstance).filter(
            TaskInstance.dag_id == dag_run.dag_id,
            TaskInstance.execution_date == dag_run.execution_date,
            TaskInstance.task_id.in_(task_ids)
        ).update({TaskInstance.state: State.SKIPPED,
                  TaskInstance.start_date: now,
                  TaskInstance.end_date: now},
                 synchronize_session=False)
        session.commit()
    else:
        assert execution_date is not None, "Execution date is None and no dag run"

        self.log.warning("No DAG RUN present this should not happen")
        # this is defensive against dag runs that are not complete
        for task in tasks:
            ti = TaskInstance(task, execution_date=execution_date)
            ti.state = State.SKIPPED
            ti.start_date = now
            ti.end_date = now
            session.merge(ti)

        session.commit()
def drop_constraint(self, name, table_name, type_=None, schema=None):
    """Drop a constraint of the given name, typically via DROP CONSTRAINT.

    :param name: name of the constraint.
    :param table_name: table name.
    :param ``type_``: optional, required on MySQL.  can be
     'foreignkey', 'primary', 'unique', or 'check'.
    :param schema: Optional schema name to operate within.  To control
     quoting of the schema outside of the default behavior, use
     the SQLAlchemy construct
     :class:`~sqlalchemy.sql.elements.quoted_name`.

     .. versionadded:: 0.7.0 'schema' can now accept a
        :class:`~sqlalchemy.sql.elements.quoted_name` construct.

    """
    t = self._table(table_name, schema=schema)
    types = {
        'foreignkey': lambda name: sa_schema.ForeignKeyConstraint(
            [], [], name=name),
        'primary': sa_schema.PrimaryKeyConstraint,
        'unique': sa_schema.UniqueConstraint,
        'check': lambda name: sa_schema.CheckConstraint("", name=name),
        None: sa_schema.Constraint
    }
    try:
        const = types[type_]
    except KeyError:
        raise TypeError("'type' can be one of %s" %
                        ", ".join(sorted(repr(x) for x in types)))

    const = const(name=name)
    t.append_constraint(const)
    self.impl.drop_constraint(const)
def is_published(self):
    rv = (self.status == self.STATUS_CHOICES[0][0] and
          self.publish_date <= datetime.now())
    if self.expire_date is not None:
        rv = rv and self.expire_date >= datetime.now()
    return rv
def published(self):
    pub = self._joinpoint_zero().columns['publish_date']
    expire = self._joinpoint_zero().columns['expire_date']
    rv = self.filter_by(status='published').filter(pub <= datetime.now())
    if expire is not None:
        # filter() returns a new query, so the result must be reassigned
        rv = rv.filter(expire >= datetime.now())
    return rv
def add_condition_order(self, stock_code, direction, compare_price, action,
                        deal_price, amount, begin_in_day, end_in_day):
    # insert into db
    print("stock_code=", stock_code, ", direction=", direction,
          ", action=", action, ", amount=", amount,
          ", deal_price=", deal_price, ", compare_price=", compare_price,
          ", begin_in_day=", begin_in_day, ", end_in_day=", end_in_day)
    new_order = cond_order(order_id=0,
                           stock_code=stock_code,
                           direction=direction,
                           action=action,
                           amount=amount,
                           deal_price=deal_price,
                           compare_price=compare_price,
                           begin_in_day=begin_in_day,
                           end_in_day=end_in_day,
                           state=0,
                           insert_time=func.now())
    self.session.add(new_order)
    # print new_order.stock_code
    ret = self.session.commit()
    return 1
def command_default(self, player):
    """Set this channel as your default so you don't need to type its name
    every time you want to transmit."""
    if player in self.listeners:
        if self.allowed(player):
            player.default_communication_channel = self
            player.save()
            player.notify(
                'Your default communication channel is now %s.',
                self.name
            )
        else:
            player.notify('You are blocked from accessing this channel.')
    else:
        player.notify('You must connect first.')
def now_sql(self):
    if self._engine.driver == "psycopg2":
        return func.now()
    else:
        # 1 Minute modifier to fix strange unit test race
        return func.datetime("now", "localtime", "+1 minutes")
def clear_task_instances(tis, session, activate_dag_runs=True):
    """
    Clears a set of task instances, but makes sure the running ones
    get killed.
    """
    job_ids = []
    for ti in tis:
        if ti.state == State.RUNNING:
            if ti.job_id:
                ti.state = State.SHUTDOWN
                job_ids.append(ti.job_id)
        # todo: this creates an issue with the webui tests
        # elif ti.state != State.REMOVED:
        #     ti.state = State.NONE
        #     session.merge(ti)
        else:
            session.delete(ti)

    if job_ids:
        from airflow.jobs import BaseJob as BJ
        for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all():
            job.state = State.SHUTDOWN

    if activate_dag_runs:
        execution_dates = {ti.execution_date for ti in tis}
        dag_ids = {ti.dag_id for ti in tis}
        drs = session.query(DagRun).filter(
            DagRun.dag_id.in_(dag_ids),
            DagRun.execution_date.in_(execution_dates),
        ).all()
        for dr in drs:
            dr.state = State.RUNNING
            dr.start_date = datetime.now()
def kill_zombies(self, session=None):
    """
    Fails tasks that haven't had a heartbeat in too long
    """
    from airflow.jobs import LocalTaskJob as LJ
    self.logger.info("Finding 'running' jobs without a recent heartbeat")
    TI = TaskInstance
    secs = (
        configuration.getint('scheduler', 'job_heartbeat_sec') * 3) + 120
    limit_dttm = datetime.now() - timedelta(seconds=secs)
    self.logger.info(
        "Failing jobs without heartbeat after {}".format(limit_dttm))

    tis = (
        session.query(TI)
        .join(LJ, TI.job_id == LJ.id)
        .filter(TI.state == State.RUNNING)
        .filter(
            or_(
                LJ.state != State.RUNNING,
                LJ.latest_heartbeat < limit_dttm,
            ))
        .all()
    )

    for ti in tis:
        if ti and ti.dag_id in self.dags:
            dag = self.dags[ti.dag_id]
            if ti.task_id in dag.task_ids:
                task = dag.get_task(ti.task_id)
                ti.task = task
                ti.handle_failure("{} killed as zombie".format(ti))
                self.logger.info(
                    'Marked zombie job {} as failed'.format(ti))
                Stats.incr('zombies_killed')
    session.commit()
def ready_for_retry(self):
    """
    Checks on whether the task instance is in the right state and timeframe
    to be retried.
    """
    return (self.state == State.UP_FOR_RETRY and
            self.next_retry_datetime() < datetime.now())