The following 4 code examples, extracted from open-source Python projects, illustrate how to use sqlalchemy.dialects.postgresql.DOUBLE_PRECISION.
def downgrade():
    """Revert to the denormalized schema.

    Re-adds the plain ``label``/``probability`` columns, backfills them
    from the normalized lookup tables, then drops those tables and their
    indexes.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('label_event', sa.Column('label', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    op.add_column('dataset', sa.Column('probability', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
    op.add_column('problem', sa.Column('label', sa.Unicode(length=255), nullable=True))
    # Backfill the re-added columns from the normalized tables before
    # tightening nullability and dropping the source tables.
    op.execute(''' UPDATE label_event SET label = (SELECT label FROM problem_label WHERE problem_label.id = label_event.label_id) ''')
    op.execute(''' UPDATE dataset SET probability = (SELECT probability FROM dataset_label_probability WHERE dataset_label_probability.data_id = dataset.id) ''')
    op.execute(''' UPDATE problem SET label = (SELECT label FROM problem_label WHERE problem_label.problem_id = problem.id) ''')
    # Only after the backfill can these columns be made NOT NULL.
    op.alter_column('label_event', 'label', nullable=False)
    op.alter_column('problem', 'label', nullable=False)
    op.drop_column('problem', 'name')
    op.drop_index(op.f('ix_label_event_label_id'), table_name='label_event')
    op.drop_column('label_event', 'label_id')
    op.drop_index(op.f('ix_dataset_label_probability_label_id'), table_name='dataset_label_probability')
    op.drop_index(op.f('ix_dataset_label_probability_data_id'), table_name='dataset_label_probability')
    op.drop_table('dataset_label_probability')
    op.drop_table('problem_label')
    # ### end Alembic commands ###
def upgrade():
    """Relax ``items.price`` to allow NULL values."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        'items',
        'price',
        existing_type=postgresql.DOUBLE_PRECISION(precision=53),
        nullable=True,
    )
    # ### end Alembic commands ###
def downgrade():
    """Restore the NOT NULL constraint on ``items.price``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        'items',
        'price',
        existing_type=postgresql.DOUBLE_PRECISION(precision=53),
        nullable=False,
    )
    # ### end Alembic commands ###
def downgrade():
    """Downgrade the database to an older revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    # 'stacks' must exist first: the two tables below declare foreign keys
    # against it. postgresql_ignore_search_path=False keeps the sequence
    # default ('stacks_id_seq') schema-qualified as emitted.
    op.create_table(
        'stacks',
        sa.Column('id', sa.INTEGER(), server_default=sa.text("nextval('stacks_id_seq'::regclass)"), nullable=False),
        sa.Column('is_ref_stack', sa.BOOLEAN(), autoincrement=False, nullable=False),
        sa.Column('stack_json', postgresql.JSONB(astext_type=sa.Text()), autoincrement=False, nullable=False),
        sa.PrimaryKeyConstraint('id', name='stacks_pkey'),
        postgresql_ignore_search_path=False,
    )
    op.create_table(
        'similar_components',
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('fromcomponent', sa.TEXT(), autoincrement=False, nullable=False),
        sa.Column('tocomponent', sa.TEXT(), autoincrement=False, nullable=False),
        sa.Column('similarity_distance', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=False),
        sa.PrimaryKeyConstraint('id', name='similar_components_pkey'),
        # A component pair may appear only once.
        sa.UniqueConstraint('fromcomponent', 'tocomponent', name='sim_comps'),
    )
    op.create_table(
        'similar_stacks',
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('analysis', postgresql.JSONB(astext_type=sa.Text()), autoincrement=False, nullable=True),
        sa.Column('similar_stack_id', sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column('similarity_value', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=False),
        sa.Column('stack_id', sa.INTEGER(), autoincrement=False, nullable=False),
        sa.ForeignKeyConstraint(['similar_stack_id'], ['stacks.id'], name='similar_stacks_similar_stack_id_fkey'),
        sa.ForeignKeyConstraint(['stack_id'], ['stacks.id'], name='similar_stacks_stack_id_fkey'),
        sa.PrimaryKeyConstraint('id', name='similar_stacks_pkey'),
        # Each (stack, similar stack) pairing is recorded at most once.
        sa.UniqueConstraint('stack_id', 'similar_stack_id', name='sim_unique'),
    )
    # NOTE(review): 'esmarker' references 'worker_results', which this
    # migration does not create — presumably it exists in the older
    # revision already; verify against the full migration chain.
    op.create_table(
        'esmarker',
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('worker_result_id', sa.INTEGER(), autoincrement=False, nullable=True),
        sa.ForeignKeyConstraint(['worker_result_id'], ['worker_results.id'], name='esmarker_worker_result_id_fkey'),
        sa.PrimaryKeyConstraint('id', name='esmarker_pkey'),
    )
    # ### end Alembic commands ###