Java class org.hibernate.engine.spi.RowSelection example source code
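
RowSelection is the Hibernate value object that carries a query's pagination and statement hints: first row, maximum rows, JDBC fetch size, and query timeout. The snippets collected below show how dialects, loaders, and query plans read those hints through LimitHandler and LimitHelper. As a minimal orientation sketch (not taken from any of the projects below, and using only methods that appear in the snippets), a RowSelection is typically populated and inspected like this:

import org.hibernate.dialect.pagination.LimitHelper;
import org.hibernate.engine.spi.RowSelection;

public class RowSelectionSketch {
    public static void main(String[] args) {
        RowSelection selection = new RowSelection();
        selection.setFirstRow(50);   // zero-based offset of the first row to return
        selection.setMaxRows(25);    // upper bound on the number of returned rows
        selection.setFetchSize(25);  // JDBC fetch-size hint
        selection.setTimeout(30);    // statement timeout in seconds

        boolean paginated = selection.definesLimits();       // true once first row / max rows are set
        int firstRow = LimitHelper.getFirstRow(selection);   // 50
        boolean hasMax = LimitHelper.hasMaxRows(selection);  // true
        System.out.println(paginated + " " + firstRow + " " + hasMax);
    }
}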

Project: phoenix-hibernate-dialect    File: PhoenixDialect.java
@Override
public LimitHandler getLimitHandler() {
    return new AbstractLimitHandler() {
        @Override
        public boolean supportsLimit() {
            return true;
        }

        @Override
        public String processSql(String sql, RowSelection selection) {
            if (LimitHelper.useLimit(this, selection)) {
                final boolean hasMaxRows = LimitHelper.hasMaxRows(selection);
                final boolean hasOffset = LimitHelper.hasFirstRow(selection);
                return sql + (hasMaxRows ? " limit ?" : "")
                            + (hasOffset ? " offset ?" : "");
            }
            return sql;
        }
    };
}
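
For orientation, the handler above appends " limit ?" whenever a maximum row count is set and additionally " offset ?" when a first row is set; a hypothetical call (the dialect instantiation and query string are illustrative, not from the project) would look like this:

// Illustrative only: how the anonymous handler above rewrites a paged query.
LimitHandler handler = new PhoenixDialect().getLimitHandler();
RowSelection selection = new RowSelection();
selection.setMaxRows(20);   // value bound to " limit ?"
selection.setFirstRow(10);  // value bound to " offset ?"
// LimitHelper.useLimit(handler, selection) is true (limits supported, max rows set), so:
//   "select * from post"  ->  "select * from post limit ? offset ?"
String paged = handler.processSql("select * from post", selection);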
Project: lams    File: DynamicBatchingEntityLoaderBuilder.java
private List doTheLoad(String sql, QueryParameters queryParameters, SessionImplementor session) throws SQLException {
    final RowSelection selection = queryParameters.getRowSelection();
    final int maxRows = LimitHelper.hasMaxRows( selection ) ?
            selection.getMaxRows() :
            Integer.MAX_VALUE;

    final List<AfterLoadAction> afterLoadActions = new ArrayList<AfterLoadAction>();
    final SqlStatementWrapper wrapper = executeQueryStatement( sql, queryParameters, false, afterLoadActions, session );
    final ResultSet rs = wrapper.getResultSet();
    final Statement st = wrapper.getStatement();
    try {
        return processResultSet( rs, queryParameters, session, false, null, maxRows, afterLoadActions );
    }
    finally {
        session.getTransactionCoordinator().getJdbcCoordinator().release( st );
    }
}
Project: lams    File: Loader.java
/**
 * Advance the cursor to the first required row of the <tt>ResultSet</tt>
 */
private void advance(final ResultSet rs, final RowSelection selection)
        throws SQLException {

    final int firstRow = LimitHelper.getFirstRow( selection );
    if ( firstRow != 0 ) {
        if ( getFactory().getSettings().isScrollableResultSetsEnabled() ) {
            // we can go straight to the first required row
            rs.absolute( firstRow );
        }
        else {
            // we need to step through the rows one row at a time (slow)
            for ( int m = 0; m < firstRow; m++ ) rs.next();
        }
    }
}
Project: lams    File: Loader.java
/**
 * Execute given <tt>PreparedStatement</tt>, advance to the first result and return SQL <tt>ResultSet</tt>.
 */
protected final ResultSet getResultSet(
        final PreparedStatement st,
        final RowSelection selection,
        final LimitHandler limitHandler,
        final boolean autodiscovertypes,
        final SessionImplementor session)
throws SQLException, HibernateException {

    try {
        ResultSet rs = session.getTransactionCoordinator().getJdbcCoordinator().getResultSetReturn().extract( st );
        rs = wrapResultSetIfEnabled( rs , session );

        if ( !limitHandler.supportsLimitOffset() || !LimitHelper.useLimit( limitHandler, selection ) ) {
            advance( rs, selection );
        }

        if ( autodiscovertypes ) {
            autoDiscoverTypes( rs );
        }
        return rs;
    }
    catch ( SQLException sqle ) {
        session.getTransactionCoordinator().getJdbcCoordinator().release( st );
        throw sqle;
    }
}
Project: lams    File: DynamicBatchingCollectionInitializerBuilder.java
private void doTheLoad(String sql, QueryParameters queryParameters, SessionImplementor session) throws SQLException {
    final RowSelection selection = queryParameters.getRowSelection();
    final int maxRows = LimitHelper.hasMaxRows( selection ) ?
            selection.getMaxRows() :
            Integer.MAX_VALUE;

    final List<AfterLoadAction> afterLoadActions = Collections.emptyList();
    final SqlStatementWrapper wrapper = executeQueryStatement( sql, queryParameters, false, afterLoadActions, session );
    final ResultSet rs = wrapper.getResultSet();
    final Statement st = wrapper.getStatement();
    try {
        processResultSet( rs, queryParameters, session, true, null, maxRows, afterLoadActions );
    }
    finally {
        session.getTransactionCoordinator().getJdbcCoordinator().release( st );
    }
}
Project: lams    File: AbstractLoadPlanBasedLoader.java
/**
 * Execute given <tt>PreparedStatement</tt>, advance to the first result and return SQL <tt>ResultSet</tt>.
 */
protected final ResultSet getResultSet(
        final PreparedStatement st,
        final RowSelection selection,
        final LimitHandler limitHandler,
        final boolean autodiscovertypes,
        final SessionImplementor session)
        throws SQLException, HibernateException {

    try {
        ResultSet rs = session.getTransactionCoordinator().getJdbcCoordinator().getResultSetReturn().extract( st );
        rs = wrapResultSetIfEnabled( rs , session );

        if ( !limitHandler.supportsLimitOffset() || !LimitHelper.useLimit( limitHandler, selection ) ) {
            advance( rs, selection );
        }

        if ( autodiscovertypes ) {
            autoDiscoverTypes( rs );
        }
        return rs;
    }
    catch ( SQLException sqle ) {
        session.getTransactionCoordinator().getJdbcCoordinator().release( st );
        throw sqle;
    }
}
Project: lams    File: AbstractLoadPlanBasedLoader.java
/**
 * Advance the cursor to the first required row of the <tt>ResultSet</tt>
 */
protected void advance(final ResultSet rs, final RowSelection selection) throws SQLException {
    final int firstRow = LimitHelper.getFirstRow( selection );
    if ( firstRow != 0 ) {
        if ( getFactory().getSettings().isScrollableResultSetsEnabled() ) {
            // we can go straight to the first required row
            rs.absolute( firstRow );
        }
        else {
            // we need to step through the rows one row at a time (slow)
            for ( int m = 0; m < firstRow; m++ ) {
                rs.next();
            }
        }
    }
}
Project: engerek    File: SqlAuditServiceImpl.java
private int selectRecordsByMaxAge(Session session, String tempTable, Date minValue, Dialect dialect) {

    // Fill the temporary table; there is no need to join task/object/container,
    // because oid and id are already in the task table.
    StringBuilder selectSB = new StringBuilder();
    selectSB.append("select a.id as id from ").append(RAuditEventRecord.TABLE_NAME).append(" a");
    selectSB.append(" where a.").append(RAuditEventRecord.COLUMN_TIMESTAMP).append(" < ###TIME###");
    String selectString = selectSB.toString();

    // batch size
    RowSelection rowSelection = new RowSelection();
    rowSelection.setMaxRows(CLEANUP_AUDIT_BATCH_SIZE);
    LimitHandler limitHandler = dialect.buildLimitHandler(selectString, rowSelection);
    selectString = limitHandler.getProcessedSql();

    // Replace the limit placeholder "?" with the batch size, and ###TIME### with "?".
    // Done on the SQL string because it is unclear how to express this query in HQL,
    // or whether limiting the row count of a compound "insert into ... select ..."
    // via query.setMaxResults() would work. TODO: rewrite more cleanly if possible.
    selectString = selectString.replace("?", String.valueOf(CLEANUP_AUDIT_BATCH_SIZE));
    selectString = selectString.replace("###TIME###", "?");

    String queryString = "insert into " + tempTable + " " + selectString;
    LOGGER.trace("Query string = {}", queryString);
    SQLQuery query = session.createSQLQuery(queryString);
    query.setParameter(0, new Timestamp(minValue.getTime()));

    return query.executeUpdate();
}
Project: high-performance-java-persistence    File: OracleResultSetLimitTest.java
@Test
public void testLimit() {
    RowSelection rowSelection = new RowSelection();
    rowSelection.setMaxRows(getMaxRows());
    long startNanos = System.nanoTime();
    doInJDBC(connection -> {
        try (PreparedStatement statement = connection.prepareStatement(SELECT_POST)
        ) {
            statement.setMaxRows(getMaxRows());
            assertEquals(getMaxRows(), processResultSet(statement));
        } catch (SQLException e) {
            fail(e.getMessage());
        }

    });
    LOGGER.info("{} Result Set with limit took {} millis",
            dataSourceProvider().database(),
            TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
}
Project: high-performance-java-persistence    File: SQLServerResultSetLimitTest.java
@Test
public void testLimit() {
    RowSelection rowSelection = new RowSelection();
    rowSelection.setMaxRows(getMaxRows());
    long startNanos = System.nanoTime();
    doInJDBC(connection -> {
        try (PreparedStatement statement1 = connection.prepareStatement(SELECT_POST_COMMENT_1);
             PreparedStatement statement11 = connection.prepareStatement(SELECT_POST_COMMENT_1);
             PreparedStatement statement2 = connection.prepareStatement(SELECT_POST_COMMENT_2);
        ) {
            statement1.setMaxRows(getMaxRows());
            assertEquals(getMaxRows(), processResultSet(statement1));
            assertEquals(getPostCommentCount() * getPostCount(), processResultSet(statement11));
            assertEquals(getPostCommentCount() * getPostCount(), processResultSet(statement2));
        } catch (SQLException e) {
            fail(e.getMessage());
        }

    });
    LOGGER.info("{} Result Set with limit took {} millis",
            dataSourceProvider().database(),
            TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
}
Project: sqlite-dialect    File: SQLiteLimitHandler.java
@Override
public String processSql(String sql, RowSelection selection) {
    if (LimitHelper.hasFirstRow(selection)) {
        return sql + " limit ? offset ?";
    } else {
        return sql + " limit ?";
    }
}
Project: sqlite-dialect    File: SQLiteLimitHandlerTest.java
@Before
public void setup() {
    limitHandler = new SQLiteLimitHandler();
    rowSelection = new RowSelection();

    PowerMockito.mockStatic(LimitHelper.class);
}
Project: lams    File: Loader.java
private List doQuery(
        final SessionImplementor session,
        final QueryParameters queryParameters,
        final boolean returnProxies,
        final ResultTransformer forcedResultTransformer) throws SQLException, HibernateException {

    final RowSelection selection = queryParameters.getRowSelection();
    final int maxRows = LimitHelper.hasMaxRows( selection ) ?
            selection.getMaxRows() :
            Integer.MAX_VALUE;

    final List<AfterLoadAction> afterLoadActions = new ArrayList<AfterLoadAction>();

    final SqlStatementWrapper wrapper = executeQueryStatement( queryParameters, false, afterLoadActions, session );
    final ResultSet rs = wrapper.getResultSet();
    final Statement st = wrapper.getStatement();

    // It would be great to move everything below into another method that could also be
    // used from the new scrolling code. That would require handling the max-row logic
    // behind an interface so the control-break loop knows when to stop.

    try {
        return processResultSet( rs, queryParameters, session, returnProxies, forcedResultTransformer, maxRows, afterLoadActions );
    }
    finally {
        session.getTransactionCoordinator().getJdbcCoordinator().release( st );
    }
}
Project: lams    File: HQLQueryPlan.java
/**
 * If we're able to guess a likely size of the results, we can optimize allocation
 * of our data structures.
 * Essentially if we detect the user is not using pagination, we attempt to use the FetchSize
 * as a reasonable hint. If fetch size is not being set either, it is reasonable to expect
 * that we're going to have a single hit. In such a case it would be tempting to return a constant
 * of value one, but that's dangerous as it doesn't scale up appropriately for example
 * with an ArrayList if the guess is wrong.
 *
 * @param rowSelection
 * @return a reasonable size to use for allocation
 */
private final int guessResultSize(RowSelection rowSelection) {
    if ( rowSelection != null ) {
        final int maxReasonableAllocation = rowSelection.getFetchSize() != null ? rowSelection.getFetchSize().intValue() : 100;
        if ( rowSelection.getMaxRows() != null && rowSelection.getMaxRows().intValue() > 0 ) {
            return Math.min( maxReasonableAllocation, rowSelection.getMaxRows().intValue() );
        }
        else if ( rowSelection.getFetchSize() != null && rowSelection.getFetchSize().intValue() > 0 ) {
            return rowSelection.getFetchSize().intValue();
        }
    }
    return 7;//magic number guessed as a reasonable default.
}
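
To make the heuristic above concrete, these are the allocation sizes guessResultSize would return for a few representative configurations (the values follow directly from the code; the RowSelection instances are illustrative):

// Pagination set, no fetch size: the 100-row fallback caps the guess.
RowSelection paged = new RowSelection();
paged.setMaxRows(500);                    // guessResultSize(paged) -> min(100, 500) = 100

// Pagination and fetch size set: the smaller of the two wins.
RowSelection pagedWithFetch = new RowSelection();
pagedWithFetch.setMaxRows(500);
pagedWithFetch.setFetchSize(50);          // guessResultSize(pagedWithFetch) -> min(50, 500) = 50

// Only a fetch size: used as-is.
RowSelection fetchOnly = new RowSelection();
fetchOnly.setFetchSize(50);               // guessResultSize(fetchOnly) -> 50

// Neither limit nor fetch size (or a null selection): the magic default.
RowSelection empty = new RowSelection();  // guessResultSize(empty) -> 7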
Project: lams    File: AbstractQueryImpl.java
public AbstractQueryImpl(
        String queryString,
        FlushMode flushMode,
        SessionImplementor session,
        ParameterMetadata parameterMetadata) {
    this.session = session;
    this.queryString = queryString;
    this.selection = new RowSelection();
    this.flushMode = flushMode;
    this.cacheMode = null;
    this.parameterMetadata = parameterMetadata;
}
Project: lams    File: BasicExecutor.java
protected int doExecute(QueryParameters parameters, SessionImplementor session, String sql,
        List parameterSpecifications) throws HibernateException {
    BulkOperationCleanupAction action = new BulkOperationCleanupAction( session, persister );
    if ( session.isEventSource() ) {
        ( (EventSource) session ).getActionQueue().addAction( action );
    }
    else {
        action.getAfterTransactionCompletionProcess().doAfterTransactionCompletion( true, session );
    }

    PreparedStatement st = null;
    RowSelection selection = parameters.getRowSelection();

    try {
        try {
            st = session.getTransactionCoordinator().getJdbcCoordinator().getStatementPreparer().prepareStatement( sql, false );
            Iterator paramSpecItr = parameterSpecifications.iterator();
            int pos = 1;
            while ( paramSpecItr.hasNext() ) {
                final ParameterSpecification paramSpec = (ParameterSpecification) paramSpecItr.next();
                pos += paramSpec.bind( st, parameters, session, pos );
            }
            if ( selection != null ) {
                if ( selection.getTimeout() != null ) {
                    st.setQueryTimeout( selection.getTimeout() );
                }
            }

            return session.getTransactionCoordinator().getJdbcCoordinator().getResultSetReturn().executeUpdate( st );
        }
        finally {
            if ( st != null ) {
                session.getTransactionCoordinator().getJdbcCoordinator().release( st );
            }
        }
    }
    catch( SQLException sqle ) {
        throw factory.getSQLExceptionHelper().convert( sqle, "could not execute update query", sql );
    }
}
Project: engerek    File: SqlAuditServiceImpl.java
private int selectRecordsByNumberToKeep(Session session, String tempTable, Integer recordsToKeep, Dialect dialect) {
    Number totalAuditRecords = (Number) session.createCriteria(RAuditEventRecord.class)
            .setProjection(Projections.rowCount())
            .uniqueResult();
    int recordsToDelete = totalAuditRecords.intValue() - recordsToKeep;
    if (recordsToDelete <= 0) {
        recordsToDelete = 0;
    } else if (recordsToDelete > CLEANUP_AUDIT_BATCH_SIZE) {
        recordsToDelete = CLEANUP_AUDIT_BATCH_SIZE;
    }
    LOGGER.debug("Total audit records: {}, records to keep: {} => records to delete in this batch: {}",
            totalAuditRecords, recordsToKeep, recordsToDelete);
    if (recordsToDelete == 0) {
        return 0;
    }

    StringBuilder selectSB = new StringBuilder();
    selectSB.append("select a.id as id from ").append(RAuditEventRecord.TABLE_NAME).append(" a");
    selectSB.append(" order by a.").append(RAuditEventRecord.COLUMN_TIMESTAMP).append(" asc");
    String selectString = selectSB.toString();

    // batch size
    RowSelection rowSelection = new RowSelection();
    rowSelection.setMaxRows(recordsToDelete);
    LimitHandler limitHandler = dialect.buildLimitHandler(selectString, rowSelection);
    selectString = limitHandler.getProcessedSql();
    selectString = selectString.replace("?", String.valueOf(recordsToDelete));

    String queryString = "insert into " + tempTable + " " + selectString;
    LOGGER.trace("Query string = {}", queryString);
    SQLQuery query = session.createSQLQuery(queryString);
    return query.executeUpdate();
}
Project: high-performance-java-persistence    File: ResultSetLimitTest.java
@Test
public void testLimit() {
    final RowSelection rowSelection = new RowSelection();
    rowSelection.setMaxRows(getMaxRows());
    LimitHandler limitHandler = ((SessionFactoryImpl) sessionFactory()).getDialect().getLimitHandler();
    String limitStatement = limitHandler.processSql(SELECT_POST_COMMENT, rowSelection);
    long startNanos = System.nanoTime();
    doInJDBC(connection -> {
        try (PreparedStatement statement = connection.prepareStatement(limitStatement)) {
            limitHandler.bindLimitParametersAtEndOfQuery(rowSelection, statement, 1);
            statement.setInt(1, getMaxRows());
            statement.execute();
            int count = 0;
            ResultSet resultSet = statement.getResultSet();
            while (resultSet.next()) {
                resultSet.getLong(1);
                count++;
            }
            assertEquals(getMaxRows(), count);
        } catch (SQLException e) {
            fail(e.getMessage());
        }

    });
    LOGGER.info("{} Result Set with limit took {} millis",
            dataSourceProvider().database(),
            TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
}
Project: midpoint    File: SqlAuditServiceImpl.java
private int selectRecordsByMaxAge(Session session, String tempTable, Date minValue, Dialect dialect) {

    // Fill the temporary table; there is no need to join task/object/container,
    // because oid and id are already in the task table.
    StringBuilder selectSB = new StringBuilder();
    selectSB.append("select a.id as id from ").append(RAuditEventRecord.TABLE_NAME).append(" a");
    selectSB.append(" where a.").append(RAuditEventRecord.COLUMN_TIMESTAMP).append(" < ###TIME###");
    String selectString = selectSB.toString();

    // batch size
    RowSelection rowSelection = new RowSelection();
    rowSelection.setMaxRows(CLEANUP_AUDIT_BATCH_SIZE);
    LimitHandler limitHandler = dialect.getLimitHandler();
    selectString = limitHandler.processSql(selectString, rowSelection);

    // Replace the limit placeholder "?" with the batch size, and ###TIME### with "?".
    // Done on the SQL string because it is unclear how to express this query in HQL,
    // or whether limiting the row count of a compound "insert into ... select ..."
    // via query.setMaxResults() would work. TODO: rewrite more cleanly if possible.
    selectString = selectString.replace("?", String.valueOf(CLEANUP_AUDIT_BATCH_SIZE));
    selectString = selectString.replace("###TIME###", "?");

    String queryString = "insert into " + tempTable + " " + selectString;
    LOGGER.trace("Query string = {}", queryString);
    NativeQuery query = session.createNativeQuery(queryString);
    query.setParameter(1, new Timestamp(minValue.getTime()));

    return query.executeUpdate();
}
Project: midpoint    File: SqlAuditServiceImpl.java
private int selectRecordsByNumberToKeep(Session session, String tempTable, Integer recordsToKeep, Dialect dialect) {
    Number totalAuditRecords = (Number) session.createCriteria(RAuditEventRecord.class)
            .setProjection(Projections.rowCount())
            .uniqueResult();
    int recordsToDelete = totalAuditRecords.intValue() - recordsToKeep;
    if (recordsToDelete <= 0) {
        recordsToDelete = 0;
    } else if (recordsToDelete > CLEANUP_AUDIT_BATCH_SIZE) {
        recordsToDelete = CLEANUP_AUDIT_BATCH_SIZE;
    }
    LOGGER.debug("Total audit records: {}, records to keep: {} => records to delete in this batch: {}",
            totalAuditRecords, recordsToKeep, recordsToDelete);
    if (recordsToDelete == 0) {
        return 0;
    }

    StringBuilder selectSB = new StringBuilder();
    selectSB.append("select a.id as id from ").append(RAuditEventRecord.TABLE_NAME).append(" a");
    selectSB.append(" order by a.").append(RAuditEventRecord.COLUMN_TIMESTAMP).append(" asc");
    String selectString = selectSB.toString();

    // batch size
    RowSelection rowSelection = new RowSelection();
    rowSelection.setMaxRows(recordsToDelete);
    LimitHandler limitHandler = dialect.getLimitHandler();
    selectString = limitHandler.processSql(selectString, rowSelection);
    selectString = selectString.replace("?", String.valueOf(recordsToDelete));

    String queryString = "insert into " + tempTable + " " + selectString;
    LOGGER.trace("Query string = {}", queryString);
    NativeQuery query = session.createNativeQuery(queryString);
    return query.executeUpdate();
}
Project: sqlite-dialect    File: SQLiteLimitHandler.java
@Override
public int bindLimitParametersAtStartOfQuery(RowSelection selection, PreparedStatement statement, int index) throws SQLException {
    return 0;
}
Project: sqlite-dialect    File: SQLiteLimitHandler.java
@Override
public int bindLimitParametersAtEndOfQuery(RowSelection selection, PreparedStatement statement, int index) throws SQLException {
    return 0;
}
Project: sqlite-dialect    File: SQLiteLimitHandler.java
@Override
public void setMaxRows(RowSelection selection, PreparedStatement statement) throws SQLException {
}
Project: spanner-hibernate    File: CloudSpannerDialect.java
@Override
public String processSql(String sql, RowSelection selection)
{
    final boolean hasOffset = LimitHelper.hasFirstRow(selection);
    return sql + (hasOffset ? " limit ? offset ?" : " limit ?");
}
Project: lams    File: ResultSetProcessorImpl.java
@Override
public List extractResults(
        ResultSet resultSet,
        final SessionImplementor session,
        QueryParameters queryParameters,
        NamedParameterContext namedParameterContext,
        boolean returnProxies,
        boolean readOnly,
        ResultTransformer forcedResultTransformer,
        List<AfterLoadAction> afterLoadActionList) throws SQLException {

    handlePotentiallyEmptyCollectionRootReturns( loadPlan, queryParameters.getCollectionKeys(), resultSet, session );

    final int maxRows;
    final RowSelection selection = queryParameters.getRowSelection();
    if ( LimitHelper.hasMaxRows( selection ) ) {
        maxRows = selection.getMaxRows();
        LOG.tracef( "Limiting ResultSet processing to just %s rows", maxRows );
    }
    else {
        maxRows = Integer.MAX_VALUE;
    }

    // There are times when the "optional entity information" on QueryParameters should be used and
    // times when they should be ignored.  Loader uses its isSingleRowLoader method to allow
    // subclasses to override that.  Collection initializers, batch loaders, e.g. override that
    // it to be false.  The 'shouldUseOptionalEntityInstance' setting is meant to fill that same role.
    final boolean shouldUseOptionalEntityInstance = true;

    // Handles the "FETCH ALL PROPERTIES" directive in HQL
    final boolean forceFetchLazyAttributes = false;

    final ResultSetProcessingContextImpl context = new ResultSetProcessingContextImpl(
            resultSet,
            session,
            loadPlan,
            readOnly,
            shouldUseOptionalEntityInstance,
            forceFetchLazyAttributes,
            returnProxies,
            queryParameters,
            namedParameterContext,
            hadSubselectFetches
    );

    final List loadResults = new ArrayList();

    LOG.trace( "Processing result set" );
    int count;
    for ( count = 0; count < maxRows && resultSet.next(); count++ ) {
        LOG.debugf( "Starting ResultSet row #%s", count );

        Object logicalRow = rowReader.readRow( resultSet, context );

        // todo : apply transformers here?

        loadResults.add( logicalRow );

        context.finishUpRow();
    }

    LOG.tracev( "Done processing result set ({0} rows)", count );

    rowReader.finishUp( context, afterLoadActionList );
    context.wrapUp();

    session.getPersistenceContext().initializeNonLazyCollections();

    return loadResults;
}
Project: lams    File: AbstractLoadPlanBasedLoader.java
/**
 * Obtain a <tt>PreparedStatement</tt> with all parameters pre-bound.
 * Bind JDBC-style <tt>?</tt> parameters, named parameters, and
 * limit parameters.
 */
protected final PreparedStatement prepareQueryStatement(
        final String sql,
        final QueryParameters queryParameters,
        final LimitHandler limitHandler,
        final boolean scroll,
        final SessionImplementor session) throws SQLException, HibernateException {
    final Dialect dialect = getFactory().getDialect();
    final RowSelection selection = queryParameters.getRowSelection();
    final boolean useLimit = LimitHelper.useLimit( limitHandler, selection );
    final boolean hasFirstRow = LimitHelper.hasFirstRow( selection );
    final boolean useLimitOffset = hasFirstRow && useLimit && limitHandler.supportsLimitOffset();
    final boolean callable = queryParameters.isCallable();
    final ScrollMode scrollMode = getScrollMode( scroll, hasFirstRow, useLimitOffset, queryParameters );

    final PreparedStatement st = session.getTransactionCoordinator().getJdbcCoordinator()
            .getStatementPreparer().prepareQueryStatement( sql, callable, scrollMode );

    try {

        int col = 1;
        //TODO: can we limit stored procedures ?!
        col += limitHandler.bindLimitParametersAtStartOfQuery( st, col );

        if (callable) {
            col = dialect.registerResultSetOutParameter( (CallableStatement)st, col );
        }

        col += bindParameterValues( st, queryParameters, col, session );

        col += limitHandler.bindLimitParametersAtEndOfQuery( st, col );

        limitHandler.setMaxRows( st );

        if ( selection != null ) {
            if ( selection.getTimeout() != null ) {
                st.setQueryTimeout( selection.getTimeout() );
            }
            if ( selection.getFetchSize() != null ) {
                st.setFetchSize( selection.getFetchSize() );
            }
        }

        // handle lock timeout...
        final LockOptions lockOptions = queryParameters.getLockOptions();
        if ( lockOptions != null ) {
            if ( lockOptions.getTimeOut() != LockOptions.WAIT_FOREVER ) {
                if ( !dialect.supportsLockTimeouts() ) {
                    if ( log.isDebugEnabled() ) {
                        log.debugf(
                                "Lock timeout [%s] requested but dialect reported to not support lock timeouts",
                                lockOptions.getTimeOut()
                        );
                    }
                }
                else if ( dialect.isLockTimeoutParameterized() ) {
                    st.setInt( col++, lockOptions.getTimeOut() );
                }
            }
        }

        if ( log.isTraceEnabled() ) {
            log.tracev( "Bound [{0}] parameters total", col );
        }
    }
    catch ( SQLException sqle ) {
        session.getTransactionCoordinator().getJdbcCoordinator().release( st );
        throw sqle;
    }
    catch ( HibernateException he ) {
        session.getTransactionCoordinator().getJdbcCoordinator().release( st );
        throw he;
    }

    return st;
}
Project: lams    File: HQLQueryPlan.java
/**
 * Coordinates the efforts to perform a list across all the included query translators.
 *
 * @param queryParameters The query parameters
 * @param session The session
 *
 * @return The query result list
 *
 * @throws HibernateException Indicates a problem performing the query
 */
@SuppressWarnings("unchecked")
public List performList(
        QueryParameters queryParameters,
        SessionImplementor session) throws HibernateException {
    if ( TRACE_ENABLED ) {
        LOG.tracev( "Find: {0}", getSourceQuery() );
        queryParameters.traceParameters( session.getFactory() );
    }

    final RowSelection rowSelection = queryParameters.getRowSelection();
    final boolean hasLimit = rowSelection != null
            && rowSelection.definesLimits();
    final boolean needsLimit = hasLimit && translators.length > 1;

    final QueryParameters queryParametersToUse;
    if ( needsLimit ) {
        LOG.needsLimit();
        final RowSelection selection = new RowSelection();
        selection.setFetchSize( queryParameters.getRowSelection().getFetchSize() );
        selection.setTimeout( queryParameters.getRowSelection().getTimeout() );
        queryParametersToUse = queryParameters.createCopyUsing( selection );
    }
    else {
        queryParametersToUse = queryParameters;
    }

    final int guessedResultSize = guessResultSize( rowSelection );
    final List combinedResults = new ArrayList( guessedResultSize );
    final IdentitySet distinction = new IdentitySet( guessedResultSize );
    int includedCount = -1;
    translator_loop:
    for ( QueryTranslator translator : translators ) {
        final List tmp = translator.list( session, queryParametersToUse );
        if ( needsLimit ) {
            // NOTE : firstRow is zero-based
            final int first = queryParameters.getRowSelection().getFirstRow() == null
                    ? 0
                    : queryParameters.getRowSelection().getFirstRow();
            final int max = queryParameters.getRowSelection().getMaxRows() == null
                    ? -1
                    : queryParameters.getRowSelection().getMaxRows();
            for ( final Object result : tmp ) {
                if ( !distinction.add( result ) ) {
                    continue;
                }
                includedCount++;
                if ( includedCount < first ) {
                    continue;
                }
                combinedResults.add( result );
                if ( max >= 0 && includedCount > max ) {
                    // break the outer loop !!!
                    break translator_loop;
                }
            }
        }
        else {
            combinedResults.addAll( tmp );
        }
    }
    return combinedResults;
}
Project: lams    File: AbstractQueryImpl.java
protected RowSelection getRowSelection() {
    return selection;
}
Project: lams    File: CUBRIDDialect.java
@Override
public LimitHandler buildLimitHandler(String sql, RowSelection selection) {
    return new CUBRIDLimitHandler( this, sql, selection );
}
Project: lams    File: SQLServer2005Dialect.java
@Override
public LimitHandler buildLimitHandler(String sql, RowSelection selection) {
    return new SQLServer2005LimitHandler( sql, selection );
}
Project: lams    File: QueryKey.java
/**
 * Generates a QueryKey.
 *
 * @param queryString The sql query string.
 * @param queryParameters The query parameters
 * @param filterKeys The keys of any enabled filters.
 * @param session The current session.
 * @param customTransformer The result transformer; should be null if data is not transformed before being cached.
 *
 * @return The generate query cache key.
 */
public static QueryKey generateQueryKey(
        String queryString,
        QueryParameters queryParameters,
        Set filterKeys,
        SessionImplementor session,
        CacheableResultTransformer customTransformer) {
    // disassemble positional parameters
    final int positionalParameterCount = queryParameters.getPositionalParameterTypes().length;
    final Type[] types = new Type[positionalParameterCount];
    final Object[] values = new Object[positionalParameterCount];
    for ( int i = 0; i < positionalParameterCount; i++ ) {
        types[i] = queryParameters.getPositionalParameterTypes()[i];
        values[i] = types[i].disassemble( queryParameters.getPositionalParameterValues()[i], session, null );
    }

    // disassemble named parameters
    final Map<String,TypedValue> namedParameters;
    if ( queryParameters.getNamedParameters() == null ) {
        namedParameters = null;
    }
    else {
        namedParameters = CollectionHelper.mapOfSize( queryParameters.getNamedParameters().size() );
        for ( Map.Entry<String,TypedValue> namedParameterEntry : queryParameters.getNamedParameters().entrySet() ) {
            namedParameters.put(
                    namedParameterEntry.getKey(),
                    new TypedValue(
                            namedParameterEntry.getValue().getType(),
                            namedParameterEntry.getValue().getType().disassemble(
                                    namedParameterEntry.getValue().getValue(),
                                    session,
                                    null
                            )
                    )
            );
        }
    }

    // decode row selection...
    final RowSelection selection = queryParameters.getRowSelection();
    final Integer firstRow;
    final Integer maxRows;
    if ( selection != null ) {
        firstRow = selection.getFirstRow();
        maxRows = selection.getMaxRows();
    }
    else {
        firstRow = null;
        maxRows = null;
    }

    return new QueryKey(
            queryString,
            types,
            values,
            namedParameters,
            firstRow,
            maxRows,
            filterKeys,
            session.getTenantIdentifier(),
            customTransformer
    );
}
Project: lams    File: QueryTranslatorImpl.java
@Override
public List list(SessionImplementor session, QueryParameters queryParameters)
        throws HibernateException {
    // Delegate to the QueryLoader...
    errorIfDML();

    final QueryNode query = (QueryNode) sqlAst;
    final boolean hasLimit = queryParameters.getRowSelection() != null && queryParameters.getRowSelection().definesLimits();
    final boolean needsDistincting = ( query.getSelectClause().isDistinct() || hasLimit ) && containsCollectionFetches();

    QueryParameters queryParametersToUse;
    if ( hasLimit && containsCollectionFetches() ) {
        LOG.firstOrMaxResultsSpecifiedWithCollectionFetch();
        RowSelection selection = new RowSelection();
        selection.setFetchSize( queryParameters.getRowSelection().getFetchSize() );
        selection.setTimeout( queryParameters.getRowSelection().getTimeout() );
        queryParametersToUse = queryParameters.createCopyUsing( selection );
    }
    else {
        queryParametersToUse = queryParameters;
    }

    List results = queryLoader.list( session, queryParametersToUse );

    if ( needsDistincting ) {
        int includedCount = -1;
        // NOTE : firstRow is zero-based
        int first = !hasLimit || queryParameters.getRowSelection().getFirstRow() == null
                    ? 0
                    : queryParameters.getRowSelection().getFirstRow();
        int max = !hasLimit || queryParameters.getRowSelection().getMaxRows() == null
                    ? -1
                    : queryParameters.getRowSelection().getMaxRows();
        List tmp = new ArrayList();
        IdentitySet distinction = new IdentitySet();
        for ( final Object result : results ) {
            if ( !distinction.add( result ) ) {
                continue;
            }
            includedCount++;
            if ( includedCount < first ) {
                continue;
            }
            tmp.add( result );
            // NOTE : ( max - 1 ) because first is zero-based while max is not...
            if ( max >= 0 && ( includedCount - first ) >= ( max - 1 ) ) {
                break;
            }
        }
        results = tmp;
    }

    return results;
}
Project: vsDiaryWriter    File: SQLiteDialect.java
@Override
public String processSql(String sql, RowSelection selection) {
    final boolean hasOffset = LimitHelper.hasFirstRow(selection);
    return sql + (hasOffset ? " limit ? offset ?" : " limit ?");
}
Project: spletne-seje    File: SQLiteDialect.java
@Override
public String processSql(String sql, RowSelection selection) {
    final boolean hasOffset = LimitHelper.hasFirstRow(selection);
    return sql + (hasOffset ? " limit ? offset ?" : " limit ?");
}
Project: lams    File: Loader.java
/**
 * Build LIMIT clause handler applicable for given selection criteria. Returns {@link NoopLimitHandler} delegate
 * if dialect does not support LIMIT expression or processed query does not use pagination.
 *
 * @param sql Query string.
 * @param selection Selection criteria.
 * @return LIMIT clause delegate.
 */
protected LimitHandler getLimitHandler(String sql, RowSelection selection) {
    final LimitHandler limitHandler = getFactory().getDialect().buildLimitHandler( sql, selection );
    return LimitHelper.useLimit( limitHandler, selection ) ? limitHandler : new NoopLimitHandler( sql, selection );
}
Project: lams    File: AbstractLoadPlanBasedLoader.java
/**
 * Build LIMIT clause handler applicable for given selection criteria. Returns {@link org.hibernate.dialect.pagination.NoopLimitHandler} delegate
 * if dialect does not support LIMIT expression or processed query does not use pagination.
 *
 * @param sql Query string.
 * @param selection Selection criteria.
 * @return LIMIT clause delegate.
 */
protected LimitHandler getLimitHandler(String sql, RowSelection selection) {
    final LimitHandler limitHandler = getFactory().getDialect().buildLimitHandler( sql, selection );
    return LimitHelper.useLimit( limitHandler, selection ) ? limitHandler : new NoopLimitHandler( sql, selection );
}
Project: lams    File: LegacyLimitHandler.java
/**
 * Constructs a LegacyLimitHandler
 *
 * @param dialect The dialect
 * @param sql The sql
 * @param selection The row selection
 */
public LegacyLimitHandler(Dialect dialect, String sql, RowSelection selection) {
    super( sql, selection );
    this.dialect = dialect;
}
Project: lams    File: CUBRIDLimitHandler.java
/**
 * Constructs a CUBRIDLimitHandler
 *
 * @param dialect Currently not used
 * @param sql The SQL
 * @param selection The row selection options
 */
public CUBRIDLimitHandler(Dialect dialect, String sql, RowSelection selection) {
    super( sql, selection );
    this.dialect = dialect;
}