Java class org.apache.cassandra.thrift.SliceRange: example source code

Project: cassandra-trunk    File: Operation.java
protected SlicePredicate slicePredicate()
{
    final SlicePredicate predicate = new SlicePredicate();
    if (state.settings.columns.slice)
    {
        int count = state.rowGen.count(index);
        int start = sliceStart(count);
        predicate.setSlice_range(new SliceRange()
                                 .setStart(state.settings.columns.names.get(start))
                                 .setFinish(new byte[] {})
                                 .setReversed(false)
                                 .setCount(count)
        );
    }
    else
        predicate.setColumn_names(randomNames());
    return predicate;
}
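
The two snippets above show the pattern that recurs throughout this page: a SlicePredicate is populated either with a SliceRange (a contiguous, optionally reversed span of column names, capped by count) or with an explicit column-name list, never both. The following is a minimal sketch of both modes; it is not taken from any of the projects listed here, and the column names are placeholders.

// Sketch: the two mutually exclusive ways to fill a SlicePredicate.
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;

static SlicePredicate rangePredicate()
{
    // Mode 1: a contiguous range of column names. Empty start/finish mean
    // "from the first column" / "to the last column"; count caps the result.
    return new SlicePredicate().setSlice_range(new SliceRange()
            .setStart(new byte[0])
            .setFinish(new byte[0])
            .setReversed(false)
            .setCount(100));
}

static SlicePredicate namesPredicate()
{
    // Mode 2: an explicit list of column names (placeholder names here).
    return new SlicePredicate().setColumn_names(Arrays.asList(
            ByteBuffer.wrap("name".getBytes(StandardCharsets.UTF_8)),
            ByteBuffer.wrap("age".getBytes(StandardCharsets.UTF_8))));
}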
Project: archived-net-virt-platform    File: Connection.java
private SlicePredicate getSlicePredicate(String[] columnNameList) {
    SlicePredicate slicePredicate = new SlicePredicate();
    try {
        if (columnNameList != null) {
            List<ByteBuffer> columnNameByteBufferList = new ArrayList<ByteBuffer>();
            for (String columnName: columnNameList) {
                byte[] columnNameBytes = columnName.getBytes("UTF-8");
                columnNameByteBufferList.add(ByteBuffer.wrap(columnNameBytes));
            }
            slicePredicate.setColumn_names(columnNameByteBufferList);
        } else {
            SliceRange sliceRange = new SliceRange();
            sliceRange.setStart(new byte[0]);
            sliceRange.setFinish(new byte[0]);
            // FIXME: The default column count is 100. We should tune the value.
            sliceRange.setCount(100000);

            slicePredicate.setSlice_range(sliceRange);
        }
    }
    catch (UnsupportedEncodingException exc) {
        throw new StorageException("Character encoding exception with column names", exc);
    }
    return slicePredicate;
}
Project: CadalWorkspace    File: CasTimeReader.java
public CasTimeReader() {
    try {
        TTransport tr = new TFramedTransport(new TSocket("10.15.61.111",
                9160));
        TProtocol proto = new TBinaryProtocol(tr);
        client = new Cassandra.Client(proto);
        tr.open();

        client.set_keyspace("CadalSecTest");

        predicate = new SlicePredicate();
        SliceRange range = new SliceRange();
        range.setStart(new byte[0]);
        range.setFinish(new byte[0]);
        range.setCount(10000);
        predicate.setSlice_range(range);

        columnParent = new ColumnParent();
        columnParent.setColumn_family("RecordMinute");
    } catch (Exception e) {
        System.out.println(e);
    }
}
Project: CadalWorkspace    File: CasTimeBook.java
public CasTimeBook() {
    try {
        TTransport tr = new TFramedTransport(new TSocket("10.15.61.111",
                9160));
        TProtocol proto = new TBinaryProtocol(tr);
        client = new Cassandra.Client(proto);
        tr.open();

        client.set_keyspace("CadalSecTest");

        predicate = new SlicePredicate();
        SliceRange range = new SliceRange();
        range.setStart(new byte[0]);
        range.setFinish(new byte[0]);
        range.setCount(10000);
        predicate.setSlice_range(range);

        columnParent = new ColumnParent();
        columnParent.setColumn_family("RecordMinute");
    } catch (Exception e) {
        System.out.println(e);
    }
}
Project: titan1withtp3.1    File: CassandraBinaryInputFormat.java
private SliceRange getSliceRange(final SliceQuery slice, final int limit) {
    final SliceRange sliceRange = new SliceRange();
    sliceRange.setStart(slice.getSliceStart().asByteBuffer());
    sliceRange.setFinish(slice.getSliceEnd().asByteBuffer());
    sliceRange.setCount(Math.min(limit, slice.getLimit()));
    return sliceRange;
}
Project: titan1.0.1.kafka    File: CassandraEmbeddedKeyColumnValueStore.java
/**
 * Create a RangeSliceCommand and run it against the StorageProxy.
 * <p>
 * To match the behavior of the standard Cassandra thrift API endpoint, the
 * {@code nowMillis} argument should be the number of milliseconds since the
 * UNIX Epoch (e.g. System.currentTimeMillis() or equivalent obtained
 * through a {@link TimestampProvider}). This is per
 * {@link org.apache.cassandra.thrift.CassandraServer#get_range_slices(ColumnParent, SlicePredicate, KeyRange, ConsistencyLevel)},
 * which passes the server's System.currentTimeMillis() to the
 * {@code RangeSliceCommand} constructor.
 */
private List<Row> getKeySlice(Token start,
                              Token end,
                              @Nullable SliceQuery sliceQuery,
                              int pageSize,
                              long nowMillis) throws BackendException {
    IPartitioner partitioner = StorageService.getPartitioner();

    SliceRange columnSlice = new SliceRange();
    if (sliceQuery == null) {
        columnSlice.setStart(ArrayUtils.EMPTY_BYTE_ARRAY)
                .setFinish(ArrayUtils.EMPTY_BYTE_ARRAY)
                .setCount(5);
    } else {
        columnSlice.setStart(sliceQuery.getSliceStart().asByteBuffer())
                .setFinish(sliceQuery.getSliceEnd().asByteBuffer())
                .setCount(sliceQuery.hasLimit() ? sliceQuery.getLimit() : Integer.MAX_VALUE);
    }
    /* Note: we need to fetch columns for each row as well to remove "range ghosts" */
    SlicePredicate predicate = new SlicePredicate().setSlice_range(columnSlice);

    RowPosition startPosition = start.minKeyBound(partitioner);
    RowPosition endPosition = end.minKeyBound(partitioner);

    List<Row> rows;

    try {
        CFMetaData cfm = Schema.instance.getCFMetaData(keyspace, columnFamily);
        IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, cfm, null);

        RangeSliceCommand cmd = new RangeSliceCommand(keyspace, columnFamily, nowMillis, filter, new Bounds<RowPosition>(startPosition, endPosition), pageSize);

        rows = StorageProxy.getRangeSlice(cmd, ConsistencyLevel.QUORUM);
    } catch (Exception e) {
        throw new PermanentBackendException(e);
    }

    return rows;
}
Project: titan1.0.1.kafka    File: CassandraBinaryInputFormat.java
private SliceRange getSliceRange(final SliceQuery slice, final int limit) {
    final SliceRange sliceRange = new SliceRange();
    sliceRange.setStart(slice.getSliceStart().asByteBuffer());
    sliceRange.setFinish(slice.getSliceEnd().asByteBuffer());
    sliceRange.setCount(Math.min(limit, slice.getLimit()));
    return sliceRange;
}
Project: openyu-commons    File: CassandraThriftDMLTest.java
/**
 * get: read all columns
 *
 * @throws Exception
 */
@Test
public void get2() throws Exception {
    String KEYSPACE = "mock";
    client.set_keyspace(KEYSPACE);

    // read all columns
    String COLUMN_FAMILY = "student";
    ColumnParent columnParent = new ColumnParent(COLUMN_FAMILY);

    // predicate
    SlicePredicate predicate = new SlicePredicate();

    // range
    SliceRange sliceRange = new SliceRange();
    // sliceRange.setStart(ByteBufferHelper.toByteBuffer(new byte[0])); // start
    sliceRange.setStart(new byte[0]); // start
    sliceRange.setFinish(new byte[0]); // finish
    sliceRange.setCount(100); // number of columns
    //
    predicate.setSlice_range(sliceRange);

    String ROW_KEY = "Jack";
    // results
    // key, column_parent, predicate, consistency_level
    List<ColumnOrSuperColumn> results = client.get_slice(
            ByteBufferHelper.toByteBuffer(ROW_KEY), columnParent,
            predicate, ConsistencyLevel.ONE);

    for (ColumnOrSuperColumn cos : results) {
        Column column = cos.getColumn();
        System.out.println(ROW_KEY + ", "
                + ByteHelper.toString(column.getName()) + ": "
                + ByteHelper.toString(column.getValue()) + ", "
                + column.getTimestamp());
        // Jack, art, 87, 1380788003220
        // Jack, grad, 5, 1380788003203
        // Jack, math, 97, 1380788003214
    }
}
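
get_slice returns at most SliceRange.count columns, so reading a row wider than the count requires paging: reissue the query with start set to the name of the last column received. The start bound is inclusive, so every page after the first skips its leading column. Below is a minimal paging sketch under those assumptions; client is a connected Cassandra.Client, the row key and column family are placeholders, ByteBufferHelper is the helper used in the test above, and imports and exception handling are omitted.

// Paging sketch; 'client' is assumed to be a connected Thrift Cassandra.Client.
ByteBuffer rowKey = ByteBufferHelper.toByteBuffer("Jack"); // placeholder row key
ColumnParent parent = new ColumnParent("student");         // placeholder CF
byte[] start = new byte[0];                                // begin at the first column
final int PAGE_SIZE = 100;

while (true) {
    SlicePredicate predicate = new SlicePredicate().setSlice_range(new SliceRange()
            .setStart(start)
            .setFinish(new byte[0])
            .setReversed(false)
            .setCount(PAGE_SIZE));
    List<ColumnOrSuperColumn> page =
            client.get_slice(rowKey, parent, predicate, ConsistencyLevel.ONE);

    boolean skipFirst = start.length > 0; // the inclusive start column was already seen
    for (ColumnOrSuperColumn cosc : page) {
        if (skipFirst) { skipFirst = false; continue; }
        Column column = cosc.getColumn();
        // ... process column ...
        start = column.getName(); // remember the last column for the next page
    }
    if (page.size() < PAGE_SIZE)
        break; // short page: the row is exhausted
}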
Project: titan0.5.4-hbase1.1.1-custom    File: CassandraEmbeddedKeyColumnValueStore.java
/**
 * Create a RangeSliceCommand and run it against the StorageProxy.
 * <p>
 * To match the behavior of the standard Cassandra thrift API endpoint, the
 * {@code nowMillis} argument should be the number of milliseconds since the
 * UNIX Epoch (e.g. System.currentTimeMillis() or equivalent obtained
 * through a {@link TimestampProvider}). This is per
 * {@link org.apache.cassandra.thrift.CassandraServer#get_range_slices(ColumnParent, SlicePredicate, KeyRange, ConsistencyLevel)},
 * which passes the server's System.currentTimeMillis() to the
 * {@code RangeSliceCommand} constructor.
 */
private List<Row> getKeySlice(Token start,
                              Token end,
                              @Nullable SliceQuery sliceQuery,
                              int pageSize,
                              long nowMillis) throws BackendException {
    IPartitioner<?> partitioner = StorageService.getPartitioner();

    SliceRange columnSlice = new SliceRange();
    if (sliceQuery == null) {
        columnSlice.setStart(ArrayUtils.EMPTY_BYTE_ARRAY)
                .setFinish(ArrayUtils.EMPTY_BYTE_ARRAY)
                .setCount(5);
    } else {
        columnSlice.setStart(sliceQuery.getSliceStart().asByteBuffer())
                .setFinish(sliceQuery.getSliceEnd().asByteBuffer())
                .setCount(sliceQuery.hasLimit() ? sliceQuery.getLimit() : Integer.MAX_VALUE);
    }
    /* Note: we need to fetch columns for each row as well to remove "range ghosts" */
    SlicePredicate predicate = new SlicePredicate().setSlice_range(columnSlice);

    RowPosition startPosition = start.minKeyBound(partitioner);
    RowPosition endPosition = end.minKeyBound(partitioner);

    List<Row> rows;

    try {
        CFMetaData cfm = Schema.instance.getCFMetaData(keyspace, columnFamily);
        IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, cfm, null);

        RangeSliceCommand cmd = new RangeSliceCommand(keyspace, columnFamily, nowMillis, filter, new Bounds<RowPosition>(startPosition, endPosition), pageSize);

        rows = StorageProxy.getRangeSlice(cmd, ConsistencyLevel.QUORUM);
    } catch (Exception e) {
        throw new PermanentBackendException(e);
    }

    return rows;
}
Project: titan0.5.4-hbase1.1.1-custom    File: TitanCassandraInputFormat.java
private SliceRange getSliceRange(final SliceQuery slice, final int limit) {
    final SliceRange sliceRange = new SliceRange();
    sliceRange.setStart(slice.getSliceStart().asByteBuffer());
    sliceRange.setFinish(slice.getSliceEnd().asByteBuffer());
    sliceRange.setCount(Math.min(limit, slice.getLimit()));
    return sliceRange;
}
Project: Cassandra-Wasef    File: RangeSliceCommand.java
public static SlicePredicate asSlicePredicate(IDiskAtomFilter predicate)
{
    SlicePredicate sp = new SlicePredicate();
    if (predicate instanceof NamesQueryFilter)
    {
        sp.setColumn_names(new ArrayList<ByteBuffer>(((NamesQueryFilter)predicate).columns));
    }
    else
    {
        SliceQueryFilter sqf = (SliceQueryFilter)predicate;
        sp.setSlice_range(new SliceRange(sqf.start(), sqf.finish(), sqf.reversed, sqf.count));
    }
    return sp;
}
Project: cassandra-cqlMod    File: ThriftCounterGetter.java
public void run(final ThriftClient client) throws IOException
{
    SliceRange sliceRange = new SliceRange();
    // start/finish
    sliceRange.setStart(new byte[] {}).setFinish(new byte[] {});
    // reversed/count
    sliceRange.setReversed(false).setCount(state.settings.columns.maxColumnsPerKey);
    // initialize SlicePredicate with existing SliceRange
    final SlicePredicate predicate = new SlicePredicate().setSlice_range(sliceRange);

    final ByteBuffer key = getKey();
    for (final ColumnParent parent : state.columnParents)
    {

        timeWithRetry(new RunOp()
        {
            @Override
            public boolean run() throws Exception
            {
                return client.get_slice(key, parent, predicate, state.settings.command.consistencyLevel).size() != 0;
            }

            @Override
            public String key()
            {
                return new String(key.array());
            }

            @Override
            public int keyCount()
            {
                return 1;
            }
        });
    }
}
Project: cassandra-cqlMod    File: ThriftReader.java
public void run(final ThriftClient client) throws IOException
{
    final SlicePredicate predicate = new SlicePredicate();
    if (state.settings.columns.names == null)
        predicate.setSlice_range(new SliceRange()
                .setStart(new byte[] {})
                .setFinish(new byte[] {})
                .setReversed(false)
                .setCount(state.settings.columns.maxColumnsPerKey)
        );
    else // see CASSANDRA-3064 about why this is useful
        predicate.setColumn_names(state.settings.columns.names);

    final ByteBuffer key = getKey();
    for (final ColumnParent parent : state.columnParents)
    {
        timeWithRetry(new RunOp()
        {
            @Override
            public boolean run() throws Exception
            {
                return client.get_slice(key, parent, predicate, state.settings.command.consistencyLevel).size() != 0;
            }

            @Override
            public String key()
            {
                return new String(key.array());
            }

            @Override
            public int keyCount()
            {
                return 1;
            }
        });
    }
}
Project: cassandra-cqlMod    File: ThriftMultiGetter.java
public void run(final ThriftClient client) throws IOException
{

    final SlicePredicate predicate = new SlicePredicate().setSlice_range(
            new SliceRange(
                    ByteBufferUtil.EMPTY_BYTE_BUFFER,
                    ByteBufferUtil.EMPTY_BYTE_BUFFER,
                    false,
                    state.settings.columns.maxColumnsPerKey
            )
    );

    final List<ByteBuffer> keys = getKeys(((SettingsCommandMulti) state.settings.command).keysAtOnce);

    for (final ColumnParent parent : state.columnParents)
    {
        timeWithRetry(new RunOp()
        {
            int count;
            @Override
            public boolean run() throws Exception
            {
                return (count = client.multiget_slice(keys, parent, predicate, state.settings.command.consistencyLevel).size()) != 0;
            }

            @Override
            public String key()
            {
                return keys.toString();
            }

            @Override
            public int keyCount()
            {
                return count;
            }
        });
    }
}
Project: wso2-cassandra    File: RangeSliceCommand.java
public static SlicePredicate asSlicePredicate(IDiskAtomFilter predicate)
{
    SlicePredicate sp = new SlicePredicate();
    if (predicate instanceof NamesQueryFilter)
    {
        sp.setColumn_names(new ArrayList<ByteBuffer>(((NamesQueryFilter)predicate).columns));
    }
    else
    {
        SliceQueryFilter sqf = (SliceQueryFilter)predicate;
        sp.setSlice_range(new SliceRange(sqf.start(), sqf.finish(), sqf.reversed, sqf.count));
    }
    return sp;
}
Project: cassandra-trunk    File: ThriftMultiGetter.java
public void run(final ThriftClient client) throws IOException
{

    final SlicePredicate predicate = new SlicePredicate().setSlice_range(
            new SliceRange(
                    ByteBufferUtil.EMPTY_BYTE_BUFFER,
                    ByteBufferUtil.EMPTY_BYTE_BUFFER,
                    false,
                    state.settings.columns.maxColumnsPerKey
            )
    );

    final List<ByteBuffer> keys = getKeys(state.settings.command.keysAtOnce);

    for (final ColumnParent parent : state.columnParents)
    {
        timeWithRetry(new RunOp()
        {
            int count;
            @Override
            public boolean run() throws Exception
            {
                return (count = client.multiget_slice(keys, parent, predicate, state.settings.command.consistencyLevel).size()) != 0;
            }

            @Override
            public String key()
            {
                return keys.toString();
            }

            @Override
            public int keyCount()
            {
                return count;
            }
        });
    }
}
Project: cassandra-1.2.16    File: RangeSliceCommand.java
public static SlicePredicate asSlicePredicate(IDiskAtomFilter predicate)
{
    SlicePredicate sp = new SlicePredicate();
    if (predicate instanceof NamesQueryFilter)
    {
        sp.setColumn_names(new ArrayList<ByteBuffer>(((NamesQueryFilter)predicate).columns));
    }
    else
    {
        SliceQueryFilter sqf = (SliceQueryFilter)predicate;
        sp.setSlice_range(new SliceRange(sqf.start(), sqf.finish(), sqf.reversed, sqf.count));
    }
    return sp;
}
Project: Doradus    File: DBConn.java
private static String toString(SlicePredicate slicePred) {
    StringBuilder buffer = new StringBuilder();
    if (slicePred.isSetColumn_names()) {
        buffer.append("Columns(");
        buffer.append(slicePred.getColumn_names().size());
        buffer.append(" total)");
    } else if (slicePred.isSetSlice_range()) {
        SliceRange sliceRange = slicePred.getSlice_range();
        ByteBuffer startCol = sliceRange.start;
        String startColStr = "<null>";
        if (startCol != null) {
            startColStr = Utils.toString(startCol.array(), startCol.arrayOffset(), startCol.limit());
        }
        if (startColStr.length() == 0) {
            startColStr = "<first>";
        }
        ByteBuffer endCol = sliceRange.finish;
        String endColStr = "<null>";
        if (endCol != null) {
            endColStr = Utils.toString(endCol.array(), endCol.arrayOffset(), endCol.limit());
        }
        if (endColStr.length() == 0) {
            endColStr = "<last>";
        }
        if (startColStr.equals("<first>") && endColStr.equals("<last>")) {
            buffer.append("Slice(<all>)");
        } else {
            buffer.append("Slice('");
            buffer.append(startColStr);
            buffer.append("' to '");
            buffer.append(endColStr);
            buffer.append("')");
        }
    }
    return buffer.toString();
}
Project: Doradus    File: CassandraDefs.java
/**
 * Create a SlicePredicate that selects columns from the given start column name
 * to the given end column name, up to {@link #MAX_COLS_BATCH_SIZE} columns.
 * 
 * @param startColName  Starting column name as a byte[] (null means unbounded).
 * @param endColName    Ending column name as a byte[] (null means unbounded).
 * @param reversed      True to slice columns in reverse order.
 * @return              SlicePredicate that starts at the given starting column name,
 *                      ends at the given ending column name, selecting up to
 *                      {@link #MAX_COLS_BATCH_SIZE} columns.
 */
static SlicePredicate slicePredicateStartEndCol(byte[] startColName, byte[] endColName, boolean reversed) {
    if(startColName == null) startColName = EMPTY_BYTES;
    if(endColName == null) endColName = EMPTY_BYTES;
    SliceRange sliceRange =
        new SliceRange(
                ByteBuffer.wrap(startColName), ByteBuffer.wrap(endColName), 
                reversed, CassandraDefs.MAX_COLS_BATCH_SIZE);
    SlicePredicate slicePred = new SlicePredicate();
    slicePred.setSlice_range(sliceRange);
    return slicePred;
}
Project: Cassandra-KVPM    File: QueryFilter.java
public static IFilter getFilter(SlicePredicate predicate, AbstractType comparator)
{
    if (predicate.column_names != null)
    {
        final SortedSet<ByteBuffer> columnNameSet = new TreeSet<ByteBuffer>(comparator);
        columnNameSet.addAll(predicate.column_names);
        return new NamesQueryFilter(columnNameSet);
    }

    SliceRange range = predicate.slice_range;
    return new SliceQueryFilter(range.start, range.finish, range.reversed, range.count);
}
Project: Cassandra-KVPM    File: SerializationsTest.java
private void testRangeSliceCommandWrite() throws IOException
{
    ByteBuffer startCol = ByteBufferUtil.bytes("Start");
    ByteBuffer stopCol = ByteBufferUtil.bytes("Stop");
    ByteBuffer emptyCol = ByteBufferUtil.bytes("");
    SlicePredicate namesPred = new SlicePredicate();
    namesPred.column_names = Statics.NamedCols;
    SliceRange emptySliceRange = new SliceRange(emptyCol, emptyCol, false, 100); 
    SliceRange nonEmptySliceRange = new SliceRange(startCol, stopCol, true, 100);
    SlicePredicate emptyRangePred = new SlicePredicate();
    emptyRangePred.slice_range = emptySliceRange;
    SlicePredicate nonEmptyRangePred = new SlicePredicate();
    nonEmptyRangePred.slice_range = nonEmptySliceRange;
    IPartitioner part = StorageService.getPartitioner();
    AbstractBounds bounds = new Range(part.getRandomToken(), part.getRandomToken());

    Message namesCmd = new RangeSliceCommand(Statics.KS, "Standard1", null, namesPred, bounds, 100).getMessage(MessagingService.version_);
    Message emptyRangeCmd = new RangeSliceCommand(Statics.KS, "Standard1", null, emptyRangePred, bounds, 100).getMessage(MessagingService.version_);
    Message regRangeCmd = new RangeSliceCommand(Statics.KS, "Standard1", null,  nonEmptyRangePred, bounds, 100).getMessage(MessagingService.version_);
    Message namesCmdSup = new RangeSliceCommand(Statics.KS, "Super1", Statics.SC, namesPred, bounds, 100).getMessage(MessagingService.version_);
    Message emptyRangeCmdSup = new RangeSliceCommand(Statics.KS, "Super1", Statics.SC, emptyRangePred, bounds, 100).getMessage(MessagingService.version_);
    Message regRangeCmdSup = new RangeSliceCommand(Statics.KS, "Super1", Statics.SC,  nonEmptyRangePred, bounds, 100).getMessage(MessagingService.version_);

    DataOutputStream dout = getOutput("db.RangeSliceCommand.bin");

    Message.serializer().serialize(namesCmd, dout, getVersion());
    Message.serializer().serialize(emptyRangeCmd, dout, getVersion());
    Message.serializer().serialize(regRangeCmd, dout, getVersion());
    Message.serializer().serialize(namesCmdSup, dout, getVersion());
    Message.serializer().serialize(emptyRangeCmdSup, dout, getVersion());
    Message.serializer().serialize(regRangeCmdSup, dout, getVersion());
    dout.close();
}
Project: CadalWorkspace    File: CasTimePV.java
/**
 * Query from cf [RecordMinute]
 * @param queryWord
 * @return
 */
public int QueryRecordMinute(String queryWord) {
    TTransport tr = new TFramedTransport(new TSocket("10.15.61.118", 9160));
    TProtocol proto = new TBinaryProtocol(tr);
    Cassandra.Client client = new Cassandra.Client(proto);

    try {
        tr.open();
        client.set_keyspace("CadalSecTest");

        // read the entire row
        SlicePredicate predicate = new SlicePredicate(); // alternatively: new SliceRange(new byte[0], new byte[0], false, 10)
        SliceRange range = new SliceRange();
        range.start = Utils.toByteBuffer("");
        range.finish = Utils.toByteBuffer("");
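        // note: count is not set, so the Thrift default of 100 columns applies;
        // a row wider than that would be silently truncated here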
        predicate.setSlice_range(range);

        ColumnParent parent = new ColumnParent();
        parent.column_family = "RecordMinute";

        List<ColumnOrSuperColumn> results = client.get_slice(Utils.toByteBuffer(queryWord), parent, predicate, ConsistencyLevel.ONE);

        tr.close();

        return results.size();
    } catch (Exception e) {
        e.printStackTrace();
    }

    return 0;
}
Project: CadalWorkspace    File: QueryChapterInfo.java
/**
     * Query from CF "UserChapter"
     * @param userid the row key of this CF, e.g. '119115'
     */
    public List<String> QueryFromUserChapter(int userid){
//      System.out.println("------------QueryFromUserChapter--------------");

        List<String> listStr = new ArrayList<String>();

        try {
            SlicePredicate predicate = new SlicePredicate();
            SliceRange range = new SliceRange();
            range.start = this.cassandraUtil.toByteBuffer("");
            range.finish = this.cassandraUtil.toByteBuffer("");
            range.setCount(10000000);
            predicate.setSlice_range(range);

            ColumnParent parent = new ColumnParent();
            parent.column_family = "UserChapter";    // CF name

            List<ColumnOrSuperColumn> results = client.get_slice(this.cassandraUtil.toByteBuffer(String.valueOf(userid)), parent, predicate,ConsistencyLevel.ONE);

            for (ColumnOrSuperColumn result : results) {
                Column column1 = result.column;
                listStr.add(new String(column1.getName(), "UTF-8"));
            }

            return listStr;

        } catch (Exception e) {
            return listStr;
        }
    }
Project: titan1withtp3.1    File: CassandraEmbeddedKeyColumnValueStore.java
/**
 * Create a RangeSliceCommand and run it against the StorageProxy.
 * <p>
 * To match the behavior of the standard Cassandra thrift API endpoint, the
 * {@code nowMillis} argument should be the number of milliseconds since the
 * UNIX Epoch (e.g. System.currentTimeMillis() or equivalent obtained
 * through a {@link TimestampProvider}). This is per
 * {@link org.apache.cassandra.thrift.CassandraServer#get_range_slices(ColumnParent, SlicePredicate, KeyRange, ConsistencyLevel)},
 * which passes the server's System.currentTimeMillis() to the
 * {@code RangeSliceCommand} constructor.
 */
private List<Row> getKeySlice(Token start,
                              Token end,
                              @Nullable SliceQuery sliceQuery,
                              int pageSize,
                              long nowMillis) throws BackendException {
    IPartitioner partitioner = StorageService.getPartitioner();

    SliceRange columnSlice = new SliceRange();
    if (sliceQuery == null) {
        columnSlice.setStart(ArrayUtils.EMPTY_BYTE_ARRAY)
                .setFinish(ArrayUtils.EMPTY_BYTE_ARRAY)
                .setCount(5);
    } else {
        columnSlice.setStart(sliceQuery.getSliceStart().asByteBuffer())
                .setFinish(sliceQuery.getSliceEnd().asByteBuffer())
                .setCount(sliceQuery.hasLimit() ? sliceQuery.getLimit() : Integer.MAX_VALUE);
    }
    /* Note: we need to fetch columns for each row as well to remove "range ghosts" */
    SlicePredicate predicate = new SlicePredicate().setSlice_range(columnSlice);

    // DAVID CASSANDRA
    // Old Cassandra code did not use the partitioner in this call anyway, so the new code removed it as a parameter
    // RowPosition startPosition = start.minKeyBound(partitioner);
    RowPosition startPosition = start.minKeyBound();
    // DAVID CASSANDRA
    // RowPosition endPosition = end.minKeyBound(partitioner);
    RowPosition endPosition = end.minKeyBound();

    List<Row> rows;

    try {
        CFMetaData cfm = Schema.instance.getCFMetaData(keyspace, columnFamily);
        IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, cfm, null);

        RangeSliceCommand cmd = new RangeSliceCommand(keyspace, columnFamily, nowMillis, filter, new Bounds<RowPosition>(startPosition, endPosition), pageSize);

        rows = StorageProxy.getRangeSlice(cmd, ConsistencyLevel.QUORUM);
    } catch (Exception e) {
        throw new PermanentBackendException(e);
    }

    return rows;
}
Project: cassandra-kmean    File: ColumnFamilyStoreTest.java
@Test
public void testDeleteSuperRowSticksAfterFlush() throws Throwable
{
    String keyspaceName = "Keyspace1";
    String cfName= "Super1";
    ByteBuffer scfName = ByteBufferUtil.bytes("SuperDuper");
    Keyspace keyspace = Keyspace.open(keyspaceName);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("flush-resurrection");

    // create an isolated sstable.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(1L), ByteBufferUtil.bytes("val1"), 1),
            new BufferCell(cellname(2L), ByteBufferUtil.bytes("val2"), 1),
            new BufferCell(cellname(3L), ByteBufferUtil.bytes("val3"), 1));
    cfs.forceBlockingFlush();

    // insert, don't flush.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(4L), ByteBufferUtil.bytes("val4"), 1),
            new BufferCell(cellname(5L), ByteBufferUtil.bytes("val5"), 1),
            new BufferCell(cellname(6L), ByteBufferUtil.bytes("val6"), 1));

    // verify insert.
    final SlicePredicate sp = new SlicePredicate();
    sp.setSlice_range(new SliceRange());
    sp.getSlice_range().setCount(100);
    sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY);
    sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY);

    assertRowAndColCount(1, 6, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // delete
    Mutation rm = new Mutation(keyspace.getName(), key.getKey());
    rm.deleteRange(cfName, SuperColumns.startOf(scfName), SuperColumns.endOf(scfName), 2);
    rm.apply();

    // verify delete.
    assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // flush
    cfs.forceBlockingFlush();

    // re-verify delete.
    assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // late insert.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(4L), ByteBufferUtil.bytes("val4"), 1L),
            new BufferCell(cellname(7L), ByteBufferUtil.bytes("val7"), 1L));

    // re-verify delete.
    assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // make sure new writes are recognized.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(3L), ByteBufferUtil.bytes("val3"), 3),
            new BufferCell(cellname(8L), ByteBufferUtil.bytes("val8"), 3),
            new BufferCell(cellname(9L), ByteBufferUtil.bytes("val9"), 3));
    assertRowAndColCount(1, 3, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));
}
Project: cassandra-kmean    File: ColumnFamilyStoreTest.java
@Test
public void testDeleteStandardRowSticksAfterFlush() throws Throwable
{
    // test to make sure flushing after a delete doesn't resurrect deleted cols.
    String keyspaceName = "Keyspace1";
    String cfName = "Standard1";
    Keyspace keyspace = Keyspace.open(keyspaceName);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("f-flush-resurrection");

    SlicePredicate sp = new SlicePredicate();
    sp.setSlice_range(new SliceRange());
    sp.getSlice_range().setCount(100);
    sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY);
    sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY);

    // insert
    putColsStandard(cfs, key, column("col1", "val1", 1), column("col2", "val2", 1));
    assertRowAndColCount(1, 2, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // flush.
    cfs.forceBlockingFlush();

    // insert, don't flush
    putColsStandard(cfs, key, column("col3", "val3", 1), column("col4", "val4", 1));
    assertRowAndColCount(1, 4, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // delete (from sstable and memtable)
    Mutation rm = new Mutation(keyspace.getName(), key.getKey());
    rm.delete(cfs.name, 2);
    rm.apply();

    // verify delete
    assertRowAndColCount(1, 0, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // flush
    cfs.forceBlockingFlush();

    // re-verify delete. // first breakage is right here because of CASSANDRA-1837.
    assertRowAndColCount(1, 0, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // simulate a 'late' insertion that gets put in after the deletion. should get inserted, but fail on read.
    putColsStandard(cfs, key, column("col5", "val5", 1), column("col2", "val2", 1));

    // should still be nothing there because we deleted this row. 2nd breakage, but was undetected because of 1837.
    assertRowAndColCount(1, 0, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // make sure that new writes are recognized.
    putColsStandard(cfs, key, column("col6", "val6", 3), column("col7", "val7", 3));
    assertRowAndColCount(1, 2, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // and it remains so after flush. (this wasn't failing before, but it's good to check.)
    cfs.forceBlockingFlush();
    assertRowAndColCount(1, 2, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));
}
Project: GraphTrek    File: ColumnFamilyStoreTest.java
@Test
public void testDeleteSuperRowSticksAfterFlush() throws Throwable
{
    String keyspaceName = KEYSPACE1;
    String cfName= CF_SUPER1;
    ByteBuffer scfName = ByteBufferUtil.bytes("SuperDuper");
    Keyspace keyspace = Keyspace.open(keyspaceName);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("flush-resurrection");

    // create an isolated sstable.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(1L), ByteBufferUtil.bytes("val1"), 1),
            new BufferCell(cellname(2L), ByteBufferUtil.bytes("val2"), 1),
            new BufferCell(cellname(3L), ByteBufferUtil.bytes("val3"), 1));
    cfs.forceBlockingFlush();

    // insert, don't flush.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(4L), ByteBufferUtil.bytes("val4"), 1),
            new BufferCell(cellname(5L), ByteBufferUtil.bytes("val5"), 1),
            new BufferCell(cellname(6L), ByteBufferUtil.bytes("val6"), 1));

    // verify insert.
    final SlicePredicate sp = new SlicePredicate();
    sp.setSlice_range(new SliceRange());
    sp.getSlice_range().setCount(100);
    sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY);
    sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY);

    assertRowAndColCount(1, 6, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // delete
    Mutation rm = new Mutation(keyspace.getName(), key.getKey());
    rm.deleteRange(cfName, SuperColumns.startOf(scfName), SuperColumns.endOf(scfName), 2);
    rm.applyUnsafe();

    // verify delete.
    assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // flush
    cfs.forceBlockingFlush();

    // re-verify delete.
    assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // late insert.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(4L), ByteBufferUtil.bytes("val4"), 1L),
            new BufferCell(cellname(7L), ByteBufferUtil.bytes("val7"), 1L));

    // re-verify delete.
    assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // make sure new writes are recognized.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(3L), ByteBufferUtil.bytes("val3"), 3),
            new BufferCell(cellname(8L), ByteBufferUtil.bytes("val8"), 3),
            new BufferCell(cellname(9L), ByteBufferUtil.bytes("val9"), 3));
    assertRowAndColCount(1, 3, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));
}
Project: GraphTrek    File: ColumnFamilyStoreTest.java
@Test
public void testDeleteStandardRowSticksAfterFlush() throws Throwable
{
    // test to make sure flushing after a delete doesn't resurrect deleted cols.
    String keyspaceName = KEYSPACE1;
    String cfName = CF_STANDARD1;
    Keyspace keyspace = Keyspace.open(keyspaceName);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("f-flush-resurrection");

    SlicePredicate sp = new SlicePredicate();
    sp.setSlice_range(new SliceRange());
    sp.getSlice_range().setCount(100);
    sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY);
    sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY);

    // insert
    putColsStandard(cfs, key, column("col1", "val1", 1), column("col2", "val2", 1));
    assertRowAndColCount(1, 2, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // flush.
    cfs.forceBlockingFlush();

    // insert, don't flush
    putColsStandard(cfs, key, column("col3", "val3", 1), column("col4", "val4", 1));
    assertRowAndColCount(1, 4, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // delete (from sstable and memtable)
    Mutation rm = new Mutation(keyspace.getName(), key.getKey());
    rm.delete(cfs.name, 2);
    rm.applyUnsafe();

    // verify delete
    assertRowAndColCount(1, 0, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // flush
    cfs.forceBlockingFlush();

    // re-verify delete. // first breakage is right here because of CASSANDRA-1837.
    assertRowAndColCount(1, 0, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // simulate a 'late' insertion that gets put in after the deletion. should get inserted, but fail on read.
    putColsStandard(cfs, key, column("col5", "val5", 1), column("col2", "val2", 1));

    // should still be nothing there because we deleted this row. 2nd breakage, but was undetected because of 1837.
    assertRowAndColCount(1, 0, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // make sure that new writes are recognized.
    putColsStandard(cfs, key, column("col6", "val6", 3), column("col7", "val7", 3));
    assertRowAndColCount(1, 2, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // and it remains so after flush. (this wasn't failing before, but it's good to check.)
    cfs.forceBlockingFlush();
    assertRowAndColCount(1, 2, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));
}
Project: learning-hadoop    File: CassandraInputData.java
public void sliceModeInit(CassandraColumnMetaData meta,
    List<String> colNames, int maxRows, int maxCols, int rowBatchSize,
    int colBatchSize) throws KettleException {

  m_newSliceQuery = true;
  m_requestedCols = colNames;
  m_sliceRowsMax = maxRows;
  m_sliceColsMax = maxCols;
  m_sliceRowsBatchSize = rowBatchSize;
  m_sliceColsBatchSize = colBatchSize;
  m_rowIndex = 0;
  m_colIndex = 0;

  if (m_sliceColsBatchSize <= 0) {
    m_sliceColsBatchSize = Integer.MAX_VALUE;
  }

  if (m_sliceRowsBatchSize <= 0) {
    m_sliceRowsBatchSize = Integer.MAX_VALUE;
  }

  List<ByteBuffer> specificCols = null;
  if (m_requestedCols != null && m_requestedCols.size() > 0) {
    specificCols = new ArrayList<ByteBuffer>();

    // encode the textual column names
    for (String colName : m_requestedCols) {
      ByteBuffer encoded = meta.columnNameToByteBuffer(colName);
      specificCols.add(encoded);
    }
  }

  m_slicePredicate = new SlicePredicate();

  if (specificCols == null) {
    m_sliceRange = new SliceRange(ByteBuffer.wrap(new byte[0]),
        ByteBuffer.wrap(new byte[0]), false, m_sliceColsBatchSize);
    m_slicePredicate.setSlice_range(m_sliceRange);
  } else {
    m_slicePredicate.setColumn_names(specificCols);
  }

  m_keyRange = new KeyRange(m_sliceRowsBatchSize);
  m_keyRange.setStart_key(new byte[0]);
  m_keyRange.setEnd_key(new byte[0]);

  m_colParent = new ColumnParent(meta.getColumnFamilyName());
  m_converted = new ArrayList<Object[]>();
}
Project: hadoop-in-action    File: SimpleWriteRead.java
public static void main(String[] args) throws UnsupportedEncodingException,
            InvalidRequestException, UnavailableException, TimedOutException,
            TException, NotFoundException {

        TTransport tr = new TSocket(HOST, PORT);
        //new default in 0.7 is framed transport
        TFramedTransport tf = new TFramedTransport(tr);
        TProtocol proto = new TBinaryProtocol(tf);
        Cassandra.Client client = new Cassandra.Client(proto);
        tf.open();
        client.set_keyspace("Keyspace1");

        String cfName = "Standard1";
        ByteBuffer userIDKey = ByteBuffer.wrap("1".getBytes()); //this is a row key

//      Clock clock = new Clock(System.currentTimeMillis());

        ColumnParent cp = new ColumnParent(cfName);

        //insert the name column
        log.debug("Inserting row for key {}" , userIDKey.toString());
        Column nameCol = new Column(ByteBuffer.wrap("name".getBytes(UTF8)));
        nameCol.setValue(ByteBuffer.wrap("George Clinton".getBytes()));
        client.insert(userIDKey, cp, nameCol, CL);

        //insert the Age column
        Column ageCol = new Column(ByteBuffer.wrap("age".getBytes(UTF8)));
        ageCol.setValue(ByteBuffer.wrap("69".getBytes()));
        client.insert(userIDKey, cp, ageCol, CL);

        log.debug("Row insert done.");

        // read just the Name column
        log.debug("Reading Name Column:");

        //create a representation of the Name column
        ColumnPath colPathName = new ColumnPath(cfName);
        colPathName.setColumn("name".getBytes(UTF8));
        Column col = client.get(userIDKey, colPathName,
                CL).getColumn();

        /*LOG.debug("Column name: " + new String(col.name, UTF8));
        LOG.debug("Column value: " + new String(col.value, UTF8));
        LOG.debug("Column timestamp: " + col.clock.timestamp);*/

        //create a slice predicate representing the columns to read
        //start and finish are the range of columns--here, all
        SlicePredicate predicate = new SlicePredicate();
        SliceRange sliceRange = new SliceRange();
        sliceRange.setStart(new byte[0]);
        sliceRange.setFinish(new byte[0]);
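        // count is left at the Thrift default (100), which bounds this full-row read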
        predicate.setSlice_range(sliceRange);

        log.debug("Complete Row:");
        // read all columns in the row
        ColumnParent parent = new ColumnParent(cfName);
        List<ColumnOrSuperColumn> results = 
            client.get_slice(userIDKey, 
                    parent, predicate, CL);

        //loop over columns, outputting values
        for (ColumnOrSuperColumn result : results) {
            Column column = result.column;
            log.info("Column: {}, Value: {}", new String(column.getName(), UTF8), new String(column.getValue(), UTF8));
        }
        tf.close();

        log.debug("All done.");
    }
Project: hadoop-in-action    File: HotelApp.java
public List<POI> findPOIByHotel(String hotel) throws Exception {

        // /query
        SlicePredicate predicate = new SlicePredicate();
        SliceRange sliceRange = new SliceRange();
        sliceRange.setStart(hotel.getBytes());
        sliceRange.setFinish(hotel.getBytes());
        predicate.setSlice_range(sliceRange);

        // read all columns in the row
        String scFamily = "PointOfInterest";
        ColumnParent parent = new ColumnParent(scFamily);

        KeyRange keyRange = new KeyRange();
        keyRange.start_key = bytes("");
        keyRange.end_key = bytes("");

        List<POI> pois = new ArrayList<POI>();

        // instead of a simple list, we get a map whose keys are row keys
        // and the values the list of columns returned for each
        // only row key + first column are indexed
        Connector cl = new Connector();
        Cassandra.Client client = cl.connect();
        List<KeySlice> slices = client.get_range_slices(parent, predicate, keyRange, CL);

        for (KeySlice slice : slices) {
            List<ColumnOrSuperColumn> cols = slice.columns;

            POI poi = new POI();
            poi.name = new String(ByteBufferUtil.string(slice.key));

            for (ColumnOrSuperColumn cosc : cols) {
                SuperColumn sc = cosc.super_column;

                List<Column> colsInSc = sc.columns;

                for (Column c : colsInSc) {
                    String colName = new String(c.name.array(), UTF8);
                    if (colName.equals("desc")) {
                        poi.desc = new String(c.value.array(), UTF8);
                    }
                    if (colName.equals("phone")) {
                        poi.phone = new String(c.value.array(), UTF8);
                    }
                }

                LOG.debug("Found something neat nearby: " + poi.name + ". \nDesc: " + poi.desc + ". \nPhone: "
                        + poi.phone);
                pois.add(poi);
            }
        }

        cl.close();
        return pois;
    }
Project: hadoop-in-action    File: HotelApp.java
public List<Hotel> findHotelByCity(String city, String state) throws Exception {

        LOG.debug("Seaching for hotels in " + city + ", " + state);

        String key = city + ":" + state.toUpperCase();

        // /query
        SlicePredicate predicate = new SlicePredicate();
        SliceRange sliceRange = new SliceRange();
        sliceRange.setStart(new byte[0]);
        sliceRange.setFinish(new byte[0]);
        predicate.setSlice_range(sliceRange);

        // read all columns in the row
        String columnFamily = "HotelByCity";
        ColumnParent parent = new ColumnParent(columnFamily);

        KeyRange keyRange = new KeyRange();
        keyRange.setStart_key(key.getBytes());
        keyRange.setEnd_key("".getBytes()); // empty end key means no upper bound
        keyRange.count = 5;

        Connector cl = new Connector();
        Cassandra.Client client = cl.connect();
        List<KeySlice> keySlices = client.get_range_slices(parent, predicate, keyRange, CL);

        List<Hotel> results = new ArrayList<Hotel>();

        for (KeySlice ks : keySlices) {
            List<ColumnOrSuperColumn> coscs = ks.columns;
            LOG.debug("Using key " + ks.key);

            for (ColumnOrSuperColumn cs : coscs) {

                Hotel hotel = new Hotel();
                hotel.name = ByteBufferUtil.string(cs.column.name);
                hotel.city = city;
                hotel.state = state;

                results.add(hotel);
                LOG.debug("Found hotel result for " + hotel.name);
            }
        }
        // /end query
        cl.close();

        return results;
    }
Project: stratio-cassandra    File: ColumnFamilyStoreTest.java
@Test
public void testDeleteSuperRowSticksAfterFlush() throws Throwable
{
    String keyspaceName = "Keyspace1";
    String cfName= "Super1";
    ByteBuffer scfName = ByteBufferUtil.bytes("SuperDuper");
    Keyspace keyspace = Keyspace.open(keyspaceName);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("flush-resurrection");

    // create an isolated sstable.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(1L), ByteBufferUtil.bytes("val1"), 1),
            new BufferCell(cellname(2L), ByteBufferUtil.bytes("val2"), 1),
            new BufferCell(cellname(3L), ByteBufferUtil.bytes("val3"), 1));
    cfs.forceBlockingFlush();

    // insert, don't flush.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(4L), ByteBufferUtil.bytes("val4"), 1),
            new BufferCell(cellname(5L), ByteBufferUtil.bytes("val5"), 1),
            new BufferCell(cellname(6L), ByteBufferUtil.bytes("val6"), 1));

    // verify insert.
    final SlicePredicate sp = new SlicePredicate();
    sp.setSlice_range(new SliceRange());
    sp.getSlice_range().setCount(100);
    sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY);
    sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY);

    assertRowAndColCount(1, 6, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // delete
    Mutation rm = new Mutation(keyspace.getName(), key.getKey());
    rm.deleteRange(cfName, SuperColumns.startOf(scfName), SuperColumns.endOf(scfName), 2);
    rm.apply();

    // verify delete.
    assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // flush
    cfs.forceBlockingFlush();

    // re-verify delete.
    assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // late insert.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(4L), ByteBufferUtil.bytes("val4"), 1L),
            new BufferCell(cellname(7L), ByteBufferUtil.bytes("val7"), 1L));

    // re-verify delete.
    assertRowAndColCount(1, 0, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));

    // make sure new writes are recognized.
    putColsSuper(cfs, key, scfName,
            new BufferCell(cellname(3L), ByteBufferUtil.bytes("val3"), 3),
            new BufferCell(cellname(8L), ByteBufferUtil.bytes("val8"), 3),
            new BufferCell(cellname(9L), ByteBufferUtil.bytes("val9"), 3));
    assertRowAndColCount(1, 3, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, scfName), 100));
}
Project: stratio-cassandra    File: ColumnFamilyStoreTest.java
@Test
public void testDeleteStandardRowSticksAfterFlush() throws Throwable
{
    // test to make sure flushing after a delete doesn't resurrect deleted cols.
    String keyspaceName = "Keyspace1";
    String cfName = "Standard1";
    Keyspace keyspace = Keyspace.open(keyspaceName);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("f-flush-resurrection");

    SlicePredicate sp = new SlicePredicate();
    sp.setSlice_range(new SliceRange());
    sp.getSlice_range().setCount(100);
    sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY);
    sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY);

    // insert
    putColsStandard(cfs, key, column("col1", "val1", 1), column("col2", "val2", 1));
    assertRowAndColCount(1, 2, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // flush.
    cfs.forceBlockingFlush();

    // insert, don't flush
    putColsStandard(cfs, key, column("col3", "val3", 1), column("col4", "val4", 1));
    assertRowAndColCount(1, 4, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // delete (from sstable and memtable)
    Mutation rm = new Mutation(keyspace.getName(), key.getKey());
    rm.delete(cfs.name, 2);
    rm.apply();

    // verify delete
    assertRowAndColCount(1, 0, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // flush
    cfs.forceBlockingFlush();

    // re-verify delete. // first breakage is right here because of CASSANDRA-1837.
    assertRowAndColCount(1, 0, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // simulate a 'late' insertion that gets put in after the deletion. should get inserted, but fail on read.
    putColsStandard(cfs, key, column("col5", "val5", 1), column("col2", "val2", 1));

    // should still be nothing there because we deleted this row. 2nd breakage, but was undetected because of 1837.
    assertRowAndColCount(1, 0, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // make sure that new writes are recognized.
    putColsStandard(cfs, key, column("col6", "val6", 3), column("col7", "val7", 3));
    assertRowAndColCount(1, 2, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));

    // and it remains so after flush. (this wasn't failing before, but it's good to check.)
    cfs.forceBlockingFlush();
    assertRowAndColCount(1, 2, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));
}
Project: cassandra-cqlMod    File: ThriftRangeSlicer.java
@Override
public void run(final ThriftClient client) throws IOException
{
    final SlicePredicate predicate = new SlicePredicate()
            .setSlice_range(
                    new SliceRange(
                            ByteBufferUtil.EMPTY_BYTE_BUFFER,
                            ByteBufferUtil.EMPTY_BYTE_BUFFER,
                            false,
                            state.settings.columns.maxColumnsPerKey
                    )
            );

    final ByteBuffer start = getKey();
    final KeyRange range =
            new KeyRange(state.settings.columns.maxColumnsPerKey)
                    .setStart_key(start)
                    .setEnd_key(ByteBufferUtil.EMPTY_BYTE_BUFFER)
                    .setCount(((SettingsCommandMulti)state.settings.command).keysAtOnce);

    for (final ColumnParent parent : state.columnParents)
    {
        timeWithRetry(new RunOp()
        {
            private int count = 0;
            @Override
            public boolean run() throws Exception
            {
                return (count = client.get_range_slices(parent, predicate, range, state.settings.command.consistencyLevel).size()) != 0;
            }

            @Override
            public String key()
            {
                return new String(range.bufferForStart_key().array());
            }

            @Override
            public int keyCount()
            {
                return count;
            }
        });
    }
}
Project: cassandra-trunk    File: ThriftRangeSlicer.java
@Override
public void run(final ThriftClient client) throws IOException
{
    final SlicePredicate predicate = new SlicePredicate()
            .setSlice_range(
                    new SliceRange(
                            ByteBufferUtil.EMPTY_BYTE_BUFFER,
                            ByteBufferUtil.EMPTY_BYTE_BUFFER,
                            false,
                            state.settings.columns.maxColumnsPerKey
                    )
            );

    final ByteBuffer start = getKey();
    final KeyRange range =
            new KeyRange(state.settings.columns.maxColumnsPerKey)
                    .setStart_key(start)
                    .setEnd_key(ByteBufferUtil.EMPTY_BYTE_BUFFER)
                    .setCount(state.settings.command.keysAtOnce);

    for (final ColumnParent parent : state.columnParents)
    {
        timeWithRetry(new RunOp()
        {
            private int count = 0;
            @Override
            public boolean run() throws Exception
            {
                return (count = client.get_range_slices(parent, predicate, range, state.settings.command.consistencyLevel).size()) != 0;
            }

            @Override
            public String key()
            {
                return new String(range.bufferForStart_key().array());
            }

            @Override
            public int keyCount()
            {
                return count;
            }
        });
    }
}
Project: hive-cassandra    File: HiveCqlInputFormat.java
@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
    String ks = jobConf.get(AbstractCassandraSerDe.CASSANDRA_KEYSPACE_NAME);
    String cf = jobConf.get(AbstractCassandraSerDe.CASSANDRA_CF_NAME);
    int slicePredicateSize = jobConf.getInt(AbstractCassandraSerDe.CASSANDRA_SLICE_PREDICATE_SIZE,
            AbstractCassandraSerDe.DEFAULT_SLICE_PREDICATE_SIZE);
    int sliceRangeSize = jobConf.getInt(
            AbstractCassandraSerDe.CASSANDRA_RANGE_BATCH_SIZE,
            AbstractCassandraSerDe.DEFAULT_RANGE_BATCH_SIZE);
    int splitSize = jobConf.getInt(
            AbstractCassandraSerDe.CASSANDRA_SPLIT_SIZE,
            AbstractCassandraSerDe.DEFAULT_SPLIT_SIZE);
    String cassandraColumnMapping = jobConf.get(AbstractCassandraSerDe.CASSANDRA_COL_MAPPING);
    int rpcPort = jobConf.getInt(AbstractCassandraSerDe.CASSANDRA_PORT, 9160);
    String host = jobConf.get(AbstractCassandraSerDe.CASSANDRA_HOST);
    String partitioner = jobConf.get(AbstractCassandraSerDe.CASSANDRA_PARTITIONER);

    if (cassandraColumnMapping == null) {
        throw new IOException("cassandra.columns.mapping required for Cassandra Table.");
    }

    SliceRange range = new SliceRange();
    range.setStart(new byte[0]);
    range.setFinish(new byte[0]);
    range.setReversed(false);
    range.setCount(slicePredicateSize);
    SlicePredicate predicate = new SlicePredicate();
    predicate.setSlice_range(range);

    ConfigHelper.setInputRpcPort(jobConf, "" + rpcPort);
    ConfigHelper.setInputInitialAddress(jobConf, host);
    ConfigHelper.setInputPartitioner(jobConf, partitioner);
    ConfigHelper.setInputSlicePredicate(jobConf, predicate);
    ConfigHelper.setInputColumnFamily(jobConf, ks, cf);
    ConfigHelper.setRangeBatchSize(jobConf, sliceRangeSize);
    ConfigHelper.setInputSplitSize(jobConf, splitSize);

    Job job = new Job(jobConf);
    JobContext jobContext = new JobContextImpl(job.getConfiguration(), job.getJobID());

    Path[] tablePaths = FileInputFormat.getInputPaths(jobContext);
    List<org.apache.hadoop.mapreduce.InputSplit> splits = getSplits(jobContext);
    InputSplit[] results = new InputSplit[splits.size()];

    for (int i = 0; i < splits.size(); ++i) {
        HiveCassandraStandardSplit csplit = new HiveCassandraStandardSplit(
                (ColumnFamilySplit) splits.get(i), cassandraColumnMapping, tablePaths[0]);
        csplit.setKeyspace(ks);
        csplit.setColumnFamily(cf);
        csplit.setRangeBatchSize(sliceRangeSize);
        csplit.setSplitSize(splitSize);
        csplit.setHost(host);
        csplit.setPort(rpcPort);
        csplit.setSlicePredicateSize(slicePredicateSize);
        csplit.setPartitioner(partitioner);
        csplit.setColumnMapping(cassandraColumnMapping);
        results[i] = csplit;
    }
    return results;
}