@Override
public void setConf(final Configuration config) {
    super.setConf(config);

    // Copy some Titan configuration keys to the Hadoop Configuration keys used by Cassandra's ColumnFamilyInputFormat
    ConfigHelper.setInputInitialAddress(config, titanConf.get(GraphDatabaseConfiguration.STORAGE_HOSTS)[0]);
    if (titanConf.has(GraphDatabaseConfiguration.STORAGE_PORT))
        ConfigHelper.setInputRpcPort(config, String.valueOf(titanConf.get(GraphDatabaseConfiguration.STORAGE_PORT)));
    if (titanConf.has(GraphDatabaseConfiguration.AUTH_USERNAME))
        ConfigHelper.setInputKeyspaceUserName(config, titanConf.get(GraphDatabaseConfiguration.AUTH_USERNAME));
    if (titanConf.has(GraphDatabaseConfiguration.AUTH_PASSWORD))
        ConfigHelper.setInputKeyspacePassword(config, titanConf.get(GraphDatabaseConfiguration.AUTH_PASSWORD));

    // Copy the keyspace, take the column family name from the MapReduce configuration, and honor widerows when set
    final boolean wideRows = config.getBoolean(INPUT_WIDEROWS_CONFIG, false);
    // Use the setInputColumnFamily overload that takes a widerows argument; the overload without it forces widerows to false
    ConfigHelper.setInputColumnFamily(config, titanConf.get(AbstractCassandraStoreManager.CASSANDRA_KEYSPACE),
            mrConf.get(TitanHadoopConfiguration.COLUMN_FAMILY_NAME), wideRows);
    log.debug("Set keyspace: {}", titanConf.get(AbstractCassandraStoreManager.CASSANDRA_KEYSPACE));

    // Set the column slice bounds via Faunus's vertex query filter
    final SlicePredicate predicate = new SlicePredicate();
    final int rangeBatchSize = config.getInt(RANGE_BATCH_SIZE_CONFIG, Integer.MAX_VALUE);
    predicate.setSlice_range(getSliceRange(TitanHadoopSetupCommon.DEFAULT_SLICE_QUERY, rangeBatchSize)); // TODO stop slicing the whole row
    ConfigHelper.setInputSlicePredicate(config, predicate);
}
public void run(final ThriftClient client) throws IOException {
    final SlicePredicate predicate = select().predicate();
    final ByteBuffer key = getKey();
    timeWithRetry(new RunOp() {
        @Override
        public boolean run() throws Exception {
            List<?> r = client.get_slice(key, new ColumnParent(type.table), predicate, settings.command.consistencyLevel);
            return r != null && r.size() > 0;
        }

        @Override
        public int partitionCount() {
            return 1;
        }

        @Override
        public int rowCount() {
            return 1;
        }
    });
}
protected SlicePredicate slicePredicate() {
    final SlicePredicate predicate = new SlicePredicate();
    if (state.settings.columns.slice) {
        int count = state.rowGen.count(index);
        int start = sliceStart(count);
        predicate.setSlice_range(new SliceRange()
                .setStart(state.settings.columns.names.get(start))
                .setFinish(new byte[]{})
                .setReversed(false)
                .setCount(count));
    } else {
        predicate.setColumn_names(randomNames());
    }
    return predicate;
}
private SlicePredicate getSlicePredicate(String[] columnNameList) {
    SlicePredicate slicePredicate = new SlicePredicate();
    try {
        if (columnNameList != null) {
            List<ByteBuffer> columnNameByteBufferList = new ArrayList<ByteBuffer>();
            for (String columnName : columnNameList) {
                byte[] columnNameBytes = columnName.getBytes("UTF-8");
                columnNameByteBufferList.add(ByteBuffer.wrap(columnNameBytes));
            }
            slicePredicate.setColumn_names(columnNameByteBufferList);
        } else {
            SliceRange sliceRange = new SliceRange();
            sliceRange.setStart(new byte[0]);
            sliceRange.setFinish(new byte[0]);
            // FIXME: The default column count is 100. We should tune the value.
            sliceRange.setCount(100000);
            slicePredicate.setSlice_range(sliceRange);
        }
    } catch (UnsupportedEncodingException exc) {
        throw new StorageException("Character encoding exception with key range", exc);
    }
    return slicePredicate;
}
static boolean isSliceRangePredicate(SlicePredicate predicate) {
    if (predicate == null) {
        return false;
    }
    // A predicate without a slice range (e.g. one built from column names) is not a slice-range predicate.
    // This check also covers the column-names case, so no separate isSetColumn_names() test is needed.
    if (predicate.getSlice_range() == null) {
        return false;
    }
    byte[] start = predicate.getSlice_range().getStart();
    byte[] finish = predicate.getSlice_range().getFinish();
    return start != null && finish != null;
}
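A minimal usage sketch for the helper above; the predicate contents are illustrative and StandardCharsets (Java 7+) is assumed:

// Hypothetical inputs, not from the source: one slice-range predicate, one column-names predicate.
SlicePredicate bySlice = new SlicePredicate().setSlice_range(
        new SliceRange(ByteBuffer.wrap(new byte[0]), ByteBuffer.wrap(new byte[0]), false, 100));
SlicePredicate byNames = new SlicePredicate().setColumn_names(
        Arrays.asList(ByteBuffer.wrap("name".getBytes(StandardCharsets.UTF_8))));
assert isSliceRangePredicate(bySlice);   // slice range set, both bounds non-null
assert !isSliceRangePredicate(byNames);  // no slice range set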
public RangeSliceCommand deserialize(DataInputStream dis, int version) throws IOException {
    String keyspace = dis.readUTF();
    String column_family = dis.readUTF();

    int scLength = dis.readInt();
    ByteBuffer super_column = null;
    if (scLength > 0)
        super_column = ByteBuffer.wrap(readBuf(scLength, dis));

    TDeserializer dser = new TDeserializer(new TBinaryProtocol.Factory());
    SlicePredicate pred = new SlicePredicate();
    FBUtilities.deserialize(dser, pred, dis);

    AbstractBounds range = AbstractBounds.serializer().deserialize(dis);
    int max_keys = dis.readInt();
    return new RangeSliceCommand(keyspace, column_family, super_column, pred, range, max_keys);
}
public CasTimeReader() {
    try {
        TTransport tr = new TFramedTransport(new TSocket("10.15.61.111", 9160));
        TProtocol proto = new TBinaryProtocol(tr);
        client = new Cassandra.Client(proto);
        tr.open();
        client.set_keyspace("CadalSecTest");

        // Slice over all columns, up to 10000 per row
        predicate = new SlicePredicate();
        SliceRange range = new SliceRange();
        range.setStart(new byte[0]);
        range.setFinish(new byte[0]);
        range.setCount(10000);
        predicate.setSlice_range(range);

        columnParent = new ColumnParent();
        columnParent.setColumn_family("RecordMinute");
    } catch (Exception e) {
        System.out.println(e);
    }
}
public CasTimeBook() {
    try {
        TTransport tr = new TFramedTransport(new TSocket("10.15.61.111", 9160));
        TProtocol proto = new TBinaryProtocol(tr);
        client = new Cassandra.Client(proto);
        tr.open();
        client.set_keyspace("CadalSecTest");

        // Slice over all columns, up to 10000 per row
        predicate = new SlicePredicate();
        SliceRange range = new SliceRange();
        range.setStart(new byte[0]);
        range.setFinish(new byte[0]);
        range.setCount(10000);
        predicate.setSlice_range(range);

        columnParent = new ColumnParent();
        columnParent.setColumn_family("RecordMinute");
    } catch (Exception e) {
        System.out.println(e);
    }
}
/**
 * Create a RangeSliceCommand and run it against the StorageProxy.
 * <p>
 * To match the behavior of the standard Cassandra thrift API endpoint, the
 * {@code nowMillis} argument should be the number of milliseconds since the
 * UNIX Epoch (e.g. System.currentTimeMillis() or equivalent obtained
 * through a {@link TimestampProvider}). This is per
 * {@link org.apache.cassandra.thrift.CassandraServer#get_range_slices(ColumnParent, SlicePredicate, KeyRange, ConsistencyLevel)},
 * which passes the server's System.currentTimeMillis() to the
 * {@code RangeSliceCommand} constructor.
 */
private List<Row> getKeySlice(Token start, Token end, @Nullable SliceQuery sliceQuery, int pageSize, long nowMillis) throws BackendException {
    IPartitioner partitioner = StorageService.getPartitioner();

    SliceRange columnSlice = new SliceRange();
    if (sliceQuery == null) {
        columnSlice.setStart(ArrayUtils.EMPTY_BYTE_ARRAY)
                .setFinish(ArrayUtils.EMPTY_BYTE_ARRAY)
                .setCount(5);
    } else {
        columnSlice.setStart(sliceQuery.getSliceStart().asByteBuffer())
                .setFinish(sliceQuery.getSliceEnd().asByteBuffer())
                .setCount(sliceQuery.hasLimit() ? sliceQuery.getLimit() : Integer.MAX_VALUE);
    }
    /* Note: we need to fetch columns for each row as well to remove "range ghosts" */
    SlicePredicate predicate = new SlicePredicate().setSlice_range(columnSlice);

    RowPosition startPosition = start.minKeyBound(partitioner);
    RowPosition endPosition = end.minKeyBound(partitioner);

    List<Row> rows;
    try {
        CFMetaData cfm = Schema.instance.getCFMetaData(keyspace, columnFamily);
        IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, cfm, null);
        RangeSliceCommand cmd = new RangeSliceCommand(keyspace, columnFamily, nowMillis, filter,
                new Bounds<RowPosition>(startPosition, endPosition), pageSize);
        rows = StorageProxy.getRangeSlice(cmd, ConsistencyLevel.QUORUM);
    } catch (Exception e) {
        throw new PermanentBackendException(e);
    }
    return rows;
}
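A hedged call sketch per the javadoc's nowMillis contract; the token bounds and page size are illustrative placeholders, not values from the source:

// startToken/endToken are assumed to be Token bounds obtained elsewhere (illustrative).
List<Row> rows = getKeySlice(startToken, endToken,
        null /* no slice query: fetch a 5-column probe slice per row */,
        100, System.currentTimeMillis());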
/**
 * get: read all columns
 *
 * @throws Exception
 */
@Test
public void get2() throws Exception {
    String KEYSPACE = "mock";
    client.set_keyspace(KEYSPACE);

    // read all columns
    String COLUMN_FAMILY = "student";
    ColumnParent columnParent = new ColumnParent(COLUMN_FAMILY);

    // predicate
    SlicePredicate predicate = new SlicePredicate();
    // range
    SliceRange sliceRange = new SliceRange();
    // sliceRange.setStart(ByteBufferHelper.toByteBuffer(new byte[0])); // start
    sliceRange.setStart(new byte[0]); // start
    sliceRange.setFinish(new byte[0]); // finish
    sliceRange.setCount(100); // max number of columns
    predicate.setSlice_range(sliceRange);

    String ROW_KEY = "Jack";
    // result
    // key, column_parent, predicate, consistency_level
    List<ColumnOrSuperColumn> results = client.get_slice(
            ByteBufferHelper.toByteBuffer(ROW_KEY), columnParent, predicate,
            ConsistencyLevel.ONE);

    for (ColumnOrSuperColumn cos : results) {
        Column column = cos.getColumn();
        System.out.println(ROW_KEY + ", "
                + ByteHelper.toString(column.getName()) + ": "
                + ByteHelper.toString(column.getValue()) + ", "
                + column.getTimestamp());
        // Jack, art, 87, 1380788003220
        // Jack, grad, 5, 1380788003203
        // Jack, math, 97, 1380788003214
    }
}
/**
 * delete
 *
 * @throws Exception
 */
@Test
public void delete() throws Exception {
    String KEYSPACE = "mock";
    client.set_keyspace(KEYSPACE);

    List<Mutation> mutations = new ArrayList<Mutation>();
    // <columnFamily, mutations>
    Map<String, List<Mutation>> columnFamilyMutations = new HashMap<String, List<Mutation>>();
    // <rowKey, columnFamilyMutations>
    Map<ByteBuffer, Map<String, List<Mutation>>> rowKeyMutations = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();

    List<ByteBuffer> columns = new ArrayList<ByteBuffer>();
    // Add as many columns as you want here
    columns.add(ByteBufferHelper.toByteBuffer("grad"));
    columns.add(ByteBufferHelper.toByteBuffer("math"));

    SlicePredicate predicate = new SlicePredicate();
    predicate.setColumn_names(columns);

    // delete
    Deletion deletion = new Deletion();
    deletion.setPredicate(predicate);
    // timestamp in microseconds; System.nanoTime() is not wall-clock time, so derive it from currentTimeMillis()
    long timestamp = System.currentTimeMillis() * 1000;
    deletion.setTimestamp(timestamp);

    Mutation mutation = new Mutation();
    mutation.setDeletion(deletion);
    mutations.add(mutation);

    String COLUMN_FAMILY = "student";
    columnFamilyMutations.put(COLUMN_FAMILY, mutations);
    String ROW_KEY = "Jack";
    rowKeyMutations.put(ByteBufferHelper.toByteBuffer(ROW_KEY), columnFamilyMutations);

    // mutation_map, consistency_level
    client.batch_mutate(rowKeyMutations, ConsistencyLevel.ONE);
}
@Override
public void setConf(final Configuration config) {
    super.setConf(config);

    // Copy some Titan configuration keys to the Hadoop Configuration keys used by Cassandra's ColumnFamilyInputFormat
    ConfigHelper.setInputInitialAddress(config, inputConf.get(GraphDatabaseConfiguration.STORAGE_HOSTS)[0]);
    if (inputConf.has(GraphDatabaseConfiguration.STORAGE_PORT))
        ConfigHelper.setInputRpcPort(config, String.valueOf(inputConf.get(GraphDatabaseConfiguration.STORAGE_PORT)));
    if (inputConf.has(GraphDatabaseConfiguration.AUTH_USERNAME))
        ConfigHelper.setInputKeyspaceUserName(config, inputConf.get(GraphDatabaseConfiguration.AUTH_USERNAME));
    if (inputConf.has(GraphDatabaseConfiguration.AUTH_PASSWORD))
        ConfigHelper.setInputKeyspacePassword(config, inputConf.get(GraphDatabaseConfiguration.AUTH_PASSWORD));

    // Copy the keyspace, force the CF setting to edgestore, and honor widerows when set
    final boolean wideRows = config.getBoolean(INPUT_WIDEROWS_CONFIG, INPUT_WIDEROWS_DEFAULT);
    // Use the setInputColumnFamily overload that takes a widerows argument; the overload without it forces widerows to false
    ConfigHelper.setInputColumnFamily(config, inputConf.get(AbstractCassandraStoreManager.CASSANDRA_KEYSPACE),
            Backend.EDGESTORE_NAME, wideRows);

    // Set the column slice bounds via Faunus's vertex query filter
    final SlicePredicate predicate = new SlicePredicate();
    final int rangeBatchSize = config.getInt(RANGE_BATCH_SIZE_CONFIG, Integer.MAX_VALUE);
    predicate.setSlice_range(getSliceRange(TitanHadoopSetupCommon.getDefaultSliceQuery(), rangeBatchSize));
    ConfigHelper.setInputSlicePredicate(config, predicate);

    this.config = config;
}
public static SlicePredicate asSlicePredicate(IDiskAtomFilter predicate) {
    SlicePredicate sp = new SlicePredicate();
    if (predicate instanceof NamesQueryFilter) {
        sp.setColumn_names(new ArrayList<ByteBuffer>(((NamesQueryFilter) predicate).columns));
    } else {
        SliceQueryFilter sqf = (SliceQueryFilter) predicate;
        sp.setSlice_range(new SliceRange(sqf.start(), sqf.finish(), sqf.reversed, sqf.count));
    }
    return sp;
}
public IndexScanCommand(String keyspace, String column_family, IndexClause index_clause, SlicePredicate predicate, AbstractBounds<RowPosition> range) {
    this.keyspace = keyspace;
    this.column_family = column_family;
    this.index_clause = index_clause;
    this.predicate = predicate;
    this.range = range;
}
public void run(final ThriftClient client) throws IOException {
    SliceRange sliceRange = new SliceRange();
    // start/finish
    sliceRange.setStart(new byte[]{}).setFinish(new byte[]{});
    // reversed/count
    sliceRange.setReversed(false).setCount(state.settings.columns.maxColumnsPerKey);
    // initialize SlicePredicate with existing SliceRange
    final SlicePredicate predicate = new SlicePredicate().setSlice_range(sliceRange);

    final ByteBuffer key = getKey();
    for (final ColumnParent parent : state.columnParents) {
        timeWithRetry(new RunOp() {
            @Override
            public boolean run() throws Exception {
                return client.get_slice(key, parent, predicate, state.settings.command.consistencyLevel).size() != 0;
            }

            @Override
            public String key() {
                return new String(key.array());
            }

            @Override
            public int keyCount() {
                return 1;
            }
        });
    }
}
public void run(final ThriftClient client) throws IOException {
    final SlicePredicate predicate = new SlicePredicate();
    if (state.settings.columns.names == null)
        predicate.setSlice_range(new SliceRange()
                .setStart(new byte[]{})
                .setFinish(new byte[]{})
                .setReversed(false)
                .setCount(state.settings.columns.maxColumnsPerKey));
    else // see CASSANDRA-3064 about why this is useful
        predicate.setColumn_names(state.settings.columns.names);

    final ByteBuffer key = getKey();
    for (final ColumnParent parent : state.columnParents) {
        timeWithRetry(new RunOp() {
            @Override
            public boolean run() throws Exception {
                return client.get_slice(key, parent, predicate, state.settings.command.consistencyLevel).size() != 0;
            }

            @Override
            public String key() {
                return new String(key.array());
            }

            @Override
            public int keyCount() {
                return 1;
            }
        });
    }
}
public void run(final ThriftClient client) throws IOException {
    final SlicePredicate predicate = new SlicePredicate().setSlice_range(
            new SliceRange(
                    ByteBufferUtil.EMPTY_BYTE_BUFFER,
                    ByteBufferUtil.EMPTY_BYTE_BUFFER,
                    false,
                    state.settings.columns.maxColumnsPerKey
            )
    );

    final List<ByteBuffer> keys = getKeys(((SettingsCommandMulti) state.settings.command).keysAtOnce);
    for (final ColumnParent parent : state.columnParents) {
        timeWithRetry(new RunOp() {
            int count;

            @Override
            public boolean run() throws Exception {
                return (count = client.multiget_slice(keys, parent, predicate, state.settings.command.consistencyLevel).size()) != 0;
            }

            @Override
            public String key() {
                return keys.toString();
            }

            @Override
            public int keyCount() {
                return count;
            }
        });
    }
}
public void run(final ThriftClient client) throws IOException {
    final SlicePredicate predicate = slicePredicate();
    final ByteBuffer key = getKey();
    for (final ColumnParent parent : state.columnParents) {
        timeWithRetry(new RunOp() {
            @Override
            public boolean run() throws Exception {
                List<?> r = client.get_slice(key, parent, predicate, state.settings.command.consistencyLevel);
                return r != null && r.size() > 0;
            }

            @Override
            public String key() {
                return new String(key.array());
            }

            @Override
            public int keyCount() {
                return 1;
            }
        });
    }
}
public void run(final ThriftClient client) throws IOException {
    final SlicePredicate predicate = new SlicePredicate().setSlice_range(
            new SliceRange(
                    ByteBufferUtil.EMPTY_BYTE_BUFFER,
                    ByteBufferUtil.EMPTY_BYTE_BUFFER,
                    false,
                    state.settings.columns.maxColumnsPerKey
            )
    );

    final List<ByteBuffer> keys = getKeys(state.settings.command.keysAtOnce);
    for (final ColumnParent parent : state.columnParents) {
        timeWithRetry(new RunOp() {
            int count;

            @Override
            public boolean run() throws Exception {
                return (count = client.multiget_slice(keys, parent, predicate, state.settings.command.consistencyLevel).size()) != 0;
            }

            @Override
            public String key() {
                return keys.toString();
            }

            @Override
            public int keyCount() {
                return count;
            }
        });
    }
}
private static Mutation createDeleteColumnMutation(byte[] colName, long timestamp) {
    SlicePredicate slicePred = new SlicePredicate();
    slicePred.addToColumn_names(ByteBuffer.wrap(colName));

    Deletion deletion = new Deletion();
    deletion.setPredicate(slicePred);
    deletion.setTimestamp(timestamp);

    Mutation mutation = new Mutation();
    mutation.setDeletion(deletion);
    return mutation;
}
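A hedged sketch wiring the helper above into batch_mutate, as in the delete test earlier; the client, column family, and row key are illustrative assumptions:

// "student"/"Jack" mirror the earlier test data but are placeholders here.
Mutation delete = createDeleteColumnMutation("math".getBytes(StandardCharsets.UTF_8),
        System.currentTimeMillis() * 1000); // microsecond timestamp
Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap = Collections.singletonMap(
        ByteBuffer.wrap("Jack".getBytes(StandardCharsets.UTF_8)),
        Collections.singletonMap("student", Collections.singletonList(delete)));
client.batch_mutate(mutationMap, ConsistencyLevel.ONE);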
private static String toString(SlicePredicate slicePred) {
    StringBuilder buffer = new StringBuilder();
    if (slicePred.isSetColumn_names()) {
        buffer.append("Columns(");
        buffer.append(slicePred.getColumn_names().size());
        buffer.append(" total)");
    } else if (slicePred.isSetSlice_range()) {
        SliceRange sliceRange = slicePred.getSlice_range();

        ByteBuffer startCol = sliceRange.start;
        String startColStr = "<null>";
        if (startCol != null) {
            startColStr = Utils.toString(startCol.array(), startCol.arrayOffset(), startCol.limit());
        }
        if (startColStr.length() == 0) {
            startColStr = "<first>";
        }

        ByteBuffer endCol = sliceRange.finish;
        String endColStr = "<null>";
        if (endCol != null) {
            endColStr = Utils.toString(endCol.array(), endCol.arrayOffset(), endCol.limit());
        }
        if (endColStr.length() == 0) {
            endColStr = "<last>";
        }

        if (startColStr.equals("<first>") && endColStr.equals("<last>")) {
            buffer.append("Slice(<all>)");
        } else {
            buffer.append("Slice('");
            buffer.append(startColStr);
            buffer.append("' to '");
            buffer.append(endColStr);
            buffer.append("')");
        }
    }
    return buffer.toString();
}
/**
 * Create a SlicePredicate that selects the column range from the given starting
 * column name to the given ending column name, selecting up to
 * {@link #MAX_COLS_BATCH_SIZE} columns.
 *
 * @param startColName Starting column name as a byte[]; null means "from the first column".
 * @param endColName   Ending column name as a byte[]; null means "through the last column".
 * @param reversed     Whether to slice in reverse column order.
 * @return SlicePredicate that starts at the given starting column name,
 *         ends at the given ending column name, selecting up to
 *         {@link #MAX_COLS_BATCH_SIZE} columns.
 */
static SlicePredicate slicePredicateStartEndCol(byte[] startColName, byte[] endColName, boolean reversed) {
    if (startColName == null) startColName = EMPTY_BYTES;
    if (endColName == null) endColName = EMPTY_BYTES;
    SliceRange sliceRange = new SliceRange(
            ByteBuffer.wrap(startColName),
            ByteBuffer.wrap(endColName),
            reversed,
            CassandraDefs.MAX_COLS_BATCH_SIZE);
    SlicePredicate slicePred = new SlicePredicate();
    slicePred.setSlice_range(sliceRange);
    return slicePred;
}
/**
 * Create a SlicePredicate that selects the given column names.
 *
 * @param colNames A collection of column names as byte[]s.
 * @return SlicePredicate that selects the given column names only.
 */
static SlicePredicate slicePredicateColNames(Collection<byte[]> colNames) {
    SlicePredicate slicePred = new SlicePredicate();
    for (byte[] colName : colNames) {
        slicePred.addToColumn_names(ByteBuffer.wrap(colName));
    }
    return slicePred;
}
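A brief, hedged sketch exercising the two predicate builders above; the column names are illustrative and StandardCharsets is assumed:

// Named-columns predicate vs. range predicates (empty byte[] bound = unbounded on that side).
List<byte[]> names = Arrays.asList(
        "grad".getBytes(StandardCharsets.UTF_8),
        "math".getBytes(StandardCharsets.UTF_8));
SlicePredicate byName = slicePredicateColNames(names);
SlicePredicate aThroughM = slicePredicateStartEndCol(
        "a".getBytes(StandardCharsets.UTF_8), "m".getBytes(StandardCharsets.UTF_8), false);
SlicePredicate wholeRow = slicePredicateStartEndCol(null, null, false); // <first> .. <last>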
private static SlicePredicate predicateFromString(String st) {
    assert st != null;
    TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory());
    SlicePredicate predicate = new SlicePredicate();
    try {
        deserializer.deserialize(predicate, Hex.hexToBytes(st));
    } catch (TException e) {
        throw new RuntimeException(e);
    }
    return predicate;
}
public int run(String[] args) throws Exception {
    String columnName = "books";
    getConf().set("columnname", columnName);

    Job job = new Job(getConf(), "wordcount");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(MapperClass.class);

    // Tell the Mapper to expect Cassandra columns as input
    job.setInputFormatClass(ColumnFamilyInputFormat.class);

    // Tell the "Shuffle/Sort" phase what type of key/value pair to expect from Mapper
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);

    job.setReducerClass(ReducerClass.class);
    job.setOutputFormatClass(ColumnFamilyOutputFormat.class);
    job.setOutputKeyClass(ByteBuffer.class);
    job.setOutputValueClass(List.class);

    // Set keyspace and column family for the output
    ConfigHelper.setOutputColumnFamily(job.getConfiguration(), "wordcount", "output");
    // Set keyspace and column family for the input
    ConfigHelper.setInputColumnFamily(job.getConfiguration(), "wordcount", "input");
    // "RPC_PORT" and "IP_ADDRESS" are placeholders; substitute the cluster's actual rpc_port and address
    ConfigHelper.setRpcPort(job.getConfiguration(), "RPC_PORT");
    ConfigHelper.setInitialAddress(job.getConfiguration(), "IP_ADDRESS");
    ConfigHelper.setPartitioner(job.getConfiguration(), "org.apache.cassandra.dht.RandomPartitioner");

    // Set the predicate that determines what columns will be selected from each row
    SlicePredicate predicate = new SlicePredicate().setColumn_names(
            Arrays.asList(ByteBufferUtil.bytes(columnName)));

    // Each row will be handled by one Map job
    ConfigHelper.setInputSlicePredicate(job.getConfiguration(), predicate);

    job.waitForCompletion(true);
    return job.isSuccessful() ? 0 : 1;
}
private static String predicateToString(SlicePredicate predicate) {
    assert predicate != null;
    // this is so awful it's kind of cool!
    TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory());
    try {
        return FBUtilities.bytesToHex(serializer.serialize(predicate));
    } catch (TException e) {
        throw new RuntimeException(e);
    }
}
private static SlicePredicate predicateFromString(String st) {
    assert st != null;
    TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory());
    SlicePredicate predicate = new SlicePredicate();
    try {
        deserializer.deserialize(predicate, FBUtilities.hexToBytes(st));
    } catch (TException e) {
        throw new RuntimeException(e);
    }
    return predicate;
}
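Since predicateToString and predicateFromString use the same TBinaryProtocol hex encoding, they round-trip; a hedged sketch (the 100-column range is illustrative):

// Serialize a predicate to a hex string and restore it; Thrift-generated structs implement equals().
SlicePredicate original = new SlicePredicate().setSlice_range(
        new SliceRange(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 100));
String hex = predicateToString(original);
SlicePredicate restored = predicateFromString(hex);
assert original.equals(restored);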
public RangeSliceCommand(String keyspace, String column_family, ByteBuffer super_column, SlicePredicate predicate, AbstractBounds range, int max_keys) {
    this.keyspace = keyspace;
    this.column_family = column_family;
    this.super_column = super_column;
    this.predicate = predicate;
    this.range = range;
    this.max_keys = max_keys;
}
public IndexScanCommand(String keyspace, String column_family, IndexClause index_clause, SlicePredicate predicate, AbstractBounds range) {
    this.keyspace = keyspace;
    this.column_family = column_family;
    this.index_clause = index_clause;
    this.predicate = predicate;
    this.range = range;
}