/**
 * Executes one counter-add operation over Thrift: assembles a batch_mutate
 * payload containing one counter column per selected column name, all under a
 * single partition key, and submits it through the timed/retrying runner.
 *
 * @param client Thrift client used to issue the batch_mutate call
 * @throws IOException propagated from the retry machinery on failure
 */
public void run(final ThriftClient client) throws IOException
{
    // One mutation per selected column name; counteradd supplies the increment.
    final List<Mutation> mutations = new ArrayList<>();
    for (ByteBuffer name : select().select(settings.columns.names))
    {
        CounterColumn counter = new CounterColumn(name, counteradd.next());
        ColumnOrSuperColumn wrapped = new ColumnOrSuperColumn().setCounter_column(counter);
        mutations.add(new Mutation().setColumn_or_supercolumn(wrapped));
    }

    // Single-partition payload: key -> (table -> mutations).
    final ByteBuffer key = getKey();
    final Map<ByteBuffer, Map<String, List<Mutation>>> record =
            Collections.singletonMap(key, Collections.singletonMap(type.table, mutations));

    timeWithRetry(new RunOp()
    {
        @Override
        public boolean run() throws Exception
        {
            client.batch_mutate(record, settings.command.consistencyLevel);
            return true;
        }

        @Override
        public int partitionCount()
        {
            // Exactly one partition key is mutated per invocation.
            return 1;
        }

        @Override
        public int rowCount()
        {
            return 1;
        }
    });
}
/**
 * Converts a Thrift {@code CounterSuperColumn} into an internal
 * {@code SuperColumn}, translating every contained counter column via
 * {@link #unthriftifyCounter}.
 */
private IColumn unthriftifySuperCounter(CounterSuperColumn superColumn)
{
    org.apache.cassandra.db.SuperColumn result =
            new org.apache.cassandra.db.SuperColumn(superColumn.name, subComparator);
    for (CounterColumn child : superColumn.columns)
        result.addColumn(unthriftifyCounter(child));
    return result;
}
/**
 * Builds an internal {@code SuperColumn} from a Thrift
 * {@code CounterSuperColumn}, adding each sub-counter as a converted column.
 */
private IColumn unthriftifySuperCounter(CounterSuperColumn superColumn)
{
    final org.apache.cassandra.db.SuperColumn converted =
            new org.apache.cassandra.db.SuperColumn(superColumn.name, subComparator);
    for (CounterColumn counter : superColumn.columns)
    {
        converted.addColumn(unthriftifyCounter(counter));
    }
    return converted;
}
private IColumn unthriftifyCounter(CounterColumn column) { // CounterColumns read the nodeID from the System table, so need the StorageService running // and access // to cassandra.yaml. To avoid a Hadoop needing access to yaml return a regular Column. return new org.apache.cassandra.db.Column(column.name, ByteBufferUtil.bytes(column.value), 0); }
private IColumn unthriftifyCounter(CounterColumn column) { //CounterColumns read the nodeID from the System table, so need the StorageService running and access //to cassandra.yaml. To avoid a Hadoop needing access to yaml return a regular Column. return new org.apache.cassandra.db.Column(column.name, ByteBufferUtil.bytes(column.value), 0); }