Example source code for the Java class org.apache.cassandra.thrift.Mutation

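Nearly every snippet below follows the same Thrift pattern: wrap a Column (or SuperColumn / CounterColumn) in a ColumnOrSuperColumn and set it on a Mutation (deletes set a Deletion on the Mutation instead), group the mutations by row key and then by column family, and submit the nested map through Cassandra.Client.batch_mutate. The following minimal sketch distills that pattern; it assumes an already-connected Cassandra.Client with the keyspace set, and the column family, row key, and values are hypothetical.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.Mutation;

public class MutationSketch {

    // Wraps a single column insert in a Mutation. Thrift timestamps are plain
    // longs; by convention clients use microseconds since the epoch.
    static Mutation insertMutation(String name, String value, long timestampMicros) {
        Column column = new Column();
        column.setName(name.getBytes(StandardCharsets.UTF_8));
        column.setValue(value.getBytes(StandardCharsets.UTF_8));
        column.setTimestamp(timestampMicros);

        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        cosc.setColumn(column);

        Mutation mutation = new Mutation();
        mutation.setColumn_or_supercolumn(cosc);
        return mutation;
    }

    // Builds the nested map batch_mutate expects:
    // row key -> column family -> list of mutations.
    static void applyBatch(Cassandra.Client client, String columnFamily, String rowKey,
                           List<Mutation> mutations) throws Exception {
        Map<String, List<Mutation>> byColumnFamily =
                Collections.singletonMap(columnFamily, mutations);
        Map<ByteBuffer, Map<String, List<Mutation>>> byRowKey =
                Collections.singletonMap(
                        ByteBuffer.wrap(rowKey.getBytes(StandardCharsets.UTF_8)),
                        byColumnFamily);
        client.batch_mutate(byRowKey, ConsistencyLevel.ONE);
    }
}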
Project: archived-net-virt-platform    File: Connection.java
private Mutation getMutation(String columnName, Object value, long timestamp) {
    byte[] columnNameBytes;
    try {
        columnNameBytes = columnName.getBytes("UTF-8");
    }
    catch (UnsupportedEncodingException exc) {
        throw new StorageException("Unsupported character encoding for column name", exc);
    }
    byte[] valueBytes = convertValueToBytes(value);
    Column column = new Column();
    column.setName(columnNameBytes);
    column.setValue(valueBytes);
    column.setTimestamp(timestamp);
    ColumnOrSuperColumn columnOrSuperColumn = new ColumnOrSuperColumn();
    columnOrSuperColumn.setColumn(column);
    Mutation mutation = new Mutation();
    mutation.setColumn_or_supercolumn(columnOrSuperColumn);
    return mutation;
}
Project: archived-net-virt-platform    File: Connection.java
public void updateRows(String columnFamily, String primaryKeyName, List<Map<String,Object>> rowUpdateList) {
    long timestamp = getNextTimestamp();
    for (Map<String,Object> rowUpdateMap: rowUpdateList) {
        String rowId = (String) rowUpdateMap.get(primaryKeyName);
        if (rowId == null)
            rowId = generateRowId();
        List<Mutation> rowMutationList = getRowMutationList(columnFamily, rowId);
        for (Map.Entry<String,Object> entry: rowUpdateMap.entrySet()) {
            String columnName = entry.getKey();
            // FIXME: For now we include the primary key data as column data too.
            // This is not entirely efficient, since it means we store that data
            // twice in Cassandra, but without it you can't set up secondary
            // indexes on the primary key column in order to do range queries on
            // that data (not supported in 0.7.0, but targeted for the 0.7.1
            // release). There are also (arguably pathological) cases where, if
            // the key is not stored as column data too, the row could be
            // incorrectly interpreted as a deleted (tombstoned) row. So to keep
            // things simple (at least for now) we always include the key as
            // column data too.
            //if (!columnName.equals(primaryKeyName)) {
                Mutation mutation = getMutation(columnName, entry.getValue(), timestamp);
                rowMutationList.add(mutation);
            //}
        }
    }
}
Project: Doradus    File: CassandraTransaction.java
private static Mutation createMutation(byte[] colName, byte[] colValue, long timestamp) {
    if (colValue == null) {
        colValue = EMPTY_BYTES;
    }
    Column col = new Column();
    col.setName(colName);
    col.setValue(colValue);
    col.setTimestamp(timestamp);

    ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
    cosc.setColumn(col);

    Mutation mutation = new Mutation();
    mutation.setColumn_or_supercolumn(cosc);
    return mutation;
}
Project: hdfs2cass    File: ThriftTarget.java
@Override
public boolean accept(final OutputHandler handler, final PType<?> pType) {
  if (pType instanceof PTableType) {
    PTableType pTableType = (PTableType) pType;
    PType<?> keyType = pTableType.getKeyType();
    PType<?> valueType = pTableType.getValueType();
    List<PType> subTypes = valueType.getSubTypes();

    if (ByteBuffer.class.equals(keyType.getTypeClass())
        && Collection.class.equals(valueType.getTypeClass())
        && subTypes.size() == 1
        && Mutation.class.equals(subTypes.get(0).getTypeClass())) {
      handler.configure(this, pType);
      return true;
    }
  }
  return false;
}
Project: cassandra-kmean    File: BulkRecordWriter.java
private void setTypes(Mutation mutation)
{
   if (cfType == null)
   {
       if (mutation.getColumn_or_supercolumn().isSetSuper_column() || mutation.getColumn_or_supercolumn().isSetCounter_super_column())
           cfType = CFType.SUPER;
       else
           cfType = CFType.NORMAL;
       if (mutation.getColumn_or_supercolumn().isSetCounter_column() || mutation.getColumn_or_supercolumn().isSetCounter_super_column())
           colType = ColType.COUNTER;
       else
           colType = ColType.NORMAL;
   }
}
Project: cassandra-kmean    File: ThriftCounterAdder.java
public void run(final ThriftClient client) throws IOException
{
    List<CounterColumn> columns = new ArrayList<>();
    for (ByteBuffer name : select().select(settings.columns.names))
        columns.add(new CounterColumn(name, counteradd.next()));

    List<Mutation> mutations = new ArrayList<>(columns.size());
    for (CounterColumn c : columns)
    {
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn().setCounter_column(c);
        mutations.add(new Mutation().setColumn_or_supercolumn(cosc));
    }
    Map<String, List<Mutation>> row = Collections.singletonMap(type.table, mutations);

    final ByteBuffer key = getKey();
    final Map<ByteBuffer, Map<String, List<Mutation>>> record = Collections.singletonMap(key, row);

    timeWithRetry(new RunOp()
    {
        @Override
        public boolean run() throws Exception
        {
            client.batch_mutate(record, settings.command.consistencyLevel);
            return true;
        }

        @Override
        public int partitionCount()
        {
            return 1;
        }

        @Override
        public int rowCount()
        {
            return 1;
        }
    });
}
Project: cassandra-kmean    File: ThriftInserter.java
public void run(final ThriftClient client) throws IOException
{
    final ByteBuffer key = getKey();
    final List<Column> columns = getColumns();

    List<Mutation> mutations = new ArrayList<>(columns.size());
    for (Column c : columns)
    {
        ColumnOrSuperColumn column = new ColumnOrSuperColumn().setColumn(c);
        mutations.add(new Mutation().setColumn_or_supercolumn(column));
    }
    Map<String, List<Mutation>> row = Collections.singletonMap(type.table, mutations);

    final Map<ByteBuffer, Map<String, List<Mutation>>> record = Collections.singletonMap(key, row);

    timeWithRetry(new RunOp()
    {
        @Override
        public boolean run() throws Exception
        {
            client.batch_mutate(record, settings.command.consistencyLevel);
            return true;
        }

        @Override
        public int partitionCount()
        {
            return 1;
        }

        @Override
        public int rowCount()
        {
            return 1;
        }
    });
}
Project: openyu-commons    File: CassandraThriftDMLTest.java
/**
 * update
 *
 * @throws Exception
 */
@Test
public void update() throws Exception {
    String KEYSPACE = "mock";
    client.set_keyspace(KEYSPACE);

    List<Mutation> mutations = new LinkedList<Mutation>();
    // <columnFamily,mutations>
    Map<String, List<Mutation>> columnFamilyMutations = new HashMap<String, List<Mutation>>();
    // <rowKey,keyMutations>
    Map<ByteBuffer, Map<String, List<Mutation>>> rowKeyMutations = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();

    long timestamp = System.currentTimeMillis() * 1000; // microseconds since epoch
    //
    Column column = new Column();
    column.setName(ByteBufferHelper.toByteBuffer("grad"));
    column.setValue(ByteBufferHelper.toByteBuffer("9"));
    column.setTimestamp(timestamp);
    //
    ColumnOrSuperColumn cos = new ColumnOrSuperColumn();
    cos.setColumn(column);
    //
    Mutation mutation = new Mutation();
    mutation.setColumn_or_supercolumn(cos);
    mutations.add(mutation);

    String COLUMN_FAMILY = "student";
    columnFamilyMutations.put(COLUMN_FAMILY, mutations);

    String ROW_KEY = "Jack";
    rowKeyMutations.put(ByteBufferHelper.toByteBuffer(ROW_KEY),
            columnFamilyMutations);

    // mutation_map, consistency_level
    client.batch_mutate(rowKeyMutations, ConsistencyLevel.ONE);
}
Project: openyu-commons    File: CassandraThriftDMLTest.java
/**
 * delete
 *
 * @throws Exception
 */
@Test
public void delete() throws Exception {
    String KEYSPACE = "mock";
    client.set_keyspace(KEYSPACE);

    List<Mutation> mutations = new ArrayList<Mutation>();
    // <columnFamily,mutations>
    Map<String, List<Mutation>> columnFamilyMutations = new HashMap<String, List<Mutation>>();
    // <rowKey,keyMutations>
    Map<ByteBuffer, Map<String, List<Mutation>>> rowKeyMutations = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
    //
    List<ByteBuffer> columns = new ArrayList<ByteBuffer>();
    // Add as many column names as you want here
    columns.add(ByteBufferHelper.toByteBuffer("grad"));
    columns.add(ByteBufferHelper.toByteBuffer("math"));
    //
    SlicePredicate predicate = new SlicePredicate();
    predicate.setColumn_names(columns);
    // delete
    Deletion deletion = new Deletion();
    deletion.setPredicate(predicate);
    // timestamp in microseconds
    long timestamp = System.currentTimeMillis() * 1000;
    deletion.setTimestamp(timestamp);

    Mutation mutation = new Mutation();
    mutation.setDeletion(deletion);
    mutations.add(mutation);

    String COLUMN_FAMILY = "student";
    columnFamilyMutations.put(COLUMN_FAMILY, mutations);

    String ROW_KEY = "Jack";
    rowKeyMutations.put(ByteBufferHelper.toByteBuffer(ROW_KEY),
            columnFamilyMutations);

    // mutation_map, consistency_level
    client.batch_mutate(rowKeyMutations, ConsistencyLevel.ONE);
}
Project: scylla-tools-java    File: ThriftCounterAdder.java
public void run(final ThriftClient client) throws IOException
{
    List<CounterColumn> columns = new ArrayList<>();
    for (ByteBuffer name : select().select(settings.columns.names))
        columns.add(new CounterColumn(name, counteradd.next()));

    List<Mutation> mutations = new ArrayList<>(columns.size());
    for (CounterColumn c : columns)
    {
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn().setCounter_column(c);
        mutations.add(new Mutation().setColumn_or_supercolumn(cosc));
    }
    Map<String, List<Mutation>> row = Collections.singletonMap(type.table, mutations);

    final ByteBuffer key = getKey();
    final Map<ByteBuffer, Map<String, List<Mutation>>> record = Collections.singletonMap(key, row);

    timeWithRetry(new RunOp()
    {
        @Override
        public boolean run() throws Exception
        {
            client.batch_mutate(record, settings.command.consistencyLevel);
            return true;
        }

        @Override
        public int partitionCount()
        {
            return 1;
        }

        @Override
        public int rowCount()
        {
            return 1;
        }
    });
}
Project: scylla-tools-java    File: ThriftInserter.java
public void run(final ThriftClient client) throws IOException
{
    final ByteBuffer key = getKey();
    final List<Column> columns = getColumns();

    List<Mutation> mutations = new ArrayList<>(columns.size());
    for (Column c : columns)
    {
        ColumnOrSuperColumn column = new ColumnOrSuperColumn().setColumn(c);
        mutations.add(new Mutation().setColumn_or_supercolumn(column));
    }
    Map<String, List<Mutation>> row = Collections.singletonMap(type.table, mutations);

    final Map<ByteBuffer, Map<String, List<Mutation>>> record = Collections.singletonMap(key, row);

    timeWithRetry(new RunOp()
    {
        @Override
        public boolean run() throws Exception
        {
            client.batch_mutate(record, settings.command.consistencyLevel);
            return true;
        }

        @Override
        public int partitionCount()
        {
            return 1;
        }

        @Override
        public int rowCount()
        {
            return 1;
        }
    });
}
Project: GraphTrek    File: BulkRecordWriter.java
private void setTypes(Mutation mutation)
{
   if (cfType == null)
   {
       if (mutation.getColumn_or_supercolumn().isSetSuper_column() || mutation.getColumn_or_supercolumn().isSetCounter_super_column())
           cfType = CFType.SUPER;
       else
           cfType = CFType.NORMAL;
       if (mutation.getColumn_or_supercolumn().isSetCounter_column() || mutation.getColumn_or_supercolumn().isSetCounter_super_column())
           colType = ColType.COUNTER;
       else
           colType = ColType.NORMAL;
   }
}
Project: hadoop-in-action    File: Prepopulate.java
private void insertPOISpringTraining() throws Exception {
    Map<ByteBuffer, Map<String, List<Mutation>>> outerMap = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
    List<Mutation> columnsToAdd = new ArrayList<Mutation>();

    long timestamp = System.nanoTime();
    String keyName = "Spring Training";
    Column descCol = new Column(bytes("desc"));
    Column phoneCol = new Column(bytes("phone"));

    List<Column> cols = new ArrayList<Column>();
    cols.add(descCol);
    cols.add(phoneCol);

    Map<String, List<Mutation>> innerMap = new HashMap<String, List<Mutation>>();

    Mutation columns = new Mutation();
    ColumnOrSuperColumn descCosc = new ColumnOrSuperColumn();
    SuperColumn sc = new SuperColumn();
    sc.name = bytes(CAMBRIA_NAME);
    sc.columns = cols;

    descCosc.super_column = sc;
    columns.setColumn_or_supercolumn(descCosc);

    columnsToAdd.add(columns);

    String superCFName = "PointOfInterest";
    ColumnPath cp = new ColumnPath();
    cp.column_family = superCFName;
    cp.setSuper_column(CAMBRIA_NAME.getBytes());
    cp.setSuper_columnIsSet(true);

    innerMap.put(superCFName, columnsToAdd);
    outerMap.put(bytes(keyName), innerMap);

    client.batch_mutate(outerMap, CL);

    LOG.debug("Done inserting Spring Training.");
}
Project: hadoop-in-action    File: Prepopulate.java
private void insertPOICentralPark() throws Exception {

        Map<ByteBuffer, Map<String, List<Mutation>>> outerMap = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
        List<Mutation> columnsToAdd = new ArrayList<Mutation>();

        long ts = System.nanoTime();
        String keyName = "Central Park";
        Column descCol = new Column(bytes("desc"));

        // no phone column for park

        List<Column> cols = new ArrayList<Column>();
        cols.add(descCol);

        Map<String, List<Mutation>> innerMap = new HashMap<String, List<Mutation>>();

        Mutation columns = new Mutation();
        ColumnOrSuperColumn descCosc = new ColumnOrSuperColumn();
        SuperColumn waldorfSC = new SuperColumn();
        waldorfSC.name = bytes(WALDORF_NAME);
        waldorfSC.columns = cols;

        descCosc.super_column = waldorfSC;
        columns.setColumn_or_supercolumn(descCosc);

        columnsToAdd.add(columns);

        String superCFName = "PointOfInterest";
        ColumnPath cp = new ColumnPath();
        cp.column_family = superCFName;
        cp.setSuper_column(WALDORF_NAME.getBytes());
        cp.setSuper_columnIsSet(true);

        innerMap.put(superCFName, columnsToAdd);
        outerMap.put(bytes(keyName), innerMap);

        client.batch_mutate(outerMap, CL);

        LOG.debug("Done inserting Central Park.");
    }
Project: hadoop-in-action    File: Prepopulate.java
public void insertAllHotels() throws Exception {

        String columnFamily = "Hotel";

        // row keys
        String cambriaKey = "AZC_043";
        String clarionKey = "AZS_011";
        String wKey = "CAS_021";
        String waldorfKey = "NYN_042";

        // conveniences
        Map<ByteBuffer, Map<String, List<Mutation>>> cambriaMutationMap = createCambriaMutation(columnFamily,
                cambriaKey);

        Map<ByteBuffer, Map<String, List<Mutation>>> clarionMutationMap = createClarionMutation(columnFamily,
                clarionKey);

        Map<ByteBuffer, Map<String, List<Mutation>>> waldorfMutationMap = createWaldorfMutation(columnFamily,
                waldorfKey);

        Map<ByteBuffer, Map<String, List<Mutation>>> wMutationMap = createWMutation(columnFamily, wKey);


        client.batch_mutate(cambriaMutationMap, CL);
        LOG.debug("Inserted " + cambriaKey);
        client.batch_mutate(clarionMutationMap, CL);
        LOG.debug("Inserted " + clarionKey);
        client.batch_mutate(wMutationMap, CL);
        LOG.debug("Inserted " + wKey);
        client.batch_mutate(waldorfMutationMap, CL);
        LOG.debug("Inserted " + waldorfKey);

        LOG.debug("Done inserting at " + System.nanoTime());
    }
Project: stratio-cassandra    File: BulkRecordWriter.java
private void setTypes(Mutation mutation)
{
   if (cfType == null)
   {
       if (mutation.getColumn_or_supercolumn().isSetSuper_column() || mutation.getColumn_or_supercolumn().isSetCounter_super_column())
           cfType = CFType.SUPER;
       else
           cfType = CFType.NORMAL;
       if (mutation.getColumn_or_supercolumn().isSetCounter_column() || mutation.getColumn_or_supercolumn().isSetCounter_super_column())
           colType = ColType.COUNTER;
       else
           colType = ColType.NORMAL;
   }
}
Project: stratio-cassandra    File: ThriftCounterAdder.java
public void run(final ThriftClient client) throws IOException
{
    List<CounterColumn> columns = new ArrayList<>();
    for (ByteBuffer name : select().select(settings.columns.names))
        columns.add(new CounterColumn(name, counteradd.next()));

    List<Mutation> mutations = new ArrayList<>(columns.size());
    for (CounterColumn c : columns)
    {
        ColumnOrSuperColumn cosc = new ColumnOrSuperColumn().setCounter_column(c);
        mutations.add(new Mutation().setColumn_or_supercolumn(cosc));
    }
    Map<String, List<Mutation>> row = Collections.singletonMap(type.table, mutations);

    final ByteBuffer key = getKey();
    final Map<ByteBuffer, Map<String, List<Mutation>>> record = Collections.singletonMap(key, row);

    timeWithRetry(new RunOp()
    {
        @Override
        public boolean run() throws Exception
        {
            client.batch_mutate(record, settings.command.consistencyLevel);
            return true;
        }

        @Override
        public int partitionCount()
        {
            return 1;
        }

        @Override
        public int rowCount()
        {
            return 1;
        }
    });
}
Project: stratio-cassandra    File: ThriftInserter.java
public void run(final ThriftClient client) throws IOException
{
    final ByteBuffer key = getKey();
    final List<Column> columns = getColumns();

    List<Mutation> mutations = new ArrayList<>(columns.size());
    for (Column c : columns)
    {
        ColumnOrSuperColumn column = new ColumnOrSuperColumn().setColumn(c);
        mutations.add(new Mutation().setColumn_or_supercolumn(column));
    }
    Map<String, List<Mutation>> row = Collections.singletonMap(type.table, mutations);

    final Map<ByteBuffer, Map<String, List<Mutation>>> record = Collections.singletonMap(key, row);

    timeWithRetry(new RunOp()
    {
        @Override
        public boolean run() throws Exception
        {
            client.batch_mutate(record, settings.command.consistencyLevel);
            return true;
        }

        @Override
        public int partitionCount()
        {
            return 1;
        }

        @Override
        public int rowCount()
        {
            return 1;
        }
    });
}
Project: archived-net-virt-platform    File: Connection.java
private List<Mutation> getRowMutationList(String columnFamily, Object rowKey) {

    if (pendingMutations == null)
        pendingMutations = new HashMap<ByteBuffer,Map<String,List<Mutation>>>();

    ByteBuffer rowKeyBytes;
    try {
        rowKeyBytes = ByteBuffer.wrap(rowKey.toString().getBytes("UTF-8"));
    }
    catch (UnsupportedEncodingException exc) {
        throw new StorageException("Unsupported character encoding for row ID", exc);
    }

    Map<String,List<Mutation>> rowIdMap = pendingMutations.get(rowKeyBytes);
    if (rowIdMap == null) {
        rowIdMap = new HashMap<String,List<Mutation>>();
        pendingMutations.put(rowKeyBytes, rowIdMap);
    }

    List<Mutation> rowMutationList = rowIdMap.get(columnFamily);
    if (rowMutationList == null) {
        rowMutationList = new ArrayList<Mutation>();
        rowIdMap.put(columnFamily, rowMutationList);
    }

    return rowMutationList;
}
Project: archived-net-virt-platform    File: Connection.java
public void updateRows(String columnFamily, Map<Object,Map<String,Object>> rowUpdateMap) {
    long timestamp = getNextTimestamp();
    for (Map.Entry<Object,Map<String,Object>> rowEntry: rowUpdateMap.entrySet()) {
        Object rowKey = rowEntry.getKey();
        List<Mutation> rowMutationList = getRowMutationList(columnFamily, rowKey);
        for (Map.Entry<String,Object> columnEntry: rowEntry.getValue().entrySet()) {
            Mutation mutation = getMutation(columnEntry.getKey(), columnEntry.getValue(), timestamp);
            rowMutationList.add(mutation);
        }
    }
}
Project: Doradus    File: CassandraTransaction.java
private static Mutation createDeleteColumnMutation(byte[] colName, long timestamp) {
    SlicePredicate slicePred = new SlicePredicate();
    slicePred.addToColumn_names(ByteBuffer.wrap(colName));

    Deletion deletion = new Deletion();
    deletion.setPredicate(slicePred);
    deletion.setTimestamp(timestamp);

    Mutation mutation = new Mutation();
    mutation.setDeletion(deletion);
    return mutation;
}
Project: mapreduce-wordcount    File: Cassandra_WordCount.java
private static Mutation getMutation(Text word, int sum) {
    Column c = new Column();
    c.setName(Arrays.copyOf(word.getBytes(), word.getLength()));
    c.setValue(ByteBufferUtil.bytes(String.valueOf(sum)));
    c.setTimestamp(System.currentTimeMillis());

    Mutation m = new Mutation();
    m.setColumn_or_supercolumn(new ColumnOrSuperColumn());
    m.column_or_supercolumn.setColumn(c);

    return m;
}
Project: Cassandra-KVPM    File: WordCount.java
private static Mutation getMutation(Text word, int sum)
{
    Column c = new Column();
    c.setName(Arrays.copyOf(word.getBytes(), word.getLength()));
    c.setValue(ByteBufferUtil.bytes(String.valueOf(sum)));
    c.setTimestamp(System.currentTimeMillis());

    Mutation m = new Mutation();
    m.setColumn_or_supercolumn(new ColumnOrSuperColumn());
    m.column_or_supercolumn.setColumn(c);
    return m;
}
Project: spiff    File: Cassandra.java
/**
 * Creates a list of mutations based on {@code columns}.
 * 
 * @param columns the columns to update. Must be non {@code null}, all
 *        elements must be non {@code null}, may be empty.
 * @return a list of mutations. Never {@code null}.
 */
public static ImmutableList<Mutation> mutations ( final Column... columns )
{
  Preconditions.checkNotNull(columns, "columns required");
  for ( Column column : columns )
    Preconditions.checkNotNull(column, "all columns must be non null");

  ImmutableList.Builder<Mutation> b = ImmutableList.builder();
  for ( Column c : columns )
    b.add(new Mutation().setColumn_or_supercolumn(new ColumnOrSuperColumn().setColumn(c)));
  return b.build();
}
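For illustration, a hypothetical caller could feed this helper's result straight into a batch_mutate map; the column name, value, and timestamp below are made up:

    // Hypothetical usage of the mutations(...) helper above.
    Column c = new Column();
    c.setName(ByteBufferUtil.bytes("name"));
    c.setValue(ByteBufferUtil.bytes("value"));
    c.setTimestamp(System.currentTimeMillis() * 1000); // microseconds since epoch
    ImmutableList<Mutation> rowMutations = mutations(c);
    // rowMutations can now be grouped by column family and row key for batch_mutate.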
Project: hdfs2cass    File: CrunchBulkRecordWriter.java
private void setTypes(Mutation mutation) {
  if (cfType == null) {
    if (mutation.getColumn_or_supercolumn().isSetSuper_column()
        || mutation.getColumn_or_supercolumn().isSetCounter_super_column())
      cfType = CFType.SUPER;
    else
      cfType = CFType.NORMAL;
    if (mutation.getColumn_or_supercolumn().isSetCounter_column()
        || mutation.getColumn_or_supercolumn().isSetCounter_super_column())
      colType = ColType.COUNTER;
    else
      colType = ColType.NORMAL;
  }
}
Project: hdfs2cass    File: LegacyHdfsToThrift.java
/**
* Thrift-based import requires us to provide {@link org.apache.cassandra.thrift.Mutation}.
* Therefore we convert each input line into one.
*
* @param inputRow byte representation of the input row as it was read from Avro file
* @return the record wrapped into something that blends nicely into Crunch
*/
 @Override
 public ThriftRecord map(ByteBuffer inputRow) {
   LegacyInputFormat row = LegacyInputFormat.parse(inputRow);
   ByteBuffer key = CassandraRecordUtils.toByteBuffer(row.getRowkey());
   long ts = Objects.firstNonNull(row.getTimestamp(), DateTimeUtils.currentTimeMillis());
   int ttl = Objects.firstNonNull(row.getTtl(), 0L).intValue();
   Mutation mutation = CassandraRecordUtils.createMutation(
       row.getColname(), row.getColval(), ts, ttl);
   return ThriftRecord.of(key, mutation);
 }
Project: hdfs2cass    File: CassandraRecordUtils.java
public static Mutation createMutation(Object name, Object value, long timestamp, int ttl) {
  Column column = new Column();
  column.setName(toByteBuffer(name));
  column.setValue(toByteBuffer(value));
  column.setTimestamp(timestamp);
  if (ttl > 0) {
    column.setTtl(ttl);
  }

  Mutation mutation = new Mutation();
  mutation.column_or_supercolumn = new ColumnOrSuperColumn();
  mutation.column_or_supercolumn.column = column;
  return mutation;
}
Project: hdfs2cass    File: ThriftByFieldNamesFn.java
private List<Mutation> getMutations(final T input) {
  List<Mutation> mutations = Lists.newArrayList();

  long timestamp = getTimestamp(input);
  Optional<Integer> ttl = getTtl(input);

  for (Schema.Field field : input.getSchema().getFields()) {
    int fieldPos = field.pos();
    if (fieldPos == rowKeyIndex || fieldPos == ttlIndex || fieldPos == timestampIndex) {
      continue;
    }

    Object fieldValue = input.get(fieldPos);

    Column column = new Column();
    column.setName(ByteBufferUtil.bytes(field.name()));
    column.setTimestamp(timestamp);
    if (ttl.isPresent()) {
      column.setTtl(ttl.get());
    }
    column.setValue(CassandraRecordUtils.toByteBuffer(fieldValue));

    Mutation mutation = new Mutation();
    mutation.column_or_supercolumn = new ColumnOrSuperColumn();
    mutation.column_or_supercolumn.column = column;

    mutations.add(mutation);
  }


  return mutations;
}
Project: learning-hadoop    File: CassandraOutputData.java
public static Map<ByteBuffer, Map<String, List<Mutation>>> newThriftBatch(
        int numRows) {
    return new HashMap<ByteBuffer, Map<String, List<Mutation>>>(numRows);
}
Project: hadoop-in-action    File: Prepopulate.java
private void insertPOIPhoenixZoo() throws Exception {

        Map<ByteBuffer, Map<String, List<Mutation>>> outerMap = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
        List<Mutation> columnsToAdd = new ArrayList<Mutation>();

        long ts = System.currentTimeMillis();
        String keyName = "Phoenix Zoo";
        Column descCol = new Column(bytes("desc"));

        Column phoneCol = new Column(bytes("phone"));

        List<Column> cols = new ArrayList<Column>();
        cols.add(descCol);
        cols.add(phoneCol);

        Map<String, List<Mutation>> innerMap = new HashMap<String, List<Mutation>>();

        String cambriaName = "Cambria Suites Hayden";

        Mutation columns = new Mutation();
        ColumnOrSuperColumn descCosc = new ColumnOrSuperColumn();
        SuperColumn sc = new SuperColumn();
        sc.name = bytes(cambriaName);
        sc.columns = cols;

        descCosc.super_column = sc;
        columns.setColumn_or_supercolumn(descCosc);

        columnsToAdd.add(columns);

        String superCFName = "PointOfInterest";
        ColumnPath cp = new ColumnPath();
        cp.column_family = superCFName;
        cp.setSuper_column(cambriaName.getBytes());
        cp.setSuper_columnIsSet(true);

        innerMap.put(superCFName, columnsToAdd);
        outerMap.put(bytes(keyName), innerMap);

        client.batch_mutate(outerMap, CL);

        LOG.debug("Done inserting Phoenix Zoo.");
    }
Project: hadoop-in-action    File: Prepopulate.java
private void insertPOIEmpireState() throws Exception {

        Map<ByteBuffer, Map<String, List<Mutation>>> outerMap = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();

        List<Mutation> columnsToAdd = new ArrayList<Mutation>();

        long ts = System.nanoTime();
        String esbName = "Empire State Building";
        Column descCol = new Column(bytes("desc"));
        Column phoneCol = new Column(bytes("phone"));

        List<Column> esbCols = new ArrayList<Column>();
        esbCols.add(descCol);
        esbCols.add(phoneCol);

        Map<String, List<Mutation>> innerMap = new HashMap<String, List<Mutation>>();

        Mutation columns = new Mutation();
        ColumnOrSuperColumn descCosc = new ColumnOrSuperColumn();
        SuperColumn waldorfSC = new SuperColumn();
        waldorfSC.name = bytes(WALDORF_NAME);
        waldorfSC.columns = esbCols;

        descCosc.super_column = waldorfSC;
        columns.setColumn_or_supercolumn(descCosc);

        columnsToAdd.add(columns);

        String superCFName = "PointOfInterest";
        ColumnPath cp = new ColumnPath();
        cp.column_family = superCFName;
        cp.setSuper_column(WALDORF_NAME.getBytes());
        cp.setSuper_columnIsSet(true);

        innerMap.put(superCFName, columnsToAdd);
        outerMap.put(bytes(esbName), innerMap);

        client.batch_mutate(outerMap, CL);

        LOG.debug("Done inserting Empire State.");
    }
Project: hadoop-in-action    File: Prepopulate.java
private Map<ByteBuffer, Map<String, List<Mutation>>> createWMutation(String columnFamily, String rowKey)
        throws UnsupportedEncodingException {

    long ts = System.nanoTime();

    Column nameCol = new Column(bytes("name"));
    Column phoneCol = new Column(bytes("phone"));
    Column addressCol = new Column(bytes("address"));
    Column cityCol = new Column(bytes("city"));
    Column stateCol = new Column(bytes("state"));
    Column zipCol = new Column(bytes("zip"));

    ColumnOrSuperColumn nameCosc = new ColumnOrSuperColumn();
    nameCosc.column = nameCol;

    ColumnOrSuperColumn phoneCosc = new ColumnOrSuperColumn();
    phoneCosc.column = phoneCol;

    ColumnOrSuperColumn addressCosc = new ColumnOrSuperColumn();
    addressCosc.column = addressCol;

    ColumnOrSuperColumn cityCosc = new ColumnOrSuperColumn();
    cityCosc.column = cityCol;

    ColumnOrSuperColumn stateCosc = new ColumnOrSuperColumn();
    stateCosc.column = stateCol;

    ColumnOrSuperColumn zipCosc = new ColumnOrSuperColumn();
    zipCosc.column = zipCol;

    Mutation nameMut = new Mutation();
    nameMut.column_or_supercolumn = nameCosc;
    Mutation phoneMut = new Mutation();
    phoneMut.column_or_supercolumn = phoneCosc;
    Mutation addressMut = new Mutation();
    addressMut.column_or_supercolumn = addressCosc;
    Mutation cityMut = new Mutation();
    cityMut.column_or_supercolumn = cityCosc;
    Mutation stateMut = new Mutation();
    stateMut.column_or_supercolumn = stateCosc;
    Mutation zipMut = new Mutation();
    zipMut.column_or_supercolumn = zipCosc;

    // set up the batch
    Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();

    Map<String, List<Mutation>> muts = new HashMap<String, List<Mutation>>();
    List<Mutation> cols = new ArrayList<Mutation>();
    cols.add(nameMut);
    cols.add(phoneMut);
    cols.add(addressMut);
    cols.add(cityMut);
    cols.add(stateMut);
    cols.add(zipMut);

    muts.put(columnFamily, cols);

    // outer map key is a row key
    // inner map key is the column family name
    mutationMap.put(bytes(rowKey), muts);
    return mutationMap;
}
Project: hadoop-in-action    File: Prepopulate.java
private Map<ByteBuffer, Map<String, List<Mutation>>> createWaldorfMutation(String columnFamily, String rowKey)
        throws UnsupportedEncodingException {

    long ts = System.nanoTime();

    Column nameCol = new Column(bytes("name"));
    Column phoneCol = new Column(bytes("phone"));
    Column addressCol = new Column(bytes("address"));
    Column cityCol = new Column(bytes("city"));
    Column stateCol = new Column(bytes("state"));
    Column zipCol = new Column(bytes("zip"));

    ColumnOrSuperColumn nameCosc = new ColumnOrSuperColumn();
    nameCosc.column = nameCol;

    ColumnOrSuperColumn phoneCosc = new ColumnOrSuperColumn();
    phoneCosc.column = phoneCol;

    ColumnOrSuperColumn addressCosc = new ColumnOrSuperColumn();
    addressCosc.column = addressCol;

    ColumnOrSuperColumn cityCosc = new ColumnOrSuperColumn();
    cityCosc.column = cityCol;

    ColumnOrSuperColumn stateCosc = new ColumnOrSuperColumn();
    stateCosc.column = stateCol;

    ColumnOrSuperColumn zipCosc = new ColumnOrSuperColumn();
    zipCosc.column = zipCol;

    Mutation nameMut = new Mutation();
    nameMut.column_or_supercolumn = nameCosc;
    Mutation phoneMut = new Mutation();
    phoneMut.column_or_supercolumn = phoneCosc;
    Mutation addressMut = new Mutation();
    addressMut.column_or_supercolumn = addressCosc;
    Mutation cityMut = new Mutation();
    cityMut.column_or_supercolumn = cityCosc;
    Mutation stateMut = new Mutation();
    stateMut.column_or_supercolumn = stateCosc;
    Mutation zipMut = new Mutation();
    zipMut.column_or_supercolumn = zipCosc;

    // set up the batch
    Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();

    Map<String, List<Mutation>> muts = new HashMap<String, List<Mutation>>();
    List<Mutation> cols = new ArrayList<Mutation>();
    cols.add(nameMut);
    cols.add(phoneMut);
    cols.add(addressMut);
    cols.add(cityMut);
    cols.add(stateMut);
    cols.add(zipMut);

    muts.put(columnFamily, cols);

    // outer map key is a row key
    // inner map key is the column family name
    mutationMap.put(bytes(rowKey), muts);
    return mutationMap;
}
Project: hadoop-in-action    File: Prepopulate.java
private Map<ByteBuffer, Map<String, List<Mutation>>> createClarionMutation(String columnFamily, String rowKey)
        throws UnsupportedEncodingException {

    long ts = System.nanoTime();

    Column nameCol = new Column(bytes("name"));
    Column phoneCol = new Column(bytes("phone"));
    Column addressCol = new Column(bytes("address"));
    Column cityCol = new Column(bytes("city"));
    Column stateCol = new Column(bytes("state"));
    Column zipCol = new Column(bytes("zip"));

    ColumnOrSuperColumn nameCosc = new ColumnOrSuperColumn();
    nameCosc.column = nameCol;

    ColumnOrSuperColumn phoneCosc = new ColumnOrSuperColumn();
    phoneCosc.column = phoneCol;

    ColumnOrSuperColumn addressCosc = new ColumnOrSuperColumn();
    addressCosc.column = addressCol;

    ColumnOrSuperColumn cityCosc = new ColumnOrSuperColumn();
    cityCosc.column = cityCol;

    ColumnOrSuperColumn stateCosc = new ColumnOrSuperColumn();
    stateCosc.column = stateCol;

    ColumnOrSuperColumn zipCosc = new ColumnOrSuperColumn();
    zipCosc.column = zipCol;

    Mutation nameMut = new Mutation();
    nameMut.column_or_supercolumn = nameCosc;
    Mutation phoneMut = new Mutation();
    phoneMut.column_or_supercolumn = phoneCosc;
    Mutation addressMut = new Mutation();
    addressMut.column_or_supercolumn = addressCosc;
    Mutation cityMut = new Mutation();
    cityMut.column_or_supercolumn = cityCosc;
    Mutation stateMut = new Mutation();
    stateMut.column_or_supercolumn = stateCosc;
    Mutation zipMut = new Mutation();
    zipMut.column_or_supercolumn = zipCosc;

    // set up the batch
    Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();

    Map<String, List<Mutation>> muts = new HashMap<String, List<Mutation>>();
    List<Mutation> cols = new ArrayList<Mutation>();
    cols.add(nameMut);
    cols.add(phoneMut);
    cols.add(addressMut);
    cols.add(cityMut);
    cols.add(stateMut);
    cols.add(zipMut);

    muts.put(columnFamily, cols);

    // outer map key is a row key
    // inner map key is the column family name
    mutationMap.put(bytes(rowKey), muts);
    return mutationMap;
}
Project: hadoop-in-action    File: Prepopulate.java
private Map<ByteBuffer, Map<String, List<Mutation>>> createCambriaMutation(String columnFamily, String cambriaKey)
        throws UnsupportedEncodingException {

    // set up columns for Cambria
    long ts = System.nanoTime();

    Column cambriaNameCol = new Column(bytes("name"));
    Column cambriaPhoneCol = new Column(bytes("phone"));
    Column cambriaAddressCol = new Column(bytes("address"));
    Column cambriaCityCol = new Column(bytes("city"));
    Column cambriaStateCol = new Column(bytes("state"));
    Column cambriaZipCol = new Column(bytes("zip"));

    ColumnOrSuperColumn nameCosc = new ColumnOrSuperColumn();
    nameCosc.column = cambriaNameCol;

    ColumnOrSuperColumn phoneCosc = new ColumnOrSuperColumn();
    phoneCosc.column = cambriaPhoneCol;

    ColumnOrSuperColumn addressCosc = new ColumnOrSuperColumn();
    addressCosc.column = cambriaAddressCol;

    ColumnOrSuperColumn cityCosc = new ColumnOrSuperColumn();
    cityCosc.column = cambriaCityCol;

    ColumnOrSuperColumn stateCosc = new ColumnOrSuperColumn();
    stateCosc.column = cambriaStateCol;

    ColumnOrSuperColumn zipCosc = new ColumnOrSuperColumn();
    zipCosc.column = cambriaZipCol;

    Mutation nameMut = new Mutation();
    nameMut.column_or_supercolumn = nameCosc;
    Mutation phoneMut = new Mutation();
    phoneMut.column_or_supercolumn = phoneCosc;
    Mutation addressMut = new Mutation();
    addressMut.column_or_supercolumn = addressCosc;
    Mutation cityMut = new Mutation();
    cityMut.column_or_supercolumn = cityCosc;
    Mutation stateMut = new Mutation();
    stateMut.column_or_supercolumn = stateCosc;
    Mutation zipMut = new Mutation();
    zipMut.column_or_supercolumn = zipCosc;

    // set up the batch
    Map<ByteBuffer, Map<String, List<Mutation>>> cambriaMutationMap = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();

    Map<String, List<Mutation>> cambriaMuts = new HashMap<String, List<Mutation>>();
    List<Mutation> cambriaCols = new ArrayList<Mutation>();
    cambriaCols.add(nameMut);
    cambriaCols.add(phoneMut);
    cambriaCols.add(addressMut);
    cambriaCols.add(cityMut);
    cambriaCols.add(stateMut);
    cambriaCols.add(zipMut);

    cambriaMuts.put(columnFamily, cambriaCols);

    // outer map key is a row key
    // inner map key is the column family name
    cambriaMutationMap.put(bytes(cambriaKey), cambriaMuts);
    return cambriaMutationMap;
}
Project: hive-cassandra    File: CassandraPut.java
@Override
public void write(String keySpace, CassandraProxyClient client, JobConf jc) throws IOException {
    ConsistencyLevel flevel = getConsistencyLevel(jc);
    int batchMutation = getBatchMutationSize(jc);
    Map<ByteBuffer, Map<String, List<Mutation>>> mutation_map = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();

    Map<String, List<Mutation>> maps = new HashMap<String, List<Mutation>>();

    int count = 0;
    // TODO check for counter
    for (CassandraColumn col : columns) {
        Column cassCol = new Column();
        cassCol.setValue(col.getValue());
        cassCol.setTimestamp(col.getTimeStamp());
        cassCol.setName(col.getColumn());

        ColumnOrSuperColumn thisCol = new ColumnOrSuperColumn();
        thisCol.setColumn(cassCol);

        Mutation mutation = new Mutation();
        mutation.setColumn_or_supercolumn(thisCol);

        List<Mutation> mutList = maps.get(col.getColumnFamily());
        if (mutList == null) {
            mutList = new ArrayList<Mutation>();
            maps.put(col.getColumnFamily(), mutList);
        }

        mutList.add(mutation);
        count++;

        if (count == batchMutation) {
            mutation_map.put(key, maps);

            commitChanges(keySpace, client, flevel, mutation_map);

            //reset mutation map, maps and count;
            mutation_map = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
            maps = new HashMap<String, List<Mutation>>();
            count = 0;
        }
    }

    if (count > 0) {
        mutation_map.put(key, maps);
        commitChanges(keySpace, client, flevel, mutation_map);
    }
}
Project: hive-cassandra    File: CassandraSuperPut.java
@Override
public void write(String keySpace, CassandraProxyClient client, JobConf jc) throws IOException {
    ConsistencyLevel flevel = getConsistencyLevel(jc);
    int batchMutation = getBatchMutationSize(jc);
    Map<ByteBuffer, Map<String, List<Mutation>>> mutation_map = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
    Map<String, List<Mutation>> maps = new HashMap<String, List<Mutation>>();

    int count = 0;
    for (CassandraPut c : subColumns) {
        List<Column> columns = new ArrayList<Column>();
        for (CassandraColumn col : c.getColumns()) {
            Column cassCol = new Column();
            cassCol.setValue(col.getValue());
            cassCol.setTimestamp(col.getTimeStamp());
            cassCol.setName(col.getColumn());
            columns.add(cassCol);

            ColumnOrSuperColumn thisSuperCol = new ColumnOrSuperColumn();
            thisSuperCol.setSuper_column(new SuperColumn(c.getKey(), columns));

            Mutation mutation = new Mutation();
            mutation.setColumn_or_supercolumn(thisSuperCol);

            List<Mutation> mutList = maps.get(col.getColumnFamily());
            if (mutList == null) {
                mutList = new ArrayList<Mutation>();
                maps.put(col.getColumnFamily(), mutList);
            }

            mutList.add(mutation);

            count++;

            if (count == batchMutation) {
                mutation_map.put(key, maps);

                commitChanges(keySpace, client, flevel, mutation_map);

                //reset mutation map, maps and count;
                mutation_map = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
                maps = new HashMap<String, List<Mutation>>();
                count = 0;
            }

        }
    }

    if (count > 0) {
        mutation_map.put(key, maps);
        commitChanges(keySpace, client, flevel, mutation_map);
    }
}
Project: Hive-Cassandra    File: CassandraPut.java
@Override
public void write(String keySpace, CassandraProxyClient client, JobConf jc) throws IOException {
  ConsistencyLevel flevel = getConsistencyLevel(jc);
  int batchMutation = getBatchMutationSize(jc);
  Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>();

  Map<String, List<Mutation>> maps = new HashMap<String, List<Mutation>>();

  int count = 0;
  // TODO check for counter
  for (CassandraColumn col : columns) {
    Column cassCol = new Column();
    cassCol.setValue(col.getValue());
    cassCol.setTimestamp(col.getTimeStamp());
    cassCol.setName(col.getColumn());

    ColumnOrSuperColumn thisCol = new ColumnOrSuperColumn();
    thisCol.setColumn(cassCol);

    Mutation mutation = new Mutation();
    mutation.setColumn_or_supercolumn(thisCol);

    List<Mutation> mutList = maps.get(col.getColumnFamily());
    if (mutList == null) {
      mutList = new ArrayList<Mutation>();
      maps.put(col.getColumnFamily(), mutList);
    }

    mutList.add(mutation);
    count ++;

    if (count == batchMutation) {
      mutation_map.put(key, maps);

      commitChanges(keySpace, client, flevel, mutation_map);

      //reset mutation map, maps and count;
      mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>();
      maps = new HashMap<String, List<Mutation>>();
      count = 0;
    }
  }

  if(count > 0) {
    mutation_map.put(key, maps);
    commitChanges(keySpace, client, flevel, mutation_map);
  }
}
Project: Hive-Cassandra    File: CassandraSuperPut.java
@Override
public void write(String keySpace, CassandraProxyClient client, JobConf jc) throws IOException {
  ConsistencyLevel flevel = getConsistencyLevel(jc);
  int batchMutation = getBatchMutationSize(jc);
  Map<ByteBuffer,Map<String,List<Mutation>>> mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>();
  Map<String, List<Mutation>> maps = new HashMap<String, List<Mutation>>();

  int count = 0;
  for (CassandraPut c : subColumns) {
    List<Column> columns = new ArrayList<Column>();
    for (CassandraColumn col : c.getColumns()) {
      Column cassCol = new Column();
      cassCol.setValue(col.getValue());
      cassCol.setTimestamp(col.getTimeStamp());
      cassCol.setName(col.getColumn());
      columns.add(cassCol);

      ColumnOrSuperColumn thisSuperCol = new ColumnOrSuperColumn();
      thisSuperCol.setSuper_column(new SuperColumn(c.getKey(), columns));

      Mutation mutation = new Mutation();
      mutation.setColumn_or_supercolumn(thisSuperCol);

      List<Mutation> mutList = maps.get(col.getColumnFamily());
      if (mutList == null) {
        mutList = new ArrayList<Mutation>();
        maps.put(col.getColumnFamily(), mutList);
      }

      mutList.add(mutation);

      count ++;

      if (count == batchMutation) {
        mutation_map.put(key, maps);

        commitChanges(keySpace, client, flevel, mutation_map);

        //reset mutation map, maps and count;
        mutation_map = new HashMap<ByteBuffer,Map<String,List<Mutation>>>();
        maps = new HashMap<String, List<Mutation>>();
        count = 0;
      }

    }
  }

  if(count > 0) {
    mutation_map.put(key, maps);
    commitChanges(keySpace, client, flevel, mutation_map);
  }
}
Project: CadalWorkspace    File: CassandraInsert.java
/**
 * Insert into IpUser Column Family
 */
public boolean InsertIpUser(String ip, String name, String time, int times) {
    System.out.println("------------InsertIpUser--------------");
    String IP_KEY = ip;
    String NAME_SUPER_KEY = name;
    String TIMES = String.valueOf(times);
    String LAST = this.tc.NormalTime(time);

    String COLUMN_NAME_TIMES = "times";
    String COLUMN_NAME_LAST  = "last";

    String COLUMN_FAMILY_NAME = "IpUser";

    try {
        Map<ByteBuffer, Map<String, List<Mutation>>> outerMap = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
        List<Mutation> columnToAdd = new ArrayList<Mutation>();

        long timeStamp = System.currentTimeMillis();

        // Construct Column
        Column idColumnTimes = new Column();
        idColumnTimes.setName(this.cassandraUtil.toByteBuffer(COLUMN_NAME_TIMES));
        idColumnTimes.setValue(this.cassandraUtil.toByteBuffer(TIMES));
        idColumnTimes.setTimestamp(timeStamp);

        Column idColumnLast = new Column();
        idColumnLast.setName(this.cassandraUtil.toByteBuffer(COLUMN_NAME_LAST));
        idColumnLast.setValue(this.cassandraUtil.toByteBuffer(LAST));
        idColumnLast.setTimestamp(timeStamp);

        List<Column> cols = new ArrayList<Column>();
        cols.add(idColumnTimes);
        cols.add(idColumnLast);

        // Map Key-super_key-column
        Map<String, List<Mutation>> innerMap = new HashMap<String, List<Mutation>>();

        Mutation columns = new Mutation();
        ColumnOrSuperColumn columnOrSuperColumn = new ColumnOrSuperColumn();
        SuperColumn superColumn = new SuperColumn();

        superColumn.name = this.cassandraUtil.toByteBuffer(NAME_SUPER_KEY);
        superColumn.columns = cols;

        columnOrSuperColumn.super_column = superColumn;
        columns.setColumn_or_supercolumn(columnOrSuperColumn);

        columnToAdd.add(columns);

        ColumnPath columnPath = new ColumnPath();
        columnPath.column_family = COLUMN_FAMILY_NAME;
        columnPath.setSuper_column(this.cassandraUtil.toByteBuffer(NAME_SUPER_KEY));
        columnPath.setSuper_columnIsSet(true);

        innerMap.put(COLUMN_FAMILY_NAME, columnToAdd);

        // Insert Operator
        outerMap.put(this.cassandraUtil.toByteBuffer(IP_KEY), innerMap);

        client.batch_mutate(outerMap, ConsistencyLevel.ONE);

        return true;
    }catch(Exception e){
        e.printStackTrace();
        return false;
    }
}