Java class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations usage examples

The snippets below, collected from a number of Hadoop forks, show the three main uses of this class: building instances in BlockManager/FSNamesystem#addBlock, converting them to and from protobuf in PBHelper, and consuming them in the Balancer's Dispatcher#getBlockList.

Project: hadoop    File: BlockManager.java
/**
 * Get all valid locations of the block and add the block to results.
 * @return the length of the added block; 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> locations = getValidLocations(block);
  if(locations.size() == 0) {
    return 0;
  } else {
    final String[] datanodeUuids = new String[locations.size()];
    final String[] storageIDs = new String[datanodeUuids.length];
    final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
    for(int i = 0; i < locations.size(); i++) {
      final DatanodeStorageInfo s = locations.get(i);
      datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
      storageIDs[i] = s.getStorageID();
      storageTypes[i] = s.getStorageType();
    }
    results.add(new BlockWithLocations(block, datanodeUuids, storageIDs,
        storageTypes));
    return block.getNumBytes();
  }
}
Project: aliyun-oss-hadoop-fs    File: TestPBHelper.java
private static BlockWithLocations getBlockWithLocations(
    int bid, boolean isStriped) {
  final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
  final String[] storageIDs = {"s1", "s2", "s3"};
  final StorageType[] storageTypes = {
      StorageType.DISK, StorageType.DISK, StorageType.DISK};
  final byte[] indices = {0, 1, 2};
  final short dataBlkNum = 6;
  BlockWithLocations blkLocs = new BlockWithLocations(new Block(bid, 0, 1),
      datanodeUuids, storageIDs, storageTypes);
  if (isStriped) {
    blkLocs = new StripedBlockWithLocations(blkLocs, indices, dataBlkNum,
        StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE);
  }
  return blkLocs;
}
Project: aliyun-oss-hadoop-fs    File: TestPBHelper.java
@Test
public void testConvertBlocksWithLocations() {
  boolean[] testSuite = new boolean[]{false, true};
  for (int i = 0; i < testSuite.length; i++) {
    BlockWithLocations[] list = new BlockWithLocations[]{
        getBlockWithLocations(1, testSuite[i]),
        getBlockWithLocations(2, testSuite[i])};
    BlocksWithLocations locs = new BlocksWithLocations(list);
    BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
    BlocksWithLocations locs2 = PBHelper.convert(locsProto);
    BlockWithLocations[] blocks = locs.getBlocks();
    BlockWithLocations[] blocks2 = locs2.getBlocks();
    assertEquals(blocks.length, blocks2.length);
    for (int j = 0; j < blocks.length; j++) {
      compare(blocks[j], blocks2[j]);
    }
  }
}
Project: big-c    File: BlockManager.java
/**
 * Get all valid locations of the block and add the block to results.
 * @return the length of the added block; 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> locations = getValidLocations(block);
  if(locations.size() == 0) {
    return 0;
  } else {
    final String[] datanodeUuids = new String[locations.size()];
    final String[] storageIDs = new String[datanodeUuids.length];
    final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
    for(int i = 0; i < locations.size(); i++) {
      final DatanodeStorageInfo s = locations.get(i);
      datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
      storageIDs[i] = s.getStorageID();
      storageTypes[i] = s.getStorageType();
    }
    results.add(new BlockWithLocations(block, datanodeUuids, storageIDs,
        storageTypes));
    return block.getNumBytes();
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: BlockManager.java
/**
 * Get all valid locations of the block and add the block to results.
 * @return the length of the added block; 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> locations = getValidLocations(block);
  if(locations.size() == 0) {
    return 0;
  } else {
    final String[] datanodeUuids = new String[locations.size()];
    final String[] storageIDs = new String[datanodeUuids.length];
    final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
    for(int i = 0; i < locations.size(); i++) {
      final DatanodeStorageInfo s = locations.get(i);
      datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
      storageIDs[i] = s.getStorageID();
      storageTypes[i] = s.getStorageType();
    }
    results.add(new BlockWithLocations(block, datanodeUuids, storageIDs,
        storageTypes));
    return block.getNumBytes();
  }
}
Project: hadoop-EAR    File: FSNamesystem.java
/**
 * Get all valid locations of the block and add the block to results.
 * @return the length of the added block; 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet =
    new ArrayList<String>(blocksMap.numNodes(block));
  for (Iterator<DatanodeDescriptor> it =
    blocksMap.nodeIterator(block); it.hasNext();) {
    String storageID = it.next().getStorageID();
    // filter out replicas that are scheduled for invalidation
    LightWeightHashSet<Block> blocks = recentInvalidateSets.get(storageID);
    if (blocks == null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if (machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block,
      machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
Project: FlexMap    File: BlockManager.java
/**
 * Get all valid locations of the block and add the block to results.
 * @return the length of the added block; 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> locations = getValidLocations(block);
  if(locations.size() == 0) {
    return 0;
  } else {
    final String[] datanodeUuids = new String[locations.size()];
    final String[] storageIDs = new String[datanodeUuids.length];
    final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
    for(int i = 0; i < locations.size(); i++) {
      final DatanodeStorageInfo s = locations.get(i);
      datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
      storageIDs[i] = s.getStorageID();
      storageTypes[i] = s.getStorageType();
    }
    results.add(new BlockWithLocations(block, datanodeUuids, storageIDs,
        storageTypes));
    return block.getNumBytes();
  }
}
Project: hadoop-on-lustre    File: FSNamesystem.java
/**
 * Get all valid locations of the block and add the block to results.
 * @return the length of the added block; 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet =
    new ArrayList<String>(blocksMap.numNodes(block));
  for(Iterator<DatanodeDescriptor> it = 
    blocksMap.nodeIterator(block); it.hasNext();) {
    String storageID = it.next().getStorageID();
    // filter out replicas that are scheduled for invalidation
    Collection<Block> blocks = recentInvalidateSets.get(storageID); 
    if(blocks==null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if(machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block, 
        machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
Project: hadoop-on-lustre2    File: BlockManager.java
/**
 * Get all valid locations of the block and add the block to results.
 * @return the length of the added block; 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> locations = getValidLocations(block);
  if(locations.size() == 0) {
    return 0;
  } else {
    final String[] datanodeUuids = new String[locations.size()];
    final String[] storageIDs = new String[datanodeUuids.length];
    for(int i = 0; i < locations.size(); i++) {
      final DatanodeStorageInfo s = locations.get(i);
      datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
      storageIDs[i] = s.getStorageID();
    }
    results.add(new BlockWithLocations(block, datanodeUuids, storageIDs));
    return block.getNumBytes();
  }
}
Project: RDFS    File: FSNamesystem.java
/**
 * Get all valid locations of the block and add the block to results.
 * @return the length of the added block; 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet =
    new ArrayList<String>(blocksMap.numNodes(block));
  for (Iterator<DatanodeDescriptor> it =
    blocksMap.nodeIterator(block); it.hasNext();) {
    String storageID = it.next().getStorageID();
    // filter out replicas that are scheduled for invalidation
    LightWeightHashSet<Block> blocks = recentInvalidateSets.get(storageID);
    if (blocks == null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if (machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block,
      machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
Project: hadoop-0.20    File: FSNamesystem.java
/**
 * Get all valid locations of the block and add the block to results.
 * @return the length of the added block; 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet =
    new ArrayList<String>(blocksMap.numNodes(block));
  for(Iterator<DatanodeDescriptor> it = 
    blocksMap.nodeIterator(block); it.hasNext();) {
    String storageID = it.next().getStorageID();
    // filter out replicas that are scheduled for invalidation
    Collection<Block> blocks = recentInvalidateSets.get(storageID); 
    if(blocks==null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if(machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block, 
        machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
Project: hortonworks-extension    File: FSNamesystem.java
/**
 * Get all valid locations of the block and add the block to results.
 * @return the length of the added block; 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet =
    new ArrayList<String>(blocksMap.numNodes(block));
  for(Iterator<DatanodeDescriptor> it = 
    blocksMap.nodeIterator(block); it.hasNext();) {
    String storageID = it.next().getStorageID();
    // filter out replicas that are scheduled for invalidation
    Collection<Block> blocks = recentInvalidateSets.get(storageID); 
    if(blocks==null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if(machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block, 
        machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
Project: hadoop-gpu    File: FSNamesystem.java
/**
 * Get all valid locations of the block and add the block to results.
 * @return the length of the added block; 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet =
    new ArrayList<String>(blocksMap.numNodes(block));
  for(Iterator<DatanodeDescriptor> it = 
    blocksMap.nodeIterator(block); it.hasNext();) {
    String storageID = it.next().getStorageID();
    // filter out replicas that are scheduled for invalidation
    Collection<Block> blocks = recentInvalidateSets.get(storageID); 
    if(blocks==null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if(machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block, 
        machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
Project: hadoop    File: Dispatcher.java
/**
 * Fetch new blocks of this source from the namenode and update this source's
 * block list and {@link Dispatcher#globalBlocks}.
 *
 * @return the total size of the received blocks, in bytes.
 */
private long getBlockList() throws IOException {
  final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
  final BlocksWithLocations newBlocks = nnc.getBlocks(getDatanodeInfo(), size);

  long bytesReceived = 0;
  for (BlockWithLocations blk : newBlocks.getBlocks()) {
    bytesReceived += blk.getBlock().getNumBytes();
    synchronized (globalBlocks) {
      final DBlock block = globalBlocks.get(blk.getBlock());
      synchronized (block) {
        block.clearLocations();

        // update locations
        final String[] datanodeUuids = blk.getDatanodeUuids();
        final StorageType[] storageTypes = blk.getStorageTypes();
        for (int i = 0; i < datanodeUuids.length; i++) {
          final StorageGroup g = storageGroupMap.get(
              datanodeUuids[i], storageTypes[i]);
          if (g != null) { // not unknown
            block.addLocation(g);
          }
        }
      }
      if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
        // filter bad candidates
        srcBlocks.add(block);
      }
    }
  }
  return bytesReceived;
}
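The Dispatcher snippet above shows the consumption pattern: iterate BlocksWithLocations#getBlocks() and walk the parallel datanodeUuids/storageIDs/storageTypes arrays, one index per replica. A minimal standalone sketch of just that walk (the class and method names here are hypothetical, and the StorageType import path varies by branch, e.g. org.apache.hadoop.fs.StorageType in recent ones):

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;

// Hypothetical helper: prints every replica location carried by a
// BlocksWithLocations, mirroring the parallel-array iteration in
// Dispatcher#getBlockList.
public class BlockLocationsWalk {
  static void printLocations(BlocksWithLocations blocks) {
    for (BlockWithLocations blk : blocks.getBlocks()) {
      final String[] datanodeUuids = blk.getDatanodeUuids();
      final String[] storageIDs = blk.getStorageIDs();
      final StorageType[] storageTypes = blk.getStorageTypes();
      for (int i = 0; i < datanodeUuids.length; i++) {
        // Index i describes one replica: datanode UUID, storage ID, media type.
        System.out.println(blk.getBlock() + " @ " + datanodeUuids[i]
            + "/" + storageIDs[i] + " (" + storageTypes[i] + ")");
      }
    }
  }
}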
Project: hadoop    File: PBHelper.java
public static BlockWithLocationsProto convert(BlockWithLocations blk) {
  return BlockWithLocationsProto.newBuilder()
      .setBlock(convert(blk.getBlock()))
      .addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()))
      .addAllStorageUuids(Arrays.asList(blk.getStorageIDs()))
      .addAllStorageTypes(convertStorageTypes(blk.getStorageTypes()))
      .build();
}
Project: hadoop    File: PBHelper.java
public static BlockWithLocations convert(BlockWithLocationsProto b) {
  final List<String> datanodeUuids = b.getDatanodeUuidsList();
  final List<String> storageUuids = b.getStorageUuidsList();
  final List<StorageTypeProto> storageTypes = b.getStorageTypesList();
  return new BlockWithLocations(convert(b.getBlock()),
      datanodeUuids.toArray(new String[datanodeUuids.size()]),
      storageUuids.toArray(new String[storageUuids.size()]),
      convertStorageTypes(storageTypes, storageUuids.size()));
}
Project: hadoop    File: PBHelper.java
public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
  BlocksWithLocationsProto.Builder builder = BlocksWithLocationsProto
      .newBuilder();
  for (BlockWithLocations b : blks.getBlocks()) {
    builder.addBlocks(convert(b));
  }
  return builder.build();
}
Project: hadoop    File: PBHelper.java
public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) {
  List<BlockWithLocationsProto> b = blocks.getBlocksList();
  BlockWithLocations[] ret = new BlockWithLocations[b.size()];
  int i = 0;
  for (BlockWithLocationsProto entry : b) {
    ret[i++] = convert(entry);
  }
  return new BlocksWithLocations(ret);
}
Project: hadoop    File: TestPBHelper.java
private static BlockWithLocations getBlockWithLocations(int bid) {
  final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
  final String[] storageIDs = {"s1", "s2", "s3"};
  final StorageType[] storageTypes = {
      StorageType.DISK, StorageType.DISK, StorageType.DISK};
  return new BlockWithLocations(new Block(bid, 0, 1),
      datanodeUuids, storageIDs, storageTypes);
}
Project: hadoop    File: TestPBHelper.java
@Test
public void testConvertBlockWithLocations() {
  BlockWithLocations locs = getBlockWithLocations(1);
  BlockWithLocationsProto locsProto = PBHelper.convert(locs);
  BlockWithLocations locs2 = PBHelper.convert(locsProto);
  compare(locs, locs2);
}
Project: hadoop    File: TestPBHelper.java
@Test
public void testConvertBlocksWithLocations() {
  BlockWithLocations[] list = new BlockWithLocations[] {
      getBlockWithLocations(1), getBlockWithLocations(2) };
  BlocksWithLocations locs = new BlocksWithLocations(list);
  BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
  BlocksWithLocations locs2 = PBHelper.convert(locsProto);
  BlockWithLocations[] blocks = locs.getBlocks();
  BlockWithLocations[] blocks2 = locs2.getBlocks();
  assertEquals(blocks.length, blocks2.length);
  for (int i = 0; i < blocks.length; i++) {
    compare(blocks[i], blocks2[i]);
  }
}
Project: aliyun-oss-hadoop-fs    File: BlockManager.java
/**
 * Get all valid locations of the block and add the block to results.
 * @return the length of the added block; 0 if the block is not added. If the
 * added block is a block group, return its approximate internal block size
 */
private long addBlock(BlockInfo block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> locations = getValidLocations(block);
  if(locations.size() == 0) {
    return 0;
  } else {
    final String[] datanodeUuids = new String[locations.size()];
    final String[] storageIDs = new String[datanodeUuids.length];
    final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
    for(int i = 0; i < locations.size(); i++) {
      final DatanodeStorageInfo s = locations.get(i);
      datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
      storageIDs[i] = s.getStorageID();
      storageTypes[i] = s.getStorageType();
    }
    BlockWithLocations blkWithLocs = new BlockWithLocations(block,
        datanodeUuids, storageIDs, storageTypes);
    if(block.isStriped()) {
      BlockInfoStriped blockStriped = (BlockInfoStriped) block;
      byte[] indices = new byte[locations.size()];
      for (int i = 0; i < locations.size(); i++) {
        indices[i] =
            (byte) blockStriped.getStorageBlockIndex(locations.get(i));
      }
      results.add(new StripedBlockWithLocations(blkWithLocs, indices,
          blockStriped.getDataBlockNum(), blockStriped.getCellSize()));
      // approximate size
      return block.getNumBytes() / blockStriped.getDataBlockNum();
    } else {
      results.add(blkWithLocs);
      return block.getNumBytes();
    }
  }
}
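A note on the striped return value above: each of the getDataBlockNum() data blocks in a block group holds roughly an equal share of the data, so a group with six data blocks and getNumBytes() = 384 MB reports an approximate internal block size of 384 / 6 = 64 MB (figures illustrative, not from the source).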
Project: aliyun-oss-hadoop-fs    File: PBHelper.java
public static BlockWithLocationsProto convert(BlockWithLocations blk) {
  BlockWithLocationsProto.Builder builder = BlockWithLocationsProto
      .newBuilder().setBlock(PBHelperClient.convert(blk.getBlock()))
      .addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()))
      .addAllStorageUuids(Arrays.asList(blk.getStorageIDs()))
      .addAllStorageTypes(PBHelperClient.convertStorageTypes(blk.getStorageTypes()));
  if (blk instanceof StripedBlockWithLocations) {
    StripedBlockWithLocations sblk = (StripedBlockWithLocations) blk;
    builder.setIndices(PBHelperClient.getByteString(sblk.getIndices()));
    builder.setDataBlockNum(sblk.getDataBlockNum());
    builder.setCellSize(sblk.getCellSize());
  }
  return builder.build();
}
Project: aliyun-oss-hadoop-fs    File: PBHelper.java
public static BlockWithLocations convert(BlockWithLocationsProto b) {
  final List<String> datanodeUuids = b.getDatanodeUuidsList();
  final List<String> storageUuids = b.getStorageUuidsList();
  final List<StorageTypeProto> storageTypes = b.getStorageTypesList();
  BlockWithLocations blk = new BlockWithLocations(PBHelperClient.
      convert(b.getBlock()),
      datanodeUuids.toArray(new String[datanodeUuids.size()]),
      storageUuids.toArray(new String[storageUuids.size()]),
      PBHelperClient.convertStorageTypes(storageTypes, storageUuids.size()));
  if (b.hasIndices()) {
    blk = new StripedBlockWithLocations(blk, b.getIndices().toByteArray(),
        (short) b.getDataBlockNum(), b.getCellSize());
  }
  return blk;
}
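Because this convert(BlockWithLocationsProto) rebuilds a StripedBlockWithLocations whenever the proto carries indices, striping metadata survives a full round trip. A minimal sketch against the aliyun-oss-hadoop-fs APIs shown above; the literal indices, data-block count, and cell size are illustrative values, and the exact proto/StorageType import paths vary by branch:

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;

public class StripedRoundTripSketch {
  public static void main(String[] args) {
    // Block(blockId, numBytes, generationStamp), as in the tests above.
    BlockWithLocations plain = new BlockWithLocations(new Block(1, 0, 1),
        new String[]{"dn1", "dn2", "dn3"},   // datanode UUIDs
        new String[]{"s1", "s2", "s3"},      // storage IDs
        new StorageType[]{StorageType.DISK, StorageType.DISK, StorageType.DISK});

    // Wrap with striping metadata: internal-block indices, data-block count,
    // cell size (values illustrative).
    BlockWithLocations striped = new StripedBlockWithLocations(
        plain, new byte[]{0, 1, 2}, (short) 6, 64 * 1024);

    // The proto gains indices/dataBlockNum/cellSize, so the reverse convert
    // restores the StripedBlockWithLocations subclass.
    BlockWithLocationsProto proto = PBHelper.convert(striped);
    BlockWithLocations back = PBHelper.convert(proto);
    System.out.println(back instanceof StripedBlockWithLocations); // prints: true
  }
}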
Project: aliyun-oss-hadoop-fs    File: PBHelper.java
public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
  BlocksWithLocationsProto.Builder builder = BlocksWithLocationsProto
      .newBuilder();
  for (BlockWithLocations b : blks.getBlocks()) {
    builder.addBlocks(convert(b));
  }
  return builder.build();
}
Project: aliyun-oss-hadoop-fs    File: PBHelper.java
public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) {
  List<BlockWithLocationsProto> b = blocks.getBlocksList();
  BlockWithLocations[] ret = new BlockWithLocations[b.size()];
  int i = 0;
  for (BlockWithLocationsProto entry : b) {
    ret[i++] = convert(entry);
  }
  return new BlocksWithLocations(ret);
}
Project: aliyun-oss-hadoop-fs    File: TestPBHelper.java
private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {
  assertEquals(locs1.getBlock(), locs2.getBlock());
  assertTrue(Arrays.equals(locs1.getStorageIDs(), locs2.getStorageIDs()));
  if (locs1 instanceof StripedBlockWithLocations) {
    assertTrue(Arrays.equals(((StripedBlockWithLocations) locs1).getIndices(),
        ((StripedBlockWithLocations) locs2).getIndices()));
  }
}
Project: aliyun-oss-hadoop-fs    File: TestPBHelper.java
@Test
public void testConvertBlockWithLocations() {
  boolean[] testSuite = new boolean[]{false, true};
  for (int i = 0; i < testSuite.length; i++) {
    BlockWithLocations locs = getBlockWithLocations(1, testSuite[i]);
    BlockWithLocationsProto locsProto = PBHelper.convert(locs);
    BlockWithLocations locs2 = PBHelper.convert(locsProto);
    compare(locs, locs2);
  }
}
Project: big-c    File: Dispatcher.java
/**
 * Fetch new blocks of this source from the namenode and update this source's
 * block list and {@link Dispatcher#globalBlocks}.
 *
 * @return the total size of the received blocks, in bytes.
 */
private long getBlockList() throws IOException {
  final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
  final BlocksWithLocations newBlocks = nnc.getBlocks(getDatanodeInfo(), size);

  long bytesReceived = 0;
  for (BlockWithLocations blk : newBlocks.getBlocks()) {
    bytesReceived += blk.getBlock().getNumBytes();
    synchronized (globalBlocks) {
      final DBlock block = globalBlocks.get(blk.getBlock());
      synchronized (block) {
        block.clearLocations();

        // update locations
        final String[] datanodeUuids = blk.getDatanodeUuids();
        final StorageType[] storageTypes = blk.getStorageTypes();
        for (int i = 0; i < datanodeUuids.length; i++) {
          final StorageGroup g = storageGroupMap.get(
              datanodeUuids[i], storageTypes[i]);
          if (g != null) { // not unknown
            block.addLocation(g);
          }
        }
      }
      if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
        // filter bad candidates
        srcBlocks.add(block);
      }
    }
  }
  return bytesReceived;
}
Project: big-c    File: PBHelper.java
public static BlockWithLocationsProto convert(BlockWithLocations blk) {
  return BlockWithLocationsProto.newBuilder()
      .setBlock(convert(blk.getBlock()))
      .addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()))
      .addAllStorageUuids(Arrays.asList(blk.getStorageIDs()))
      .addAllStorageTypes(convertStorageTypes(blk.getStorageTypes()))
      .build();
}
Project: big-c    File: PBHelper.java
public static BlockWithLocations convert(BlockWithLocationsProto b) {
  final List<String> datanodeUuids = b.getDatanodeUuidsList();
  final List<String> storageUuids = b.getStorageUuidsList();
  final List<StorageTypeProto> storageTypes = b.getStorageTypesList();
  return new BlockWithLocations(convert(b.getBlock()),
      datanodeUuids.toArray(new String[datanodeUuids.size()]),
      storageUuids.toArray(new String[storageUuids.size()]),
      convertStorageTypes(storageTypes, storageUuids.size()));
}
Project: big-c    File: PBHelper.java
public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
  BlocksWithLocationsProto.Builder builder = BlocksWithLocationsProto
      .newBuilder();
  for (BlockWithLocations b : blks.getBlocks()) {
    builder.addBlocks(convert(b));
  }
  return builder.build();
}
Project: big-c    File: PBHelper.java
public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) {
  List<BlockWithLocationsProto> b = blocks.getBlocksList();
  BlockWithLocations[] ret = new BlockWithLocations[b.size()];
  int i = 0;
  for (BlockWithLocationsProto entry : b) {
    ret[i++] = convert(entry);
  }
  return new BlocksWithLocations(ret);
}
Project: big-c    File: TestPBHelper.java
private static BlockWithLocations getBlockWithLocations(int bid) {
  final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
  final String[] storageIDs = {"s1", "s2", "s3"};
  final StorageType[] storageTypes = {
      StorageType.DISK, StorageType.DISK, StorageType.DISK};
  return new BlockWithLocations(new Block(bid, 0, 1),
      datanodeUuids, storageIDs, storageTypes);
}
Project: big-c    File: TestPBHelper.java
@Test
public void testConvertBlockWithLocations() {
  BlockWithLocations locs = getBlockWithLocations(1);
  BlockWithLocationsProto locsProto = PBHelper.convert(locs);
  BlockWithLocations locs2 = PBHelper.convert(locsProto);
  compare(locs, locs2);
}
Project: big-c    File: TestPBHelper.java
@Test
public void testConvertBlocksWithLocations() {
  BlockWithLocations[] list = new BlockWithLocations[] {
      getBlockWithLocations(1), getBlockWithLocations(2) };
  BlocksWithLocations locs = new BlocksWithLocations(list);
  BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
  BlocksWithLocations locs2 = PBHelper.convert(locsProto);
  BlockWithLocations[] blocks = locs.getBlocks();
  BlockWithLocations[] blocks2 = locs2.getBlocks();
  assertEquals(blocks.length, blocks2.length);
  for (int i = 0; i < blocks.length; i++) {
    compare(blocks[i], blocks2[i]);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: Dispatcher.java
/**
 * Fetch new blocks of this source from the namenode and update this source's
 * block list and {@link Dispatcher#globalBlocks}.
 *
 * @return the total size of the received blocks, in bytes.
 */
private long getBlockList() throws IOException {
  final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
  final BlocksWithLocations newBlocks = nnc.getBlocks(getDatanodeInfo(), size);

  long bytesReceived = 0;
  for (BlockWithLocations blk : newBlocks.getBlocks()) {
    bytesReceived += blk.getBlock().getNumBytes();
    synchronized (globalBlocks) {
      final DBlock block = globalBlocks.get(blk.getBlock());
      synchronized (block) {
        block.clearLocations();

        // update locations
        final String[] datanodeUuids = blk.getDatanodeUuids();
        final StorageType[] storageTypes = blk.getStorageTypes();
        for (int i = 0; i < datanodeUuids.length; i++) {
          final StorageGroup g = storageGroupMap.get(
              datanodeUuids[i], storageTypes[i]);
          if (g != null) { // not unknown
            block.addLocation(g);
          }
        }
      }
      if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
        // filter bad candidates
        srcBlocks.add(block);
      }
    }
  }
  return bytesReceived;
}
Project: hadoop-2.6.0-cdh5.4.3    File: PBHelper.java
public static BlockWithLocationsProto convert(BlockWithLocations blk) {
  return BlockWithLocationsProto.newBuilder()
      .setBlock(convert(blk.getBlock()))
      .addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()))
      .addAllStorageUuids(Arrays.asList(blk.getStorageIDs()))
      .addAllStorageTypes(convertStorageTypes(blk.getStorageTypes()))
      .build();
}
Project: hadoop-2.6.0-cdh5.4.3    File: PBHelper.java
public static BlockWithLocations convert(BlockWithLocationsProto b) {
  final List<String> datanodeUuids = b.getDatanodeUuidsList();
  final List<String> storageUuids = b.getStorageUuidsList();
  final List<StorageTypeProto> storageTypes = b.getStorageTypesList();
  return new BlockWithLocations(convert(b.getBlock()),
      datanodeUuids.toArray(new String[datanodeUuids.size()]),
      storageUuids.toArray(new String[storageUuids.size()]),
      convertStorageTypes(storageTypes, storageUuids.size()));
}