Java class org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo code examples

Project: hadoop    File: INodeFile.java
@Override // BlockCollection, the file should be under construction
public BlockInfoContiguousUnderConstruction setLastBlock(
    BlockInfoContiguous lastBlock, DatanodeStorageInfo[] locations)
    throws IOException {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");

  if (numBlocks() == 0) {
    throw new IOException("Failed to set last block: File is empty.");
  }
  BlockInfoContiguousUnderConstruction ucBlock =
    lastBlock.convertToBlockUnderConstruction(
        BlockUCState.UNDER_CONSTRUCTION, locations);
  setBlock(numBlocks() - 1, ucBlock);
  return ucBlock;
}
Project: hadoop    File: BlockCommand.java
/**
 * Create BlockCommand for transferring blocks to another datanode
 * @param blocktargetlist    blocks to be transferred 
 */
public BlockCommand(int action, String poolId,
    List<BlockTargetPair> blocktargetlist) {
  super(action);
  this.poolId = poolId;
  blocks = new Block[blocktargetlist.size()]; 
  targets = new DatanodeInfo[blocks.length][];
  targetStorageTypes = new StorageType[blocks.length][];
  targetStorageIDs = new String[blocks.length][];

  for(int i = 0; i < blocks.length; i++) {
    BlockTargetPair p = blocktargetlist.get(i);
    blocks[i] = p.block;
    targets[i] = DatanodeStorageInfo.toDatanodeInfos(p.targets);
    targetStorageTypes[i] = DatanodeStorageInfo.toStorageTypes(p.targets);
    targetStorageIDs[i] = DatanodeStorageInfo.toStorageIDs(p.targets);
  }
}
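A minimal, hypothetical sketch of driving this constructor from NameNode-side code. BlockTargetPair is assumed to be DatanodeDescriptor.BlockTargetPair, whose constructor is not public, so a fragment like this would only compile from within the blockmanagement package; block, targetStorages, and poolId are placeholders.

// Build a one-entry transfer command asking a DataNode to replicate
// 'block' to the given target storages.
List<BlockTargetPair> pairs = new ArrayList<>();
pairs.add(new BlockTargetPair(block, targetStorages)); // placeholders, assumed in scope
BlockCommand transferCmd =
    new BlockCommand(DatanodeProtocol.DNA_TRANSFER, poolId, pairs);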
Project: hadoop    File: DFSTestUtil.java
public static DatanodeStorageInfo[] createDatanodeStorageInfos(
    int n, String[] racks, String[] hostnames, StorageType[] types) {
  DatanodeStorageInfo[] storages = new DatanodeStorageInfo[n];
  for(int i = storages.length; i > 0; ) {
    final String storageID = "s" + i;
    final String ip = i + "." + i + "." + i + "." + i;
    i--;
    final String rack = (racks!=null && i < racks.length)? racks[i]: "defaultRack";
    final String hostname = (hostnames!=null && i < hostnames.length)? hostnames[i]: "host";
    final StorageType type = (types != null && i < types.length) ? types[i]
        : StorageType.DEFAULT;
    storages[i] = createDatanodeStorageInfo(storageID, ip, rack, hostname,
        type);
  }
  return storages;
}
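In tests, the array returned here is typically converted with the same DatanodeStorageInfo helpers used elsewhere on this page; a small hypothetical fragment:

// Fabricate three storages on two racks, then derive the per-storage views
// (DatanodeInfo, storage ID, StorageType) that LocatedBlock and BlockCommand use.
DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(
    3, new String[] {"/rack1", "/rack1", "/rack2"}, null, null);
DatanodeInfo[] nodes = DatanodeStorageInfo.toDatanodeInfos(storages);
String[] storageIDs = DatanodeStorageInfo.toStorageIDs(storages);
StorageType[] storageTypes = DatanodeStorageInfo.toStorageTypes(storages);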
Project: hadoop    File: DFSTestUtil.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfoContiguous storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoContiguousUnderConstruction);
  BlockInfoContiguousUnderConstruction ucBlock =
    (BlockInfoContiguousUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heart beat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
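A hypothetical call site of the kind pipeline-recovery tests use: locate the under-construction block of a file still open for write, then ask which DataNode the NameNode is expected to pick as the recovery primary. DFSTestUtil.getFirstBlock and the MiniDFSCluster accessors are assumed; fs and filePath are placeholders.

// The file at 'filePath' is still being written, so its block is under construction.
ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, filePath);
DatanodeDescriptor expectedPrimary =
    DFSTestUtil.getExpectedPrimaryNode(cluster.getNameNode(), blk);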
Project: hadoop    File: TestDeleteRace.java
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
      numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
      blocksize, storagePolicy);
  try {
    Thread.sleep(3000);
  } catch (InterruptedException e) {}
  return results;
}
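This override belongs to a delaying block-placement policy; a sketch of how such a policy is typically plugged in before starting the mini cluster. SlowBlockPlacementPolicy is a hypothetical subclass of BlockPlacementPolicyDefault containing the override above.

// Register the delaying policy so every chooseTarget call pauses for 3 seconds,
// widening the race window the test is exercising.
conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
    SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();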
Project: hadoop    File: TestIncrementalBrVariations.java
/**
 * Verify that the NameNode can learn about new storages from incremental
 * block reports.
 * This tests the fix for the error condition seen in HDFS-6904.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test (timeout=60000)
public void testNnLearnsNewStorages()
    throws IOException, InterruptedException {

  // Generate a report for a fake block on a fake storage.
  final String newStorageUuid = UUID.randomUUID().toString();
  final DatanodeStorage newStorage = new DatanodeStorage(newStorageUuid);
  StorageReceivedDeletedBlocks[] reports = makeReportForReceivedBlock(
      getDummyBlock(), newStorage);

  // Send the report to the NN.
  cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);

  // Make sure that the NN has learned of the new storage.
  DatanodeStorageInfo storageInfo = cluster.getNameNode()
                                           .getNamesystem()
                                           .getBlockManager()
                                           .getDatanodeManager()
                                           .getDatanode(dn0.getDatanodeId())
                                           .getStorageInfo(newStorageUuid);
  assertNotNull(storageInfo);
}
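One plausible shape for the makeReportForReceivedBlock helper referenced above, assuming the usual ReceivedDeletedBlockInfo and StorageReceivedDeletedBlocks constructors; treat it as a sketch rather than the exact test code.

private static StorageReceivedDeletedBlocks[] makeReportForReceivedBlock(
    Block block, DatanodeStorage storage) {
  // A single IBR entry marking 'block' as received on 'storage'; no delete hint.
  ReceivedDeletedBlockInfo[] received = new ReceivedDeletedBlockInfo[] {
      new ReceivedDeletedBlockInfo(block, BlockStatus.RECEIVED_BLOCK, null) };
  return new StorageReceivedDeletedBlocks[] {
      new StorageReceivedDeletedBlocks(storage, received) };
}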
Project: aliyun-oss-hadoop-fs    File: FSDirWriteFileOp.java
static DatanodeStorageInfo[] chooseTargetForNewBlock(
    BlockManager bm, String src, DatanodeInfo[] excludedNodes, String[]
    favoredNodes, ValidateAddBlockResult r) throws IOException {
  Node clientNode = bm.getDatanodeManager()
      .getDatanodeByHost(r.clientMachine);
  if (clientNode == null) {
    clientNode = getClientNode(bm, r.clientMachine);
  }

  Set<Node> excludedNodesSet = null;
  if (excludedNodes != null) {
    excludedNodesSet = new HashSet<>(excludedNodes.length);
    Collections.addAll(excludedNodesSet, excludedNodes);
  }
  List<String> favoredNodesList = (favoredNodes == null) ? null
      : Arrays.asList(favoredNodes);

  // choose targets for the new block to be allocated.
  return bm.chooseTarget4NewBlock(src, r.numTargets, clientNode,
                                  excludedNodesSet, r.blockSize,
                                  favoredNodesList, r.storagePolicyID,
                                  r.isStriped);
}
Project: aliyun-oss-hadoop-fs    File: BlockCommand.java
/**
 * Create BlockCommand for transferring blocks to another datanode
 * @param blocktargetlist    blocks to be transferred 
 */
public BlockCommand(int action, String poolId,
    List<BlockTargetPair> blocktargetlist) {
  super(action);
  this.poolId = poolId;
  blocks = new Block[blocktargetlist.size()]; 
  targets = new DatanodeInfo[blocks.length][];
  targetStorageTypes = new StorageType[blocks.length][];
  targetStorageIDs = new String[blocks.length][];

  for(int i = 0; i < blocks.length; i++) {
    BlockTargetPair p = blocktargetlist.get(i);
    blocks[i] = p.block;
    targets[i] = DatanodeStorageInfo.toDatanodeInfos(p.targets);
    targetStorageTypes[i] = DatanodeStorageInfo.toStorageTypes(p.targets);
    targetStorageIDs[i] = DatanodeStorageInfo.toStorageIDs(p.targets);
  }
}
Project: aliyun-oss-hadoop-fs    File: DFSTestUtil.java
public static DatanodeStorageInfo[] createDatanodeStorageInfos(
    int n, String[] racks, String[] hostnames, StorageType[] types) {
  DatanodeStorageInfo[] storages = new DatanodeStorageInfo[n];
  for(int i = storages.length; i > 0; ) {
    final String storageID = "s" + i;
    final String ip = i + "." + i + "." + i + "." + i;
    i--;
    final String rack = (racks!=null && i < racks.length)? racks[i]: "defaultRack";
    final String hostname = (hostnames!=null && i < hostnames.length)? hostnames[i]: "host";
    final StorageType type = (types != null && i < types.length) ? types[i]
        : StorageType.DEFAULT;
    storages[i] = createDatanodeStorageInfo(storageID, ip, rack, hostname,
        type, null);
  }
  return storages;
}
Project: aliyun-oss-hadoop-fs    File: DFSTestUtil.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock, !storedBlock.isComplete());
  // We expect that the replica with the most recent heart beat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = storedBlock
      .getUnderConstructionFeature().getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Project: aliyun-oss-hadoop-fs    File: TestDeleteRace.java
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
      numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
      blocksize, storagePolicy);
  try {
    Thread.sleep(3000);
  } catch (InterruptedException e) {}
  return results;
}
Project: aliyun-oss-hadoop-fs    File: TestAddStripedBlocks.java
private void checkStripedBlockUC(BlockInfoStriped block,
    boolean checkReplica) {
  assertEquals(0, block.numNodes());
  Assert.assertFalse(block.isComplete());
  Assert.assertEquals(StripedFileTestUtil.NUM_DATA_BLOCKS, block.getDataBlockNum());
  Assert.assertEquals(StripedFileTestUtil.NUM_PARITY_BLOCKS,
      block.getParityBlockNum());
  Assert.assertEquals(0,
      block.getBlockId() & HdfsServerConstants.BLOCK_GROUP_INDEX_MASK);

  Assert.assertEquals(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
      block.getBlockUCState());
  if (checkReplica) {
    Assert.assertEquals(GROUP_SIZE,
        block.getUnderConstructionFeature().getNumExpectedLocations());
    DatanodeStorageInfo[] storages = block.getUnderConstructionFeature()
        .getExpectedStorageLocations();
    for (DataNode dn : cluster.getDataNodes()) {
      Assert.assertTrue(includeDataNode(dn.getDatanodeId(), storages));
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestIncrementalBrVariations.java
/**
 * Verify that the NameNode can learn about new storages from incremental
 * block reports.
 * This tests the fix for the error condition seen in HDFS-6904.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test (timeout=60000)
public void testNnLearnsNewStorages()
    throws IOException, InterruptedException {

  // Generate a report for a fake block on a fake storage.
  final String newStorageUuid = UUID.randomUUID().toString();
  final DatanodeStorage newStorage = new DatanodeStorage(newStorageUuid);
  StorageReceivedDeletedBlocks[] reports = DFSTestUtil.
      makeReportForReceivedBlock(getDummyBlock(), BlockStatus.RECEIVED_BLOCK,
          newStorage);

  // Send the report to the NN.
  cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);
  // IBRs are async, make sure the NN processes all of them.
  cluster.getNamesystem().getBlockManager().flushBlockOps();
  // Make sure that the NN has learned of the new storage.
  DatanodeStorageInfo storageInfo = cluster.getNameNode()
                                           .getNamesystem()
                                           .getBlockManager()
                                           .getDatanodeManager()
                                           .getDatanode(dn0.getDatanodeId())
                                           .getStorageInfo(newStorageUuid);
  assertNotNull(storageInfo);
}
Project: aliyun-oss-hadoop-fs    File: TestRecoverStripedFile.java
@Test
public void testProcessErasureCodingTasksSubmitionShouldSucceed()
    throws Exception {
  DataNode dataNode = cluster.dataNodes.get(0).datanode;

  // Pack invalid (dummy) parameters into ecTasks. Irrespective of the parameters,
  // each task submission to the thread pool should succeed, so that an exception
  // in one task does not prevent the remaining tasks in the list from being processed.
  int size = cluster.dataNodes.size();
  byte[] liveIndices = new byte[size];
  DatanodeInfo[] dataDNs = new DatanodeInfo[size + 1];
  DatanodeStorageInfo targetDnInfos_1 = BlockManagerTestUtil
      .newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(),
          new DatanodeStorage("s01"));
  DatanodeStorageInfo[] dnStorageInfo = new DatanodeStorageInfo[] {
      targetDnInfos_1 };

  BlockECRecoveryInfo invalidECInfo = new BlockECRecoveryInfo(
      new ExtendedBlock("bp-id", 123456), dataDNs, dnStorageInfo, liveIndices,
      ErasureCodingPolicyManager.getSystemDefaultPolicy());
  List<BlockECRecoveryInfo> ecTasks = new ArrayList<BlockECRecoveryInfo>();
  ecTasks.add(invalidECInfo);
  dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
}
Project: aliyun-oss-hadoop-fs    File: TestDatanodeRegistration.java
private boolean waitForBlockReport(final DataNode dn,
    final DatanodeDescriptor dnd) throws Exception {
  final DatanodeStorageInfo storage = dnd.getStorageInfos()[0];
  final long lastCount = storage.getBlockReportCount();
  dn.triggerBlockReport(
      new BlockReportOptions.Factory().setIncremental(false).build());
  try {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return lastCount != storage.getBlockReportCount();
      }
    }, 10, 100);
  } catch (TimeoutException te) {
    return false;
  }
  return true;
}
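A hypothetical caller: fetch the NameNode-side descriptor for the first DataNode (NameNodeAdapter.getDatanode is assumed to be available to the test) and assert that a triggered full block report is eventually recorded.

DataNode dn = cluster.getDataNodes().get(0);
DatanodeDescriptor dnd = NameNodeAdapter.getDatanode(
    cluster.getNamesystem(), dn.getDatanodeId());
// waitForBlockReport returns false if the report count never changes within the timeout.
assertTrue(waitForBlockReport(dn, dnd));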
Project: big-c    File: INodeFile.java
@Override // BlockCollection, the file should be under construction
public BlockInfoContiguousUnderConstruction setLastBlock(
    BlockInfoContiguous lastBlock, DatanodeStorageInfo[] locations)
    throws IOException {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");

  if (numBlocks() == 0) {
    throw new IOException("Failed to set last block: File is empty.");
  }
  BlockInfoContiguousUnderConstruction ucBlock =
    lastBlock.convertToBlockUnderConstruction(
        BlockUCState.UNDER_CONSTRUCTION, locations);
  setBlock(numBlocks() - 1, ucBlock);
  return ucBlock;
}
Project: big-c    File: BlockCommand.java
/**
 * Create BlockCommand for transferring blocks to another datanode
 * @param blocktargetlist    blocks to be transferred 
 */
public BlockCommand(int action, String poolId,
    List<BlockTargetPair> blocktargetlist) {
  super(action);
  this.poolId = poolId;
  blocks = new Block[blocktargetlist.size()]; 
  targets = new DatanodeInfo[blocks.length][];
  targetStorageTypes = new StorageType[blocks.length][];
  targetStorageIDs = new String[blocks.length][];

  for(int i = 0; i < blocks.length; i++) {
    BlockTargetPair p = blocktargetlist.get(i);
    blocks[i] = p.block;
    targets[i] = DatanodeStorageInfo.toDatanodeInfos(p.targets);
    targetStorageTypes[i] = DatanodeStorageInfo.toStorageTypes(p.targets);
    targetStorageIDs[i] = DatanodeStorageInfo.toStorageIDs(p.targets);
  }
}
Project: big-c    File: DFSTestUtil.java
public static DatanodeStorageInfo[] createDatanodeStorageInfos(
    int n, String[] racks, String[] hostnames, StorageType[] types) {
  DatanodeStorageInfo[] storages = new DatanodeStorageInfo[n];
  for(int i = storages.length; i > 0; ) {
    final String storageID = "s" + i;
    final String ip = i + "." + i + "." + i + "." + i;
    i--;
    final String rack = (racks!=null && i < racks.length)? racks[i]: "defaultRack";
    final String hostname = (hostnames!=null && i < hostnames.length)? hostnames[i]: "host";
    final StorageType type = (types != null && i < types.length) ? types[i]
        : StorageType.DEFAULT;
    storages[i] = createDatanodeStorageInfo(storageID, ip, rack, hostname,
        type);
  }
  return storages;
}
Project: big-c    File: DFSTestUtil.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfoContiguous storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoContiguousUnderConstruction);
  BlockInfoContiguousUnderConstruction ucBlock =
    (BlockInfoContiguousUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heart beat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Project: big-c    File: TestDeleteRace.java
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
      numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
      blocksize, storagePolicy);
  try {
    Thread.sleep(3000);
  } catch (InterruptedException e) {}
  return results;
}
Project: big-c    File: TestIncrementalBrVariations.java
/**
 * Verify that the NameNode can learn about new storages from incremental
 * block reports.
 * This tests the fix for the error condition seen in HDFS-6904.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test (timeout=60000)
public void testNnLearnsNewStorages()
    throws IOException, InterruptedException {

  // Generate a report for a fake block on a fake storage.
  final String newStorageUuid = UUID.randomUUID().toString();
  final DatanodeStorage newStorage = new DatanodeStorage(newStorageUuid);
  StorageReceivedDeletedBlocks[] reports = makeReportForReceivedBlock(
      getDummyBlock(), newStorage);

  // Send the report to the NN.
  cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);

  // Make sure that the NN has learned of the new storage.
  DatanodeStorageInfo storageInfo = cluster.getNameNode()
                                           .getNamesystem()
                                           .getBlockManager()
                                           .getDatanodeManager()
                                           .getDatanode(dn0.getDatanodeId())
                                           .getStorageInfo(newStorageUuid);
  assertNotNull(storageInfo);
}
Project: hadoop-2.6.0-cdh5.4.3    File: INodeFile.java
@Override // BlockCollection, the file should be under construction
public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
    DatanodeStorageInfo[] locations) throws IOException {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");

  if (numBlocks() == 0) {
    throw new IOException("Failed to set last block: File is empty.");
  }
  BlockInfoUnderConstruction ucBlock =
    lastBlock.convertToBlockUnderConstruction(
        BlockUCState.UNDER_CONSTRUCTION, locations);
  ucBlock.setBlockCollection(this);
  setBlock(numBlocks() - 1, ucBlock);
  return ucBlock;
}
Project: hadoop-2.6.0-cdh5.4.3    File: BlockCommand.java
/**
 * Create BlockCommand for transferring blocks to another datanode
 * @param blocktargetlist    blocks to be transferred 
 */
public BlockCommand(int action, String poolId,
    List<BlockTargetPair> blocktargetlist) {
  super(action);
  this.poolId = poolId;
  blocks = new Block[blocktargetlist.size()]; 
  targets = new DatanodeInfo[blocks.length][];
  targetStorageTypes = new StorageType[blocks.length][];
  targetStorageIDs = new String[blocks.length][];

  for(int i = 0; i < blocks.length; i++) {
    BlockTargetPair p = blocktargetlist.get(i);
    blocks[i] = p.block;
    targets[i] = DatanodeStorageInfo.toDatanodeInfos(p.targets);
    targetStorageTypes[i] = DatanodeStorageInfo.toStorageTypes(p.targets);
    targetStorageIDs[i] = DatanodeStorageInfo.toStorageIDs(p.targets);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSTestUtil.java
public static DatanodeStorageInfo[] createDatanodeStorageInfos(
    int n, String[] racks, String[] hostnames, StorageType[] types) {
  DatanodeStorageInfo[] storages = new DatanodeStorageInfo[n];
  for(int i = storages.length; i > 0; ) {
    final String storageID = "s" + i;
    final String ip = i + "." + i + "." + i + "." + i;
    i--;
    final String rack = (racks!=null && i < racks.length)? racks[i]: "defaultRack";
    final String hostname = (hostnames!=null && i < hostnames.length)? hostnames[i]: "host";
    final StorageType type = (types != null && i < types.length) ? types[i]
        : StorageType.DEFAULT;
    storages[i] = createDatanodeStorageInfo(storageID, ip, rack, hostname,
        type);
  }
  return storages;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSTestUtil.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoUnderConstruction);
  BlockInfoUnderConstruction ucBlock =
    (BlockInfoUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heart beat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDeleteRace.java
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
      numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
      blocksize, storagePolicy);
  try {
    Thread.sleep(3000);
  } catch (InterruptedException e) {}
  return results;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestIncrementalBrVariations.java
/**
 * Verify that the NameNode can learn about new storages from incremental
 * block reports.
 * This tests the fix for the error condition seen in HDFS-6904.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test (timeout=60000)
public void testNnLearnsNewStorages()
    throws IOException, InterruptedException {

  // Generate a report for a fake block on a fake storage.
  final String newStorageUuid = UUID.randomUUID().toString();
  final DatanodeStorage newStorage = new DatanodeStorage(newStorageUuid);
  StorageReceivedDeletedBlocks[] reports = makeReportForReceivedBlock(
      getDummyBlock(), newStorage);

  // Send the report to the NN.
  cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);

  // Make sure that the NN has learned of the new storage.
  DatanodeStorageInfo storageInfo = cluster.getNameNode()
                                           .getNamesystem()
                                           .getBlockManager()
                                           .getDatanodeManager()
                                           .getDatanode(dn0.getDatanodeId())
                                           .getStorageInfo(newStorageUuid);
  assertNotNull(storageInfo);
}
Project: FlexMap    File: INodeFile.java
@Override // BlockCollection, the file should be under construction
public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
    DatanodeStorageInfo[] locations) throws IOException {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");

  if (numBlocks() == 0) {
    throw new IOException("Failed to set last block: File is empty.");
  }
  BlockInfoUnderConstruction ucBlock =
    lastBlock.convertToBlockUnderConstruction(
        BlockUCState.UNDER_CONSTRUCTION, locations);
  ucBlock.setBlockCollection(this);
  setBlock(numBlocks() - 1, ucBlock);
  return ucBlock;
}
Project: FlexMap    File: BlockCommand.java
/**
 * Create BlockCommand for transferring blocks to another datanode
 * @param blocktargetlist    blocks to be transferred 
 */
public BlockCommand(int action, String poolId,
    List<BlockTargetPair> blocktargetlist) {
  super(action);
  this.poolId = poolId;
  blocks = new Block[blocktargetlist.size()]; 
  targets = new DatanodeInfo[blocks.length][];
  targetStorageTypes = new StorageType[blocks.length][];
  targetStorageIDs = new String[blocks.length][];

  for(int i = 0; i < blocks.length; i++) {
    BlockTargetPair p = blocktargetlist.get(i);
    blocks[i] = p.block;
    targets[i] = DatanodeStorageInfo.toDatanodeInfos(p.targets);
    targetStorageTypes[i] = DatanodeStorageInfo.toStorageTypes(p.targets);
    targetStorageIDs[i] = DatanodeStorageInfo.toStorageIDs(p.targets);
  }
}
Project: FlexMap    File: DFSTestUtil.java
public static DatanodeStorageInfo[] createDatanodeStorageInfos(
    int n, String[] racks, String[] hostnames, StorageType[] types) {
  DatanodeStorageInfo[] storages = new DatanodeStorageInfo[n];
  for(int i = storages.length; i > 0; ) {
    final String storageID = "s" + i;
    final String ip = i + "." + i + "." + i + "." + i;
    i--;
    final String rack = (racks!=null && i < racks.length)? racks[i]: "defaultRack";
    final String hostname = (hostnames!=null && i < hostnames.length)? hostnames[i]: "host";
    final StorageType type = (types != null && i < types.length) ? types[i]
        : StorageType.DEFAULT;
    storages[i] = createDatanodeStorageInfo(storageID, ip, rack, hostname,
        type);
  }
  return storages;
}
Project: FlexMap    File: DFSTestUtil.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoUnderConstruction);
  BlockInfoUnderConstruction ucBlock =
    (BlockInfoUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heart beat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Project: FlexMap    File: TestDeleteRace.java
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
      numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
      blocksize, storagePolicy);
  try {
    Thread.sleep(3000);
  } catch (InterruptedException e) {}
  return results;
}
Project: FlexMap    File: TestIncrementalBrVariations.java
/**
 * Verify that the NameNode can learn about new storages from incremental
 * block reports.
 * This tests the fix for the error condition seen in HDFS-6904.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test (timeout=60000)
public void testNnLearnsNewStorages()
    throws IOException, InterruptedException {

  // Generate a report for a fake block on a fake storage.
  final String newStorageUuid = UUID.randomUUID().toString();
  final DatanodeStorage newStorage = new DatanodeStorage(newStorageUuid);
  StorageReceivedDeletedBlocks[] reports = makeReportForReceivedBlock(
      getDummyBlock(), newStorage);

  // Send the report to the NN.
  cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);

  // Make sure that the NN has learned of the new storage.
  DatanodeStorageInfo storageInfo = cluster.getNameNode()
                                           .getNamesystem()
                                           .getBlockManager()
                                           .getDatanodeManager()
                                           .getDatanode(dn0.getDatanodeId())
                                           .getStorageInfo(newStorageUuid);
  assertNotNull(storageInfo);
}
Project: hadoop-on-lustre2    File: INodeFile.java
@Override // BlockCollection, the file should be under construction
public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
    DatanodeStorageInfo[] locations) throws IOException {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");

  if (numBlocks() == 0) {
    throw new IOException("Failed to set last block: File is empty.");
  }
  BlockInfoUnderConstruction ucBlock =
    lastBlock.convertToBlockUnderConstruction(
        BlockUCState.UNDER_CONSTRUCTION, locations);
  ucBlock.setBlockCollection(this);
  setBlock(numBlocks() - 1, ucBlock);
  return ucBlock;
}
Project: hadoop-on-lustre2    File: BlockCommand.java
/**
 * Create BlockCommand for transferring blocks to another datanode
 * @param blocktargetlist    blocks to be transferred 
 */
public BlockCommand(int action, String poolId,
    List<BlockTargetPair> blocktargetlist) {
  super(action);
  this.poolId = poolId;
  blocks = new Block[blocktargetlist.size()]; 
  targets = new DatanodeInfo[blocks.length][];
  targetStorageIDs = new String[blocks.length][];

  for(int i = 0; i < blocks.length; i++) {
    BlockTargetPair p = blocktargetlist.get(i);
    blocks[i] = p.block;
    targets[i] = DatanodeStorageInfo.toDatanodeInfos(p.targets);
    targetStorageIDs[i] = DatanodeStorageInfo.toStorageIDs(p.targets);
  }
}
Project: hadoop-on-lustre2    File: TestCommitBlockSynchronization.java
private FSNamesystem makeNameSystemSpy(Block block, INodeFile file)
    throws IOException {
  Configuration conf = new Configuration();
  FSImage image = new FSImage(conf);
  final DatanodeStorageInfo[] targets = {};

  FSNamesystem namesystem = new FSNamesystem(conf, image);
  FSNamesystem namesystemSpy = spy(namesystem);
  BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
      block, 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
  blockInfo.setBlockCollection(file);
  blockInfo.setGenerationStamp(genStamp);
  blockInfo.initializeBlockRecovery(genStamp);
  doReturn(true).when(file).removeLastBlock(any(Block.class));
  doReturn(true).when(file).isUnderConstruction();

  doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
  doReturn("").when(namesystemSpy).closeFileCommitBlocks(
      any(INodeFile.class), any(BlockInfo.class));
  doReturn("").when(namesystemSpy).persistBlocks(
      any(INodeFile.class), anyBoolean());
  doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();

  return namesystemSpy;
}
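The spy returned here is then exercised directly; a hedged sketch of the kind of call the surrounding tests make. The commitBlockSynchronization signature is taken from FSNamesystem; genStamp is the test's field, length is a placeholder, and the empty target list mirrors the empty targets array wired into the spy above.

// Hypothetical test body: commit synchronization on the block without closing
// the file or deleting the block.
FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
DatanodeID[] newTargets = new DatanodeID[0];
namesystemSpy.commitBlockSynchronization(
    new ExtendedBlock(), genStamp, length,
    false /* closeFile */, false /* deleteblock */, newTargets, null);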
Project: hadoop-on-lustre2    File: TestPipelinesFailover.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
private DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoUnderConstruction);
  BlockInfoUnderConstruction ucBlock =
    (BlockInfoUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heart beat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Project: hadoop-on-lustre2    File: TestIncrementalBrVariations.java
/**
 * Verify that the NameNode can learn about new storages from incremental
 * block reports.
 * This tests the fix for the error condition seen in HDFS-6904.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test (timeout=60000)
public void testNnLearnsNewStorages()
    throws IOException, InterruptedException {

  // Generate a report for a fake block on a fake storage.
  final String newStorageUuid = UUID.randomUUID().toString();
  final DatanodeStorage newStorage = new DatanodeStorage(newStorageUuid);
  StorageReceivedDeletedBlocks[] reports = makeReportForReceivedBlock(
      getDummyBlock(), newStorage);

  // Send the report to the NN.
  cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);

  // Make sure that the NN has learned of the new storage.
  DatanodeStorageInfo storageInfo = cluster.getNameNode()
                                           .getNamesystem()
                                           .getBlockManager()
                                           .getDatanodeManager()
                                           .getDatanode(dn0.getDatanodeId())
                                           .getStorageInfo(newStorageUuid);
  assertNotNull(storageInfo);
}
Project: hadoop    File: LocatedBlock.java
public LocatedBlock(ExtendedBlock b, DatanodeStorageInfo[] storages,
    long startOffset, boolean corrupt) {
  this(b, DatanodeStorageInfo.toDatanodeInfos(storages),
      DatanodeStorageInfo.toStorageIDs(storages),
      DatanodeStorageInfo.toStorageTypes(storages),
      startOffset, corrupt, EMPTY_LOCS);
}
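A small hypothetical fragment combining this constructor with the DFSTestUtil helper shown above: fabricate storages and wrap a block as a non-corrupt LocatedBlock at offset 0 (the block pool name and block ID are arbitrary).

DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(
    3, null, null, null);
LocatedBlock lb = new LocatedBlock(
    new ExtendedBlock("test-pool", 123L), storages, 0L, false);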
Project: hadoop    File: FSDirectory.java
/**
 * Add a block to the file. Returns a reference to the added block.
 */
BlockInfoContiguous addBlock(String path, INodesInPath inodesInPath,
    Block block, DatanodeStorageInfo[] targets) throws IOException {
  writeLock();
  try {
    final INodeFile fileINode = inodesInPath.getLastINode().asFile();
    Preconditions.checkState(fileINode.isUnderConstruction());

    // check quota limits and update space consumed
    updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
        fileINode.getBlockReplication(), true);

    // associate new last block for the file
    BlockInfoContiguousUnderConstruction blockInfo =
      new BlockInfoContiguousUnderConstruction(
          block,
          fileINode.getFileReplication(),
          BlockUCState.UNDER_CONSTRUCTION,
          targets);
    getBlockManager().addBlockCollection(blockInfo, fileINode);
    fileINode.addBlock(blockInfo);

    if(NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* FSDirectory.addBlock: "
          + path + " with " + block
          + " block is added to the in-memory "
          + "file system");
    }
    return blockInfo;
  } finally {
    writeUnlock();
  }
}