Usage examples of the Java class org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult
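
The snippets below collect call sites of DatanodeStorageInfo.addBlock(...) and of the AddBlockResult value it returns, taken from several Hadoop-derived projects. As a quick orientation, here is a minimal hypothetical stand-in (not the Hadoop source) for the nested enum; the three values it lists are an assumption based on what the snippets on this page appear to rely on, with each value's meaning inferred from the surrounding tests.

// Hypothetical stand-in for the nested AddBlockResult enum, not the Hadoop
// source; the three values and their meanings are assumptions inferred from
// the usage examples on this page.
public class AddBlockResultValues {
  enum AddBlockResult {
    ADDED,          // the block was newly attached to this storage
    REPLACED,       // the block moved here from another storage of the same datanode
    ALREADY_EXIST   // the block was already attached to this storage
  }

  public static void main(String[] args) {
    for (AddBlockResult r : AddBlockResult.values()) {
      System.out.println(r);
    }
  }
}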

Project: hadoop    File: TestBlockInfo.java
@Test
public void testReplaceStorage() throws Exception {

  // Create two dummy storages.
  final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
  final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
  final int NUM_BLOCKS = 10;
  BlockInfoContiguous[] blockInfos = new BlockInfoContiguous[NUM_BLOCKS];

  // Create a few dummy blocks and add them to the first storage.
  for (int i = 0; i < NUM_BLOCKS; ++i) {
    blockInfos[i] = new BlockInfoContiguous((short) 3);
    storage1.addBlock(blockInfos[i]);
  }

  // Try to move one of the blocks to a different storage.
  boolean added =
      storage2.addBlock(blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
  Assert.assertThat(added, is(false));
  Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
}
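
A note on why the test expects added to be false: storage1 and storage2 belong to the same DatanodeDescriptor, so moving the block between them is presumably reported as REPLACED rather than ADDED, and the second assertion confirms that the replica now lives on storage2. Below is a hypothetical standalone variant of the same check; it assumes the same Hadoop test classpath and same-package access as TestBlockInfo, and it assumes that REPLACED is indeed the value reported for such a move.

package org.apache.hadoop.hdfs.server.blockmanagement;

import static org.hamcrest.CoreMatchers.is;

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.junit.Assert;
import org.junit.Test;

// Hypothetical variant of testReplaceStorage above (assumption: a move between
// two storages of the same datanode is reported as AddBlockResult.REPLACED).
public class TestReplaceStorageResult {
  @Test
  public void testMoveReportedAsReplaced() throws Exception {
    final DatanodeStorageInfo storage1 =
        DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
    final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(
        storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
    final BlockInfoContiguous block = new BlockInfoContiguous((short) 3);

    Assert.assertThat(storage1.addBlock(block), is(AddBlockResult.ADDED));
    // The block is not added a second time; it is moved to the other storage.
    Assert.assertThat(storage2.addBlock(block), is(AddBlockResult.REPLACED));
    Assert.assertThat(block.getStorageInfo(0), is(storage2));
  }
}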
Project: aliyun-oss-hadoop-fs    File: TestBlockInfo.java
@Test
public void testReplaceStorage() throws Exception {

  // Create two dummy storages.
  final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
  final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
  final int NUM_BLOCKS = 10;
  BlockInfo[] blockInfos = new BlockInfo[NUM_BLOCKS];

  // Create a few dummy blocks and add them to the first storage.
  for (int i = 0; i < NUM_BLOCKS; ++i) {
    blockInfos[i] = new BlockInfoContiguous((short) 3);
    storage1.addBlock(blockInfos[i]);
  }

  // Try to move one of the blocks to a different storage.
  boolean added =
      storage2.addBlock(blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
  Assert.assertThat(added, is(false));
  Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
}
Project: big-c    File: TestBlockInfo.java
@Test
public void testReplaceStorage() throws Exception {

  // Create two dummy storages.
  final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
  final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
  final int NUM_BLOCKS = 10;
  BlockInfoContiguous[] blockInfos = new BlockInfoContiguous[NUM_BLOCKS];

  // Create a few dummy blocks and add them to the first storage.
  for (int i = 0; i < NUM_BLOCKS; ++i) {
    blockInfos[i] = new BlockInfoContiguous((short) 3);
    storage1.addBlock(blockInfos[i]);
  }

  // Try to move one of the blocks to a different storage.
  boolean added =
      storage2.addBlock(blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
  Assert.assertThat(added, is(false));
  Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestBlockInfo.java
@Test
public void testReplaceStorage() throws Exception {

  // Create two dummy storages.
  final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
  final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
  final int NUM_BLOCKS = 10;
  BlockInfo[] blockInfos = new BlockInfo[NUM_BLOCKS];

  // Create a few dummy blocks and add them to the first storage.
  for (int i = 0; i < NUM_BLOCKS; ++i) {
    blockInfos[i] = new BlockInfo((short) 3);
    storage1.addBlock(blockInfos[i]);
  }

  // Try to move one of the blocks to a different storage.
  boolean added =
      storage2.addBlock(blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
  Assert.assertThat(added, is(false));
  Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
}
Project: hadoop    File: BlockManager.java
/**
 * Faster version of {@link #addStoredBlock},
 * intended for use with initial block report at startup. If not in startup
 * safe mode, will call standard addStoredBlock(). Assumes this method is
 * called "immediately" so there is no need to refresh the storedBlock from
 * blocksMap. Doesn't handle underReplication/overReplication, or worry about
 * pendingReplications or corruptReplicas, because it's in startup safe mode.
 * Doesn't log every block, because there are typically millions of them.
 * 
 * @throws IOException
 */
private void addStoredBlockImmediate(BlockInfoContiguous storedBlock,
    DatanodeStorageInfo storageInfo)
throws IOException {
  assert (storedBlock != null && namesystem.hasWriteLock());
  if (!namesystem.isInStartupSafeMode() 
      || namesystem.isPopulatingReplQueues()) {
    addStoredBlock(storedBlock, storageInfo, null, false);
    return;
  }

  // just add it
  AddBlockResult result = storageInfo.addBlock(storedBlock);

  // Now check for completion of blocks and safe block count
  int numCurrentReplica = countLiveNodes(storedBlock);
  if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
      && numCurrentReplica >= minReplication) {
    completeBlock(storedBlock.getBlockCollection(), storedBlock, false);
  } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
    // check whether safe replication is reached for the block
    // only complete blocks are counted towards that.
    // In the case that the block just became complete above, completeBlock()
    // handles the safe block count maintenance.
    namesystem.incrementSafeBlockCount(numCurrentReplica);
  }
}
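
A note on how the result is consumed in this fast path: during startup safe mode the method skips the replication bookkeeping entirely and only increments the safe-block count, and it does so only when the storage reports ADDED for a complete block, so a replica that is re-reported or merely moved between storages does not inflate the count. The sketch below is a hypothetical, self-contained illustration of that guard in isolation; the names and values are stand-ins, not BlockManager code.

// Hypothetical illustration of the guard above: only an ADDED result (for a
// complete block) triggers the safe-block-count increment, so processing the
// same replica twice does not count it twice.
public class SafeBlockCountSketch {
  enum AddBlockResult { ADDED, REPLACED, ALREADY_EXIST }

  private long safeBlockCount = 0;

  void onStoredBlock(boolean blockIsComplete, AddBlockResult result) {
    if (blockIsComplete && result == AddBlockResult.ADDED) {
      safeBlockCount++;   // counted once, when the replica is first seen
    }
  }

  public static void main(String[] args) {
    SafeBlockCountSketch sketch = new SafeBlockCountSketch();
    sketch.onStoredBlock(true, AddBlockResult.ADDED);          // first report
    sketch.onStoredBlock(true, AddBlockResult.ALREADY_EXIST);  // duplicate report, ignored
    sketch.onStoredBlock(true, AddBlockResult.REPLACED);       // storage move, ignored
    System.out.println(sketch.safeBlockCount);                 // prints 1
  }
}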
Project: hadoop    File: TestDatanodeDescriptor.java
@Test
public void testBlocksCounter() throws Exception {
  DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
  assertEquals(0, dd.numBlocks());
  BlockInfoContiguous blk = new BlockInfoContiguous(new Block(1L), (short) 1);
  BlockInfoContiguous blk1 = new BlockInfoContiguous(new Block(2L), (short) 2);
  DatanodeStorageInfo[] storages = dd.getStorageInfos();
  assertTrue(storages.length > 0);
  // add first block
  assertTrue(storages[0].addBlock(blk) == AddBlockResult.ADDED);
  assertEquals(1, dd.numBlocks());
  // remove a non-existent block
  assertFalse(dd.removeBlock(blk1));
  assertEquals(1, dd.numBlocks());
  // add an existent block
  assertFalse(storages[0].addBlock(blk) == AddBlockResult.ADDED);
  assertEquals(1, dd.numBlocks());
  // add second block
  assertTrue(storages[0].addBlock(blk1) == AddBlockResult.ADDED);
  assertEquals(2, dd.numBlocks());
  // remove first block
  assertTrue(dd.removeBlock(blk));
  assertEquals(1, dd.numBlocks());
  // remove second block
  assertTrue(dd.removeBlock(blk1));
  assertEquals(0, dd.numBlocks());    
}
Project: aliyun-oss-hadoop-fs    File: BlockManager.java
/**
 * Faster version of {@link #addStoredBlock},
 * intended for use with initial block report at startup. If not in startup
 * safe mode, will call standard addStoredBlock(). Assumes this method is
 * called "immediately" so there is no need to refresh the storedBlock from
 * blocksMap. Doesn't handle underReplication/overReplication, or worry about
 * pendingReplications or corruptReplicas, because it's in startup safe mode.
 * Doesn't log every block, because there are typically millions of them.
 * 
 * @throws IOException
 */
private void addStoredBlockImmediate(BlockInfo storedBlock, Block reported,
    DatanodeStorageInfo storageInfo)
throws IOException {
  assert (storedBlock != null && namesystem.hasWriteLock());
  if (!namesystem.isInStartupSafeMode()
      || isPopulatingReplQueues()) {
    addStoredBlock(storedBlock, reported, storageInfo, null, false);
    return;
  }

  // just add it
  AddBlockResult result = storageInfo.addBlock(storedBlock, reported);

  // Now check for completion of blocks and safe block count
  int numCurrentReplica = countLiveNodes(storedBlock);
  if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
      && hasMinStorage(storedBlock, numCurrentReplica)) {
    completeBlock(storedBlock, false);
  } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
    // check whether safe replication is reached for the block
    // only complete blocks are counted towards that.
    // In the case that the block just became complete above, completeBlock()
    // handles the safe block count maintenance.
    bmSafeMode.incrementSafeBlockCount(numCurrentReplica, storedBlock);
  }
}
Project: aliyun-oss-hadoop-fs    File: TestDatanodeDescriptor.java
@Test
public void testBlocksCounter() throws Exception {
  DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
  assertEquals(0, dd.numBlocks());
  BlockInfo blk = new BlockInfoContiguous(new Block(1L), (short) 1);
  BlockInfo blk1 = new BlockInfoContiguous(new Block(2L), (short) 2);
  DatanodeStorageInfo[] storages = dd.getStorageInfos();
  assertTrue(storages.length > 0);
  // add first block
  assertTrue(storages[0].addBlock(blk) == AddBlockResult.ADDED);
  assertEquals(1, dd.numBlocks());
  // remove a non-existent block
  assertFalse(BlocksMap.removeBlock(dd, blk1));
  assertEquals(1, dd.numBlocks());
  // add an existent block
  assertFalse(storages[0].addBlock(blk) == AddBlockResult.ADDED);
  assertEquals(1, dd.numBlocks());
  // add second block
  assertTrue(storages[0].addBlock(blk1) == AddBlockResult.ADDED);
  assertEquals(2, dd.numBlocks());
  // remove first block
  assertTrue(BlocksMap.removeBlock(dd, blk));
  assertEquals(1, dd.numBlocks());
  // remove second block
  assertTrue(BlocksMap.removeBlock(dd, blk1));
  assertEquals(0, dd.numBlocks());    
}
Project: big-c    File: BlockManager.java
/**
 * Faster version of {@link #addStoredBlock},
 * intended for use with initial block report at startup. If not in startup
 * safe mode, will call standard addStoredBlock(). Assumes this method is
 * called "immediately" so there is no need to refresh the storedBlock from
 * blocksMap. Doesn't handle underReplication/overReplication, or worry about
 * pendingReplications or corruptReplicas, because it's in startup safe mode.
 * Doesn't log every block, because there are typically millions of them.
 * 
 * @throws IOException
 */
private void addStoredBlockImmediate(BlockInfoContiguous storedBlock,
    DatanodeStorageInfo storageInfo)
throws IOException {
  assert (storedBlock != null && namesystem.hasWriteLock());
  if (!namesystem.isInStartupSafeMode() 
      || namesystem.isPopulatingReplQueues()) {
    addStoredBlock(storedBlock, storageInfo, null, false);
    return;
  }

  // just add it
  AddBlockResult result = storageInfo.addBlock(storedBlock);

  // Now check for completion of blocks and safe block count
  int numCurrentReplica = countLiveNodes(storedBlock);
  if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
      && numCurrentReplica >= minReplication) {
    completeBlock(storedBlock.getBlockCollection(), storedBlock, false);
  } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
    // check whether safe replication is reached for the block
    // only complete blocks are counted towards that.
    // In the case that the block just became complete above, completeBlock()
    // handles the safe block count maintenance.
    namesystem.incrementSafeBlockCount(numCurrentReplica);
  }
}
Project: big-c    File: TestDatanodeDescriptor.java
@Test
public void testBlocksCounter() throws Exception {
  DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
  assertEquals(0, dd.numBlocks());
  BlockInfoContiguous blk = new BlockInfoContiguous(new Block(1L), (short) 1);
  BlockInfoContiguous blk1 = new BlockInfoContiguous(new Block(2L), (short) 2);
  DatanodeStorageInfo[] storages = dd.getStorageInfos();
  assertTrue(storages.length > 0);
  // add first block
  assertTrue(storages[0].addBlock(blk) == AddBlockResult.ADDED);
  assertEquals(1, dd.numBlocks());
  // remove a non-existent block
  assertFalse(dd.removeBlock(blk1));
  assertEquals(1, dd.numBlocks());
  // add an existent block
  assertFalse(storages[0].addBlock(blk) == AddBlockResult.ADDED);
  assertEquals(1, dd.numBlocks());
  // add second block
  assertTrue(storages[0].addBlock(blk1) == AddBlockResult.ADDED);
  assertEquals(2, dd.numBlocks());
  // remove first block
  assertTrue(dd.removeBlock(blk));
  assertEquals(1, dd.numBlocks());
  // remove second block
  assertTrue(dd.removeBlock(blk1));
  assertEquals(0, dd.numBlocks());    
}
Project: hadoop-2.6.0-cdh5.4.3    File: BlockManager.java
/**
 * Faster version of
 * {@link #addStoredBlock(BlockInfo, DatanodeStorageInfo, DatanodeDescriptor, boolean)}
 * , intended for use with initial block report at startup. If not in startup
 * safe mode, will call standard addStoredBlock(). Assumes this method is
 * called "immediately" so there is no need to refresh the storedBlock from
 * blocksMap. Doesn't handle underReplication/overReplication, or worry about
 * pendingReplications or corruptReplicas, because it's in startup safe mode.
 * Doesn't log every block, because there are typically millions of them.
 * 
 * @throws IOException
 */
private void addStoredBlockImmediate(BlockInfo storedBlock,
    DatanodeStorageInfo storageInfo)
throws IOException {
  assert (storedBlock != null && namesystem.hasWriteLock());
  if (!namesystem.isInStartupSafeMode() 
      || namesystem.isPopulatingReplQueues()) {
    addStoredBlock(storedBlock, storageInfo, null, false);
    return;
  }

  // just add it
  AddBlockResult result = storageInfo.addBlock(storedBlock);

  // Now check for completion of blocks and safe block count
  int numCurrentReplica = countLiveNodes(storedBlock);
  if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
      && numCurrentReplica >= minReplication) {
    completeBlock(storedBlock.getBlockCollection(), storedBlock, false);
  } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
    // check whether safe replication is reached for the block
    // only complete blocks are counted towards that.
    // In the case that the block just became complete above, completeBlock()
    // handles the safe block count maintenance.
    namesystem.incrementSafeBlockCount(numCurrentReplica);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDatanodeDescriptor.java
@Test
public void testBlocksCounter() throws Exception {
  DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
  assertEquals(0, dd.numBlocks());
  BlockInfo blk = new BlockInfo(new Block(1L), (short) 1);
  BlockInfo blk1 = new BlockInfo(new Block(2L), (short) 2);
  DatanodeStorageInfo[] storages = dd.getStorageInfos();
  assertTrue(storages.length > 0);
  // add first block
  assertTrue(storages[0].addBlock(blk) == AddBlockResult.ADDED);
  assertEquals(1, dd.numBlocks());
  // remove a non-existent block
  assertFalse(dd.removeBlock(blk1));
  assertEquals(1, dd.numBlocks());
  // add an existent block
  assertFalse(storages[0].addBlock(blk) == AddBlockResult.ADDED);
  assertEquals(1, dd.numBlocks());
  // add second block
  assertTrue(storages[0].addBlock(blk1) == AddBlockResult.ADDED);
  assertEquals(2, dd.numBlocks());
  // remove first block
  assertTrue(dd.removeBlock(blk));
  assertEquals(1, dd.numBlocks());
  // remove second block
  assertTrue(dd.removeBlock(blk1));
  assertEquals(0, dd.numBlocks());    
}