Java class org.apache.hadoop.hdfs.TestBlockStoragePolicy — example source code

项目:aliyun-oss-hadoop-fs    文件:TestAvailableSpaceBlockPlacementPolicy.java   
/**
 * Invokes the available-space placement policy {@code chooseTimes} times and
 * verifies that datanodes with more than 60% remaining space are picked with
 * a probability inside the expected (0.52, 0.55) band.
 */
@Test
public void testChooseTarget() {
  int total = 0;
  int moreRemainingNode = 0;
  for (int i = 0; i < chooseTimes; i++) {
    DatanodeStorageInfo[] targets =
        namenode
            .getNamesystem()
            .getBlockManager()
            .getBlockPlacementPolicy()
            .chooseTarget(file, replica, null, new ArrayList<DatanodeStorageInfo>(), false, null,
              blockSize, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);

    // assertEquals reports expected/actual on failure, unlike assertTrue(a == b).
    Assert.assertEquals(replica, targets.length);
    for (int j = 0; j < replica; j++) {
      total++;
      // Count how often a chosen target sits on a node with > 60% free space.
      if (targets[j].getDatanodeDescriptor().getRemainingPercent() > 60) {
        moreRemainingNode++;
      }
    }
  }
  Assert.assertEquals(replica * chooseTimes, total);
  double possibility = 1.0 * moreRemainingNode / total;
  Assert.assertTrue(possibility > 0.52);
  Assert.assertTrue(possibility < 0.55);
}
项目:hadoop    文件:TestReplicationPolicyWithNodeGroup.java   
/**
 * Test helper: picks {@code numOfReplicas} targets for {@code filename} via
 * the shared replicator, using the default storage policy.
 */
private DatanodeStorageInfo[] chooseTarget(
    int numOfReplicas,
    DatanodeDescriptor writer,
    List<DatanodeStorageInfo> chosenNodes,
    Set<Node> excludedNodes) {
  // Do not ask the policy to echo back the already-chosen nodes.
  final boolean returnChosenNodes = false;
  return replicator.chooseTarget(filename, numOfReplicas, writer,
      chosenNodes, returnChosenNodes, excludedNodes, BLOCK_SIZE,
      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
}
项目:hadoop    文件:TestReplicationPolicy.java   
/**
 * Test helper: delegates target selection to the shared replicator with the
 * default storage policy and returnChosenNodes disabled.
 */
private static DatanodeStorageInfo[] chooseTarget(
    int numOfReplicas,
    DatanodeDescriptor writer,
    List<DatanodeStorageInfo> chosenNodes,
    Set<Node> excludedNodes) {
  DatanodeStorageInfo[] targets = replicator.chooseTarget(filename,
      numOfReplicas, writer, chosenNodes, false, excludedNodes, BLOCK_SIZE,
      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
  return targets;
}
项目:aliyun-oss-hadoop-fs    文件:TestReplicationPolicyWithNodeGroup.java   
/**
 * Test helper: picks targets honoring {@code favoredNodes}, using the
 * default storage policy.
 */
private DatanodeStorageInfo[] chooseTarget(
    int numOfReplicas,
    DatanodeDescriptor writer,
    Set<Node> excludedNodes,
    List<DatanodeDescriptor> favoredNodes) {
  DatanodeStorageInfo[] targets = replicator.chooseTarget(filename,
      numOfReplicas, writer, excludedNodes, BLOCK_SIZE, favoredNodes,
      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
  return targets;
}
项目:aliyun-oss-hadoop-fs    文件:TestReplicationPolicy.java   
/**
 * Test helper: chooses {@code numOfReplicas} targets with a favored-node
 * preference and the default storage policy.
 */
private DatanodeStorageInfo[] chooseTarget(
    int numOfReplicas,
    DatanodeDescriptor writer,
    Set<Node> excludedNodes,
    List<DatanodeDescriptor> favoredNodes) {
  DatanodeStorageInfo[] chosen = replicator.chooseTarget(filename,
      numOfReplicas, writer, excludedNodes, BLOCK_SIZE, favoredNodes,
      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
  return chosen;
}
项目:aliyun-oss-hadoop-fs    文件:BaseReplicationPolicyTest.java   
/**
 * Test helper shared by subclasses: selects targets through the replicator
 * with the default storage policy.
 */
DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
   DatanodeDescriptor writer, List<DatanodeStorageInfo> chosenNodes,
   Set<Node> excludedNodes) {
  // Do not include the already-chosen nodes in the returned array.
  final boolean returnChosenNodes = false;
  return replicator.chooseTarget(filename, numOfReplicas, writer,
      chosenNodes, returnChosenNodes, excludedNodes, BLOCK_SIZE,
      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
}
项目:big-c    文件:TestReplicationPolicyWithNodeGroup.java   
/**
 * Test helper: picks {@code numOfReplicas} targets for {@code filename}
 * using the default storage policy.
 */
private DatanodeStorageInfo[] chooseTarget(
    int numOfReplicas,
    DatanodeDescriptor writer,
    List<DatanodeStorageInfo> chosenNodes,
    Set<Node> excludedNodes) {
  DatanodeStorageInfo[] targets = replicator.chooseTarget(filename,
      numOfReplicas, writer, chosenNodes, false, excludedNodes, BLOCK_SIZE,
      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
  return targets;
}
项目:big-c    文件:TestReplicationPolicy.java   
/**
 * Test helper: delegates to the shared replicator with the default storage
 * policy; chosen nodes are not echoed back in the result.
 */
private static DatanodeStorageInfo[] chooseTarget(
    int numOfReplicas,
    DatanodeDescriptor writer,
    List<DatanodeStorageInfo> chosenNodes,
    Set<Node> excludedNodes) {
  final boolean returnChosenNodes = false;
  return replicator.chooseTarget(filename, numOfReplicas, writer,
      chosenNodes, returnChosenNodes, excludedNodes, BLOCK_SIZE,
      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestReplicationPolicyWithNodeGroup.java   
/**
 * Test helper: chooses replica targets through the replicator with the
 * default storage policy.
 */
private DatanodeStorageInfo[] chooseTarget(
    int numOfReplicas,
    DatanodeDescriptor writer,
    List<DatanodeStorageInfo> chosenNodes,
    Set<Node> excludedNodes) {
  DatanodeStorageInfo[] result = replicator.chooseTarget(filename,
      numOfReplicas, writer, chosenNodes, false, excludedNodes, BLOCK_SIZE,
      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
  return result;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestReplicationPolicy.java   
/**
 * Test helper: selects {@code numOfReplicas} targets with the default
 * storage policy and returnChosenNodes disabled.
 */
private static DatanodeStorageInfo[] chooseTarget(
    int numOfReplicas,
    DatanodeDescriptor writer,
    List<DatanodeStorageInfo> chosenNodes,
    Set<Node> excludedNodes) {
  final boolean returnChosenNodes = false;
  return replicator.chooseTarget(filename, numOfReplicas, writer,
      chosenNodes, returnChosenNodes, excludedNodes, BLOCK_SIZE,
      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
}
项目:FlexMap    文件:TestReplicationPolicyWithNodeGroup.java   
/**
 * Test helper: picks replica targets for {@code filename} via the shared
 * replicator and the default storage policy.
 */
private DatanodeStorageInfo[] chooseTarget(
    int numOfReplicas,
    DatanodeDescriptor writer,
    List<DatanodeStorageInfo> chosenNodes,
    Set<Node> excludedNodes) {
  DatanodeStorageInfo[] picked = replicator.chooseTarget(filename,
      numOfReplicas, writer, chosenNodes, false, excludedNodes, BLOCK_SIZE,
      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
  return picked;
}
项目:FlexMap    文件:TestReplicationPolicy.java   
/**
 * Test helper: delegates target choice to the replicator using the default
 * storage policy; already-chosen nodes are not returned.
 */
private static DatanodeStorageInfo[] chooseTarget(
    int numOfReplicas,
    DatanodeDescriptor writer,
    List<DatanodeStorageInfo> chosenNodes,
    Set<Node> excludedNodes) {
  final boolean returnChosenNodes = false;
  return replicator.chooseTarget(filename, numOfReplicas, writer,
      chosenNodes, returnChosenNodes, excludedNodes, BLOCK_SIZE,
      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
}
项目:hadoop    文件:TestReplicationPolicyConsiderLoad.java   
/**
 * Tests that chooseTarget with considerLoad set to true correctly calculates
 * load with decommissioned nodes: once DNs 0-2 are decommissioned, the
 * in-service xceiver average rises and all replicas land on DNs 3-5.
 */
@Test
public void testChooseTargetWithDecomNodes() throws IOException {
  namenode.getNamesystem().writeLock();
  try {
    String blockPoolId = namenode.getNamesystem().getBlockPoolId();
    // Heartbeat datanodes 3..5 with xceiver counts of 2, 4 and 4.
    final int[] xceiverCounts = {2, 4, 4};
    for (int i = 3; i <= 5; i++) {
      dnManager.handleHeartbeat(dnrList.get(i),
          BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
          blockPoolId, dataNodes[i].getCacheCapacity(),
          dataNodes[i].getCacheRemaining(),
          xceiverCounts[i - 3], 0, 0, null);
    }
    // Total xceiver count reported in the heartbeats above.
    final int load = 2 + 4 + 4;

    // All six nodes are still in service, so the average is load/6.
    // (Removed an unused local: FSNamesystem fsn was never referenced.)
    assertEquals((double)load/6, dnManager.getFSClusterStats()
      .getInServiceXceiverAverage(), EPSILON);

    // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
    // returns false
    for (int i = 0; i < 3; i++) {
      DatanodeDescriptor d = dnManager.getDatanode(dnrList.get(i));
      dnManager.getDecomManager().startDecommission(d);
      d.setDecommissioned();
    }
    // Only three nodes remain in service, so the average doubles.
    assertEquals((double)load/3, dnManager.getFSClusterStats()
      .getInServiceXceiverAverage(), EPSILON);

    // update references of writer DN to update the de-commissioned state
    List<DatanodeDescriptor> liveNodes = new ArrayList<DatanodeDescriptor>();
    dnManager.fetchDatanodes(liveNodes, null, false);
    DatanodeDescriptor writerDn = null;
    if (liveNodes.contains(dataNodes[0])) {
      writerDn = liveNodes.get(liveNodes.indexOf(dataNodes[0]));
    }

    // Call chooseTarget()
    DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
        .getBlockPlacementPolicy().chooseTarget("testFile.txt", 3,
            writerDn, new ArrayList<DatanodeStorageInfo>(), false, null,
            1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);

    // Every replica must sit on one of the non-decommissioned nodes (3..5).
    assertEquals(3, targets.length);
    Set<DatanodeStorageInfo> targetSet = new HashSet<DatanodeStorageInfo>(
        Arrays.asList(targets));
    for (int i = 3; i < storages.length; i++) {
      assertTrue(targetSet.contains(storages[i]));
    }
  } finally {
    // Restore node state so later tests see an untouched cluster.
    dataNodes[0].stopDecommission();
    dataNodes[1].stopDecommission();
    dataNodes[2].stopDecommission();
    namenode.getNamesystem().writeUnlock();
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestReplicationPolicyConsiderLoad.java   
/**
 * Tests that chooseTarget with considerLoad set to true correctly calculates
 * load with decommissioned nodes.
 */
@Test
public void testChooseTargetWithDecomNodes() throws IOException {
  namenode.getNamesystem().writeLock();
  try {
    // Heartbeat datanodes 3..5 with xceiver counts of 2, 4 and 4.
    final int[] xceiverCounts = {2, 4, 4};
    for (int i = 3; i <= 5; i++) {
      dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[i],
          BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
          dataNodes[i].getCacheCapacity(),
          dataNodes[i].getCacheUsed(),
          xceiverCounts[i - 3], 0, null);
    }

    // Total xceiver count reported in the heartbeats above.
    final int load = 2 + 4 + 4;

    // With all six nodes in service the average is load/6.
    assertEquals((double)load/6, dnManager.getFSClusterStats()
      .getInServiceXceiverAverage(), EPSILON);

    // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
    // returns false
    for (int i = 0; i < 3; i++) {
      dnManager.getDecomManager().startDecommission(dataNodes[i]);
      dataNodes[i].setDecommissioned();
    }
    // Only three nodes remain in service, so the average doubles.
    assertEquals((double)load/3, dnManager.getFSClusterStats()
      .getInServiceXceiverAverage(), EPSILON);

    DatanodeDescriptor writerDn = dataNodes[0];

    // Call chooseTarget()
    DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
        .getBlockPlacementPolicy().chooseTarget("testFile.txt", 3,
            writerDn, new ArrayList<DatanodeStorageInfo>(), false, null,
            1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);

    // Every replica must land on one of the in-service nodes (3..5).
    assertEquals(3, targets.length);
    Set<DatanodeStorageInfo> chosen = new HashSet<>(Arrays.asList(targets));
    for (int i = 3; i < storages.length; i++) {
      assertTrue(chosen.contains(storages[i]));
    }
  } finally {
    // Restore node state so later tests see an untouched cluster.
    for (int i = 0; i < 3; i++) {
      dataNodes[i].stopDecommission();
    }
    namenode.getNamesystem().writeUnlock();
  }
  NameNode.LOG.info("Done working on it");
}
项目:aliyun-oss-hadoop-fs    文件:TestReplicationPolicyConsiderLoad.java   
/**
 * Verifies that, with load-factor consideration enabled, chooseTarget never
 * picks a node whose xceiver count exceeds 1.2x the cluster-wide average.
 */
@Test
public void testConsiderLoadFactor() throws IOException {
  namenode.getNamesystem().writeLock();
  try {
    // Per-node xceiver counts reported in the heartbeats below (nodes 0..5).
    final int[] xceiverCounts = {5, 10, 5, 10, 15, 15};
    double load = 0;
    for (int i = 0; i < xceiverCounts.length; i++) {
      dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[i],
          BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
          dataNodes[i].getCacheCapacity(),
          dataNodes[i].getCacheUsed(),
          xceiverCounts[i], 0, null);
      load += xceiverCounts[i];
    }
    // Call chooseTarget()
    DatanodeDescriptor writerDn = dataNodes[0];
    DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
        .getBlockPlacementPolicy().chooseTarget("testFile.txt", 3, writerDn,
            new ArrayList<DatanodeStorageInfo>(), false, null,
            1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
    // No selected node may carry more than 1.2x the average load.
    for (DatanodeStorageInfo info : targets) {
      assertTrue("The node "+info.getDatanodeDescriptor().getName()+
              " has higher load and should not have been picked!",
          info.getDatanodeDescriptor().getXceiverCount() <= (load/6)*1.2);
    }
  } finally {
    namenode.getNamesystem().writeUnlock();
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestReplicationPolicyWithUpgradeDomain.java   
/**
 * Verify the rack and upgrade domain policies when excludeNodes are
 * specified. Each scenario clears and rebuilds the exclude/chosen sets,
 * then checks the target count, the first target, and the rack /
 * upgrade-domain spread of the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithExcludeNodes() throws Exception {
  Set<Node> excludedNodes = new HashSet<>();
  DatanodeStorageInfo[] targets;
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<>();

  // Scenario 1: 3 replicas with node 4 excluded — expect 2 racks and
  // 3 distinct upgrade domains, writer's storage first.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[4]);
  targets = chooseTarget(3, chosenNodes, excludedNodes);
  assertEquals(targets.length, 3);
  assertEquals(storages[0], targets[0]);
  assertEquals(getRacks(targets).size(), 2);
  assertEquals(getUpgradeDomains(targets).size(), 3);

  // Scenario 2: same as above but nodes 4 and 8 excluded — the placement
  // invariants must still hold.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[4]);
  excludedNodes.add(dataNodes[8]);
  targets = chooseTarget(3, chosenNodes, excludedNodes);
  assertEquals(targets.length, 3);
  assertEquals(storages[0], targets[0]);
  assertEquals(getRacks(targets).size(), 2);
  assertEquals(getUpgradeDomains(targets).size(), 3);

  // Scenario 3: excluding nodes 4, 5 and 8 leaves exactly one valid
  // placement, so the concrete storages can be asserted.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[4]);
  excludedNodes.add(dataNodes[5]);
  excludedNodes.add(dataNodes[8]);
  targets = chooseTarget(3, chosenNodes, excludedNodes);
  assertEquals(targets.length, 3);
  assertEquals(storages[0], targets[0]);
  assertEquals(storages[2], targets[1]);
  assertEquals(storages[7], targets[2]);

  // Scenario 4: 4 replicas with node 4 excluded — at least 2 racks and
  // exactly 3 upgrade domains.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[4]);
  targets = chooseTarget(4, chosenNodes, excludedNodes);
  assertEquals(targets.length, 4);
  assertEquals(storages[0], targets[0]);
  assertTrue(getRacks(targets).size()>=2);
  assertEquals(getUpgradeDomains(targets).size(), 3);

  // Scenario 5: 4 replicas with nodes 4 and 8 excluded — same invariants.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[4]);
  excludedNodes.add(dataNodes[8]);
  targets = chooseTarget(4, chosenNodes, excludedNodes);
  assertEquals(targets.length, 4);
  assertEquals(storages[0], targets[0]);
  assertTrue(getRacks(targets).size()>=2);
  assertEquals(getUpgradeDomains(targets).size(), 3);

  // Scenario 6: one new replica with storage 2 already chosen and node 1
  // excluded; returnChosenNodes=true, so the result includes the chosen
  // node plus the newly selected one (2 entries).
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  chosenNodes.add(storages[2]);
  targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes,
      true, excludedNodes, BLOCK_SIZE,
      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
  System.out.println("targets=" + Arrays.asList(targets));
  assertEquals(2, targets.length);
}
项目:big-c    文件:TestReplicationPolicyConsiderLoad.java   
/**
 * Tests that chooseTarget with considerLoad set to true correctly calculates
 * load with decommissioned nodes: once DNs 0-2 are decommissioned, the
 * in-service xceiver average rises and all replicas land on DNs 3-5.
 */
@Test
public void testChooseTargetWithDecomNodes() throws IOException {
  namenode.getNamesystem().writeLock();
  try {
    String blockPoolId = namenode.getNamesystem().getBlockPoolId();
    // Heartbeat datanodes 3..5 with xceiver counts of 2, 4 and 4.
    final int[] xceiverCounts = {2, 4, 4};
    for (int i = 3; i <= 5; i++) {
      dnManager.handleHeartbeat(dnrList.get(i),
          BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
          blockPoolId, dataNodes[i].getCacheCapacity(),
          dataNodes[i].getCacheRemaining(),
          xceiverCounts[i - 3], 0, 0, null);
    }
    // Total xceiver count reported in the heartbeats above.
    final int load = 2 + 4 + 4;

    // All six nodes are still in service, so the average is load/6.
    // (Removed an unused local: FSNamesystem fsn was never referenced.)
    assertEquals((double)load/6, dnManager.getFSClusterStats()
      .getInServiceXceiverAverage(), EPSILON);

    // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
    // returns false
    for (int i = 0; i < 3; i++) {
      DatanodeDescriptor d = dnManager.getDatanode(dnrList.get(i));
      dnManager.getDecomManager().startDecommission(d);
      d.setDecommissioned();
    }
    // Only three nodes remain in service, so the average doubles.
    assertEquals((double)load/3, dnManager.getFSClusterStats()
      .getInServiceXceiverAverage(), EPSILON);

    // update references of writer DN to update the de-commissioned state
    List<DatanodeDescriptor> liveNodes = new ArrayList<DatanodeDescriptor>();
    dnManager.fetchDatanodes(liveNodes, null, false);
    DatanodeDescriptor writerDn = null;
    if (liveNodes.contains(dataNodes[0])) {
      writerDn = liveNodes.get(liveNodes.indexOf(dataNodes[0]));
    }

    // Call chooseTarget()
    DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
        .getBlockPlacementPolicy().chooseTarget("testFile.txt", 3,
            writerDn, new ArrayList<DatanodeStorageInfo>(), false, null,
            1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);

    // Every replica must sit on one of the non-decommissioned nodes (3..5).
    assertEquals(3, targets.length);
    Set<DatanodeStorageInfo> targetSet = new HashSet<DatanodeStorageInfo>(
        Arrays.asList(targets));
    for (int i = 3; i < storages.length; i++) {
      assertTrue(targetSet.contains(storages[i]));
    }
  } finally {
    // Restore node state so later tests see an untouched cluster.
    dataNodes[0].stopDecommission();
    dataNodes[1].stopDecommission();
    dataNodes[2].stopDecommission();
    namenode.getNamesystem().writeUnlock();
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestReplicationPolicyConsiderLoad.java   
/**
 * Tests that chooseTarget with considerLoad set to true correctly calculates
 * load with decommissioned nodes: after DNs 0-2 are decommissioned the
 * in-service xceiver average doubles and all replicas go to DNs 3-5.
 */
@Test
public void testChooseTargetWithDecomNodes() throws IOException {
  namenode.getNamesystem().writeLock();
  try {
    String blockPoolId = namenode.getNamesystem().getBlockPoolId();
    // Heartbeat datanodes 3..5 with xceiver counts of 2, 4 and 4.
    dnManager.handleHeartbeat(dnrList.get(3),
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
        blockPoolId, dataNodes[3].getCacheCapacity(),
        dataNodes[3].getCacheRemaining(),
        2, 0, 0, null);
    dnManager.handleHeartbeat(dnrList.get(4),
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[4]),
        blockPoolId, dataNodes[4].getCacheCapacity(),
        dataNodes[4].getCacheRemaining(),
        4, 0, 0, null);
    dnManager.handleHeartbeat(dnrList.get(5),
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[5]),
        blockPoolId, dataNodes[5].getCacheCapacity(),
        dataNodes[5].getCacheRemaining(),
        4, 0, 0, null);
    // value in the above heartbeats
    final int load = 2 + 4 + 4;

    // All six nodes are in service, so the average is load/6.
    FSNamesystem fsn = namenode.getNamesystem();
    assertEquals((double)load/6, fsn.getInServiceXceiverAverage(), EPSILON);

    // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
    // returns false
    for (int i = 0; i < 3; i++) {
      DatanodeDescriptor d = dnManager.getDatanode(dnrList.get(i));
      dnManager.startDecommission(d);
      d.setDecommissioned();
    }
    // Only three nodes remain in service, so the average doubles.
    assertEquals((double)load/3, fsn.getInServiceXceiverAverage(), EPSILON);

    // update references of writer DN to update the de-commissioned state
    List<DatanodeDescriptor> liveNodes = new ArrayList<DatanodeDescriptor>();
    dnManager.fetchDatanodes(liveNodes, null, false);
    DatanodeDescriptor writerDn = null;
    if (liveNodes.contains(dataNodes[0])) {
      writerDn = liveNodes.get(liveNodes.indexOf(dataNodes[0]));
    }

    // Call chooseTarget()
    DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
        .getBlockPlacementPolicy().chooseTarget("testFile.txt", 3,
            writerDn, new ArrayList<DatanodeStorageInfo>(), false, null,
            1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);

    // Every replica must sit on one of the non-decommissioned nodes (3..5).
    assertEquals(3, targets.length);
    Set<DatanodeStorageInfo> targetSet = new HashSet<DatanodeStorageInfo>(
        Arrays.asList(targets));
    for (int i = 3; i < storages.length; i++) {
      assertTrue(targetSet.contains(storages[i]));
    }
  } finally {
    // Restore node state so later tests see an untouched cluster.
    dataNodes[0].stopDecommission();
    dataNodes[1].stopDecommission();
    dataNodes[2].stopDecommission();
    namenode.getNamesystem().writeUnlock();
  }
}
项目:FlexMap    文件:TestReplicationPolicyConsiderLoad.java   
/**
 * Tests that chooseTarget with considerLoad set to true correctly calculates
 * load with decommissioned nodes.
 */
@Test
public void testChooseTargetWithDecomNodes() throws IOException {
  namenode.getNamesystem().writeLock();
  try {
    String blockPoolId = namenode.getNamesystem().getBlockPoolId();
    // Heartbeat datanodes 3..5 with xceiver counts of 2, 4 and 4.
    final int[] xceiverCounts = {2, 4, 4};
    for (int i = 3; i <= 5; i++) {
      dnManager.handleHeartbeat(dnrList.get(i),
          BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
          blockPoolId, dataNodes[i].getCacheCapacity(),
          dataNodes[i].getCacheRemaining(),
          xceiverCounts[i - 3], 0, 0);
    }
    // Total xceiver count reported in the heartbeats above.
    final int load = 2 + 4 + 4;

    FSNamesystem fsn = namenode.getNamesystem();
    // Six in-service nodes share the load.
    assertEquals((double)load/6, fsn.getInServiceXceiverAverage(), EPSILON);

    // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
    // returns false
    for (int i = 0; i < 3; i++) {
      DatanodeDescriptor dn = dnManager.getDatanode(dnrList.get(i));
      dnManager.startDecommission(dn);
      dn.setDecommissioned();
    }
    // Only three nodes remain in service, so the average doubles.
    assertEquals((double)load/3, fsn.getInServiceXceiverAverage(), EPSILON);

    // Refresh the writer reference so it carries the decommissioned state.
    List<DatanodeDescriptor> liveNodes = new ArrayList<DatanodeDescriptor>();
    dnManager.fetchDatanodes(liveNodes, null, false);
    DatanodeDescriptor writerDn = null;
    int writerIdx = liveNodes.indexOf(dataNodes[0]);
    if (writerIdx >= 0) {
      writerDn = liveNodes.get(writerIdx);
    }

    // Call chooseTarget()
    DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
        .getBlockPlacementPolicy().chooseTarget("testFile.txt", 3,
            writerDn, new ArrayList<DatanodeStorageInfo>(), false, null,
            1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);

    // Every replica must sit on one of the non-decommissioned nodes (3..5).
    assertEquals(3, targets.length);
    Set<DatanodeStorageInfo> chosen = new HashSet<DatanodeStorageInfo>(
        Arrays.asList(targets));
    for (int i = 3; i < storages.length; i++) {
      assertTrue(chosen.contains(storages[i]));
    }
  } finally {
    // Restore node state so later tests see an untouched cluster.
    for (int i = 0; i < 3; i++) {
      dataNodes[i].stopDecommission();
    }
    namenode.getNamesystem().writeUnlock();
  }
}