Java class org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager: example source code
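The snippets below come from the test suites of Hadoop and Hadoop-derived projects and show how DecommissionManager is exercised in practice: the tests tune the decommission monitor through DFSConfigKeys, trigger monitor passes by hand, and inspect the manager's tracked and pending queues. As a purely illustrative sketch (the key names appear in the tests below; the values and the fragment itself are assumptions, not taken from any of the projects), the relevant settings are adjusted like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

Configuration conf = new HdfsConfiguration();
// How often, in seconds, the DecommissionManager monitor wakes up.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 30);
// Upper bound on the number of blocks one monitor pass may scan.
conf.setInt(
    DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY, 500000);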

Project: hadoop    File: TestDecommission.java
@Deprecated
@Test(timeout=120000)
public void testNodesPerInterval() throws Exception {
  Configuration newConf = new Configuration(conf);
  org.apache.log4j.Logger.getLogger(DecommissionManager.class)
      .setLevel(Level.TRACE);
  // Set the deprecated configuration key which limits the # of nodes per 
  // interval
  newConf.setInt("dfs.namenode.decommission.nodes.per.interval", 1);
  // Disable the normal monitor runs
  newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
      Integer.MAX_VALUE);
  startCluster(1, 3, newConf);
  final FileSystem fs = cluster.getFileSystem();
  final DatanodeManager datanodeManager =
      cluster.getNamesystem().getBlockManager().getDatanodeManager();
  final DecommissionManager decomManager = datanodeManager.getDecomManager();

  // Write a 3 block file, so each node has one block. Should scan 1 node 
  // each time.
  DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA);
  for (int i=0; i<3; i++) {
    doDecomCheck(datanodeManager, decomManager, 1);
  }
}
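With the deprecated dfs.namenode.decommission.nodes.per.interval key set to 1 and the regular monitor disabled, each manually triggered check (doDecomCheck is a test helper not included in this listing) is expected to scan exactly one of the three datanodes holding a replica of /file1, which is why the loop asserts a count of 1 three times.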
Project: big-c    File: TestDecommission.java   (testNodesPerInterval is identical to the hadoop version above)
Project: hadoop    File: TestDecommissioningStatus.java
@BeforeClass
public static void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
      false);

  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  excludeFile = new Path(dir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  Path includeFile = new Path(dir, "include");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);

  writeConfigFile(localFileSys, excludeFile, null);
  writeConfigFile(localFileSys, includeFile, null);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .setHeartbeatExpireInterval(3000);
  Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
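This setUp wires the include and exclude host files into DFS_HOSTS and DFS_HOSTS_EXCLUDE, shortens the heartbeat, replication, and decommission check intervals so state changes become visible within seconds, and sets DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY to 1, presumably to keep block movement slow enough for the tests to observe nodes while they are still decommissioning.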
Project: hadoop    File: TestDecommissioningStatus.java
/**
 * Verify the support for decommissioning a datanode that is already dead.
 * Under this scenario the datanode should immediately be marked as
 * DECOMMISSIONED
 */
@Test(timeout=120000)
public void testDecommissionDeadDN() throws Exception {
  Logger log = Logger.getLogger(DecommissionManager.class);
  log.setLevel(Level.DEBUG);
  DatanodeID dnID = cluster.getDataNodes().get(0).getDatanodeId();
  String dnName = dnID.getXferAddr();
  DataNodeProperties stoppedDN = cluster.stopDataNode(0);
  DFSTestUtil.waitForDatanodeState(cluster, dnID.getDatanodeUuid(),
      false, 30000);
  FSNamesystem fsn = cluster.getNamesystem();
  final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
  DatanodeDescriptor dnDescriptor = dm.getDatanode(dnID);
  decommissionNode(fsn, localFileSys, dnName);
  dm.refreshNodes(conf);
  BlockManagerTestUtil.recheckDecommissionState(dm);
  assertTrue(dnDescriptor.isDecommissioned());

  // Add the node back
  cluster.restartDataNode(stoppedDN, true);
  cluster.waitActive();

  // Call refreshNodes on FSNamesystem with empty exclude file to remove the
  // datanode from decommissioning list and make it available again.
  writeConfigFile(localFileSys, excludeFile, null);
  dm.refreshNodes(conf);
}
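decommissionNode and writeConfigFile are helpers defined elsewhere in TestDecommissioningStatus and are not reproduced in this listing. A hedged sketch of the pattern they implement, with a purely illustrative name and signature, would be: write the target datanode's transfer address into the exclude file, then have the DatanodeManager re-read the host lists, which hands the node over to the DecommissionManager (and, for an already-dead node, marks it DECOMMISSIONED directly, as the test above verifies).

// Illustrative sketch only; the real helpers in TestDecommissioningStatus may differ.
private void excludeAndRefresh(FSNamesystem fsn, FileSystem localFileSys,
    Path excludeFile, String dnXferAddr, Configuration conf) throws IOException {
  // Overwrite the exclude file with the datanode's transfer address.
  try (FSDataOutputStream out = localFileSys.create(excludeFile, true)) {
    out.writeBytes(dnXferAddr + "\n");
  }
  // Re-read dfs.hosts / dfs.hosts.exclude so the newly excluded node is
  // queued with the DecommissionManager.
  fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
}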
Project: hadoop    File: TestDecommission.java
@Test(timeout=120000)
public void testBlocksPerInterval() throws Exception {
  Configuration newConf = new Configuration(conf);
  org.apache.log4j.Logger.getLogger(DecommissionManager.class)
      .setLevel(Level.TRACE);
  // Turn the blocks per interval way down
  newConf.setInt(
      DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
      3);
  // Disable the normal monitor runs
  newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
      Integer.MAX_VALUE);
  startCluster(1, 3, newConf);
  final FileSystem fs = cluster.getFileSystem();
  final DatanodeManager datanodeManager =
      cluster.getNamesystem().getBlockManager().getDatanodeManager();
  final DecommissionManager decomManager = datanodeManager.getDecomManager();

  // Write a 3 block file, so each node has one block. Should scan 3 nodes.
  DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA);
  doDecomCheck(datanodeManager, decomManager, 3);
  // Write another file, should only scan two
  DFSTestUtil.createFile(fs, new Path("/file2"), 64, (short)3, 0xBAD1DEA);
  doDecomCheck(datanodeManager, decomManager, 2);
  // One more file, should only scan 1
  DFSTestUtil.createFile(fs, new Path("/file3"), 64, (short)3, 0xBAD1DEA);
  doDecomCheck(datanodeManager, decomManager, 1);
  // blocks on each DN now exceeds limit, still scan at least one node
  DFSTestUtil.createFile(fs, new Path("/file4"), 64, (short)3, 0xBAD1DEA);
  doDecomCheck(datanodeManager, decomManager, 1);
}
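The expected counts follow from the 3-blocks-per-interval limit: after /file1 each datanode holds 1 block, so a single pass can check all 3 nodes; after /file2 each holds 2, so a pass stops after 2 nodes; after /file3 each holds 3 and one node exhausts the budget; after /file4 a single node already exceeds the limit, but the monitor still checks at least one node per pass.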
Project: hadoop    File: TestDecommission.java
private void assertTrackedAndPending(DecommissionManager decomManager,
    int tracked, int pending) {
  assertEquals("Unexpected number of tracked nodes", tracked,
      decomManager.getNumTrackedNodes());
  assertEquals("Unexpected number of pending nodes", pending,
      decomManager.getNumPendingNodes());
}
Project: aliyun-oss-hadoop-fs    Files: TestDecommissioningStatus.java, TestDecommission.java   (setUp, testDecommissionDeadDN, testBlocksPerInterval, and assertTrackedAndPending are identical to the hadoop versions above)
Project: big-c    Files: TestDecommissioningStatus.java, TestDecommission.java   (setUp, testDecommissionDeadDN, testBlocksPerInterval, and assertTrackedAndPending are identical to the hadoop versions above)