Java 类org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil 实例源码

项目:hadoop    文件:TestDFSStorageStateRecovery.java   
/**
 * For NameNode, verify that the current and/or previous directories exist
 * as indicated by the method parameters.  If previous exists, verify that
 * it has not been modified by comparing the checksum of all its contained
 * files with their original checksum.  It is assumed that the server has
 * recovered.
 */
void checkResultNameNode(String[] baseDirs, boolean currentShouldExist,
    boolean previousShouldExist) throws IOException {
  if (currentShouldExist) {
    for (String baseDir : baseDirs) {
      // "current" must be a directory holding VERSION, at least one
      // fsimage file, and the seen_txid marker.
      assertTrue(new File(baseDir, "current").isDirectory());
      assertTrue(new File(baseDir, "current/VERSION").isFile());
      assertNotNull(FSImageTestUtil.findNewestImageFile(baseDir + "/current"));
      assertTrue(new File(baseDir, "current/seen_txid").isFile());
    }
  }
  if (previousShouldExist) {
    for (String baseDir : baseDirs) {
      File previousDir = new File(baseDir, "previous");
      assertTrue(previousDir.isDirectory());
      // "previous" must be byte-identical to the master copy captured
      // before the upgrade.
      assertEquals(
          UpgradeUtilities.checksumContents(NAME_NODE, previousDir, false),
          UpgradeUtilities.checksumMasterNameNodeContents());
    }
  }
}
项目:hadoop    文件:HATestUtil.java   
/**
 * Block until the NameNode at {@code nnIdx} has a checkpoint for every txid
 * in {@code txids}, polling every 300 ms.  The most recent assertion
 * failure is rethrown once roughly 10 seconds elapse without success.
 */
public static void waitForCheckpoint(MiniDFSCluster cluster, int nnIdx,
    List<Integer> txids) throws InterruptedException {
  final long giveUpTime = Time.now() + 10000;
  while (true) {
    try {
      FSImageTestUtil.assertNNHasCheckpoints(cluster, nnIdx, txids);
      return;
    } catch (AssertionError err) {
      if (Time.now() > giveUpTime) {
        throw err;  // out of patience; surface the last failure
      }
      Thread.sleep(300);
    }
  }
}
项目:hadoop    文件:TestBootstrapStandbyWithQJM.java   
/** BootstrapStandby when the existing NN is standby */
@Test
public void testBootstrapStandbyWithStandbyNN() throws Exception {
  // Put the first NN into standby state before bootstrapping from it.
  cluster.transitionToStandby(0);
  Configuration confNN1 = cluster.getConfiguration(1);

  // NN1 must be down while its storage is re-initialized.
  cluster.shutdownNameNode(1);

  // -force overwrites any existing storage directories on NN1.
  int rc = BootstrapStandby.run(new String[] { "-force" }, confNN1);
  assertEquals(0, rc);

  // NN1 should now hold the txid-0 checkpoint copied from the standby,
  // and its on-disk files should match the other NN's.
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
项目:hadoop    文件:TestBootstrapStandbyWithQJM.java   
/** BootstrapStandby when the existing NN is active */
@Test
public void testBootstrapStandbyWithActiveNN() throws Exception {
  // Put the first NN into active state before bootstrapping from it.
  cluster.transitionToActive(0);
  Configuration confNN1 = cluster.getConfiguration(1);

  // NN1 must be down while its storage is re-initialized.
  cluster.shutdownNameNode(1);

  // -force overwrites any existing storage directories on NN1.
  int rc = BootstrapStandby.run(new String[] { "-force" }, confNN1);
  assertEquals(0, rc);

  // NN1 should now hold the txid-0 checkpoint copied from the active,
  // and its on-disk files should match the other NN's.
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
项目:hadoop    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous one.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    if (nodeType == NodeType.NAME_NODE) {
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
    } else if (nodeType == NodeType.DATA_NODE) {
      // Rolled-back DataNode contents must match the pre-upgrade master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, curDir, false),
          UpgradeUtilities.checksumMasterDataNodeContents());
    }
  }

  // All storage directories must carry byte-identical file sets.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());

  // Rollback renames "previous" back to "current", so none may remain.
  for (String baseDir : baseDirs) {
    assertFalse(new File(baseDir, "previous").isDirectory());
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestDFSStorageStateRecovery.java   
/**
 * For NameNode, verify that the current and/or previous directories exist
 * as indicated by the method parameters.  If previous exists, verify that
 * it has not been modified by comparing the checksum of all its contained
 * files with their original checksum.  It is assumed that the server has
 * recovered.
 */
void checkResultNameNode(String[] baseDirs, boolean currentShouldExist,
    boolean previousShouldExist) throws IOException {
  if (currentShouldExist) {
    for (String baseDir : baseDirs) {
      // "current" must be a directory holding VERSION, at least one
      // fsimage file, and the seen_txid marker.
      assertTrue(new File(baseDir, "current").isDirectory());
      assertTrue(new File(baseDir, "current/VERSION").isFile());
      assertNotNull(FSImageTestUtil.findNewestImageFile(baseDir + "/current"));
      assertTrue(new File(baseDir, "current/seen_txid").isFile());
    }
  }
  if (previousShouldExist) {
    for (String baseDir : baseDirs) {
      File previousDir = new File(baseDir, "previous");
      assertTrue(previousDir.isDirectory());
      // "previous" must be byte-identical to the master copy captured
      // before the upgrade.
      assertEquals(
          UpgradeUtilities.checksumContents(NAME_NODE, previousDir, false),
          UpgradeUtilities.checksumMasterNameNodeContents());
    }
  }
}
项目:aliyun-oss-hadoop-fs    文件:HATestUtil.java   
/**
 * Block until the NameNode at {@code nnIdx} has a checkpoint for every txid
 * in {@code txids}, polling every 300 ms.  The most recent assertion
 * failure is rethrown once roughly 10 seconds elapse without success.
 */
public static void waitForCheckpoint(MiniDFSCluster cluster, int nnIdx,
    List<Integer> txids) throws InterruptedException {
  final long giveUpTime = Time.now() + 10000;
  while (true) {
    try {
      FSImageTestUtil.assertNNHasCheckpoints(cluster, nnIdx, txids);
      return;
    } catch (AssertionError err) {
      if (Time.now() > giveUpTime) {
        throw err;  // out of patience; surface the last failure
      }
      Thread.sleep(300);
    }
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestBootstrapStandbyWithQJM.java   
/**
 * Shut down and re-bootstrap every NN other than the first, then verify
 * that all NNs end up with matching on-disk state.
 */
private void bootstrapStandbys() throws Exception {
  // NN0 stays up as the source; bootstrap NNs 1..nnCount-1 from it.
  for (int nn = 1; nn < nnCount; nn++) {
    Configuration otherNNConf = cluster.getConfiguration(nn);

    // The target NN must be down while its storage is re-initialized.
    cluster.shutdownNameNode(nn);

    // -force overwrites any existing storage directories on the target NN.
    int rc = BootstrapStandby.run(new String[] { "-force" }, otherNNConf);
    assertEquals(0, rc);

    // The bootstrapped NN should now hold the txid-0 checkpoint.
    FSImageTestUtil.assertNNHasCheckpoints(cluster, nn, ImmutableList.of(0));
  }
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
项目:aliyun-oss-hadoop-fs    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous one.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    if (nodeType == NodeType.NAME_NODE) {
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
    } else if (nodeType == NodeType.DATA_NODE) {
      // Rolled-back DataNode contents must match the pre-upgrade master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, curDir, false),
          UpgradeUtilities.checksumMasterDataNodeContents());
    }
  }

  // All storage directories must carry byte-identical file sets.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());

  // Rollback renames "previous" back to "current", so none may remain.
  for (String baseDir : baseDirs) {
    assertFalse(new File(baseDir, "previous").isDirectory());
  }
}
项目:big-c    文件:TestDFSStorageStateRecovery.java   
/**
 * For NameNode, verify that the current and/or previous directories exist
 * as indicated by the method parameters.  If previous exists, verify that
 * it has not been modified by comparing the checksum of all its contained
 * files with their original checksum.  It is assumed that the server has
 * recovered.
 */
void checkResultNameNode(String[] baseDirs, boolean currentShouldExist,
    boolean previousShouldExist) throws IOException {
  if (currentShouldExist) {
    for (String baseDir : baseDirs) {
      // "current" must be a directory holding VERSION, at least one
      // fsimage file, and the seen_txid marker.
      assertTrue(new File(baseDir, "current").isDirectory());
      assertTrue(new File(baseDir, "current/VERSION").isFile());
      assertNotNull(FSImageTestUtil.findNewestImageFile(baseDir + "/current"));
      assertTrue(new File(baseDir, "current/seen_txid").isFile());
    }
  }
  if (previousShouldExist) {
    for (String baseDir : baseDirs) {
      File previousDir = new File(baseDir, "previous");
      assertTrue(previousDir.isDirectory());
      // "previous" must be byte-identical to the master copy captured
      // before the upgrade.
      assertEquals(
          UpgradeUtilities.checksumContents(NAME_NODE, previousDir, false),
          UpgradeUtilities.checksumMasterNameNodeContents());
    }
  }
}
项目:big-c    文件:HATestUtil.java   
/**
 * Block until the NameNode at {@code nnIdx} has a checkpoint for every txid
 * in {@code txids}, polling every 300 ms.  The most recent assertion
 * failure is rethrown once roughly 10 seconds elapse without success.
 */
public static void waitForCheckpoint(MiniDFSCluster cluster, int nnIdx,
    List<Integer> txids) throws InterruptedException {
  final long giveUpTime = Time.now() + 10000;
  while (true) {
    try {
      FSImageTestUtil.assertNNHasCheckpoints(cluster, nnIdx, txids);
      return;
    } catch (AssertionError err) {
      if (Time.now() > giveUpTime) {
        throw err;  // out of patience; surface the last failure
      }
      Thread.sleep(300);
    }
  }
}
项目:big-c    文件:TestBootstrapStandbyWithQJM.java   
/** BootstrapStandby when the existing NN is standby */
@Test
public void testBootstrapStandbyWithStandbyNN() throws Exception {
  // Put the first NN into standby state before bootstrapping from it.
  cluster.transitionToStandby(0);
  Configuration confNN1 = cluster.getConfiguration(1);

  // NN1 must be down while its storage is re-initialized.
  cluster.shutdownNameNode(1);

  // -force overwrites any existing storage directories on NN1.
  int rc = BootstrapStandby.run(new String[] { "-force" }, confNN1);
  assertEquals(0, rc);

  // NN1 should now hold the txid-0 checkpoint copied from the standby,
  // and its on-disk files should match the other NN's.
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
项目:big-c    文件:TestBootstrapStandbyWithQJM.java   
/** BootstrapStandby when the existing NN is active */
@Test
public void testBootstrapStandbyWithActiveNN() throws Exception {
  // Put the first NN into active state before bootstrapping from it.
  cluster.transitionToActive(0);
  Configuration confNN1 = cluster.getConfiguration(1);

  // NN1 must be down while its storage is re-initialized.
  cluster.shutdownNameNode(1);

  // -force overwrites any existing storage directories on NN1.
  int rc = BootstrapStandby.run(new String[] { "-force" }, confNN1);
  assertEquals(0, rc);

  // NN1 should now hold the txid-0 checkpoint copied from the active,
  // and its on-disk files should match the other NN's.
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
项目:big-c    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous one.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    if (nodeType == NodeType.NAME_NODE) {
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
    } else if (nodeType == NodeType.DATA_NODE) {
      // Rolled-back DataNode contents must match the pre-upgrade master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, curDir, false),
          UpgradeUtilities.checksumMasterDataNodeContents());
    }
  }

  // All storage directories must carry byte-identical file sets.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());

  // Rollback renames "previous" back to "current", so none may remain.
  for (String baseDir : baseDirs) {
    assertFalse(new File(baseDir, "previous").isDirectory());
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestDFSStorageStateRecovery.java   
/**
 * For NameNode, verify that the current and/or previous directories exist
 * as indicated by the method parameters.  If previous exists, verify that
 * it has not been modified by comparing the checksum of all its contained
 * files with their original checksum.  It is assumed that the server has
 * recovered.
 */
void checkResultNameNode(String[] baseDirs, boolean currentShouldExist,
    boolean previousShouldExist) throws IOException {
  if (currentShouldExist) {
    for (String baseDir : baseDirs) {
      // "current" must be a directory holding VERSION, at least one
      // fsimage file, and the seen_txid marker.
      assertTrue(new File(baseDir, "current").isDirectory());
      assertTrue(new File(baseDir, "current/VERSION").isFile());
      assertNotNull(FSImageTestUtil.findNewestImageFile(baseDir + "/current"));
      assertTrue(new File(baseDir, "current/seen_txid").isFile());
    }
  }
  if (previousShouldExist) {
    for (String baseDir : baseDirs) {
      File previousDir = new File(baseDir, "previous");
      assertTrue(previousDir.isDirectory());
      // "previous" must be byte-identical to the master copy captured
      // before the upgrade.
      assertEquals(
          UpgradeUtilities.checksumContents(NAME_NODE, previousDir, false),
          UpgradeUtilities.checksumMasterNameNodeContents());
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:HATestUtil.java   
/**
 * Block until the NameNode at {@code nnIdx} has a checkpoint for every txid
 * in {@code txids}, polling every 300 ms.  The most recent assertion
 * failure is rethrown once roughly 10 seconds elapse without success.
 */
public static void waitForCheckpoint(MiniDFSCluster cluster, int nnIdx,
    List<Integer> txids) throws InterruptedException {
  final long giveUpTime = Time.now() + 10000;
  while (true) {
    try {
      FSImageTestUtil.assertNNHasCheckpoints(cluster, nnIdx, txids);
      return;
    } catch (AssertionError err) {
      if (Time.now() > giveUpTime) {
        throw err;  // out of patience; surface the last failure
      }
      Thread.sleep(300);
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestBootstrapStandbyWithQJM.java   
/** BootstrapStandby when the existing NN is standby */
@Test
public void testBootstrapStandbyWithStandbyNN() throws Exception {
  // Put the first NN into standby state before bootstrapping from it.
  cluster.transitionToStandby(0);
  Configuration confNN1 = cluster.getConfiguration(1);

  // NN1 must be down while its storage is re-initialized.
  cluster.shutdownNameNode(1);

  // -force overwrites any existing storage directories on NN1.
  int rc = BootstrapStandby.run(new String[] { "-force" }, confNN1);
  assertEquals(0, rc);

  // NN1 should now hold the txid-0 checkpoint copied from the standby,
  // and its on-disk files should match the other NN's.
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestBootstrapStandbyWithQJM.java   
/** BootstrapStandby when the existing NN is active */
@Test
public void testBootstrapStandbyWithActiveNN() throws Exception {
  // Put the first NN into active state before bootstrapping from it.
  cluster.transitionToActive(0);
  Configuration confNN1 = cluster.getConfiguration(1);

  // NN1 must be down while its storage is re-initialized.
  cluster.shutdownNameNode(1);

  // -force overwrites any existing storage directories on NN1.
  int rc = BootstrapStandby.run(new String[] { "-force" }, confNN1);
  assertEquals(0, rc);

  // NN1 should now hold the txid-0 checkpoint copied from the active,
  // and its on-disk files should match the other NN's.
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestSnapshot.java   
/**
 * Test if the OfflineImageViewerPB can correctly parse a fsimage containing
 * snapshots.
 */
@Test
public void testOfflineImageViewer() throws Exception {
  runTestSnapshot(1);

  // retrieve the fsimage. Note that we already save namespace to fsimage at
  // the end of each iteration of runTestSnapshot.
  File originalFsimage = FSImageTestUtil.findLatestImageFile(
      FSImageTestUtil.getFSImage(
      cluster.getNameNode()).getStorage().getStorageDir(0));
  assertNotNull("Didn't generate or can't find fsimage", originalFsimage);
  StringWriter output = new StringWriter();
  PrintWriter o = new PrintWriter(output);
  PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
  // Fix: the original leaked this RandomAccessFile; close it even when
  // visit() throws so the fsimage file is not held open for the rest of
  // the test JVM's lifetime.
  try (RandomAccessFile raf = new RandomAccessFile(originalFsimage, "r")) {
    v.visit(raf);
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous one.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    if (nodeType == NodeType.NAME_NODE) {
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
    } else if (nodeType == NodeType.DATA_NODE) {
      // Rolled-back DataNode contents must match the pre-upgrade master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, curDir, false),
          UpgradeUtilities.checksumMasterDataNodeContents());
    }
  }

  // All storage directories must carry byte-identical file sets.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());

  // Rollback renames "previous" back to "current", so none may remain.
  for (String baseDir : baseDirs) {
    assertFalse(new File(baseDir, "previous").isDirectory());
  }
}
项目:hadoop-plus    文件:TestDFSStorageStateRecovery.java   
/**
 * For NameNode, verify that the current and/or previous directories exist
 * as indicated by the method parameters.  If previous exists, verify that
 * it has not been modified by comparing the checksum of all its contained
 * files with their original checksum.  It is assumed that the server has
 * recovered.
 */
void checkResultNameNode(String[] baseDirs, boolean currentShouldExist,
    boolean previousShouldExist) throws IOException {
  if (currentShouldExist) {
    for (String baseDir : baseDirs) {
      // "current" must be a directory holding VERSION, at least one
      // fsimage file, and the seen_txid marker.
      assertTrue(new File(baseDir, "current").isDirectory());
      assertTrue(new File(baseDir, "current/VERSION").isFile());
      assertNotNull(FSImageTestUtil.findNewestImageFile(baseDir + "/current"));
      assertTrue(new File(baseDir, "current/seen_txid").isFile());
    }
  }
  if (previousShouldExist) {
    for (String baseDir : baseDirs) {
      File previousDir = new File(baseDir, "previous");
      assertTrue(previousDir.isDirectory());
      // "previous" must be byte-identical to the master copy captured
      // before the upgrade.
      assertEquals(
          UpgradeUtilities.checksumContents(NAME_NODE, previousDir),
          UpgradeUtilities.checksumMasterNameNodeContents());
    }
  }
}
项目:hadoop-plus    文件:TestDFSFinalize.java   
/**
 * Verify that the current directory exists and that the previous directory
 * does not exist.  Verify that current has not been modified by comparing
 * the checksum of all its contained files with their original checksum.
 * Note that we do not check that previous is removed on the DataNode
 * because its removal is asynchronous, therefore we have no reliable
 * way to know when it will happen.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws Exception {
  List<File> dirs = Lists.newArrayList();
  for (String nameNodeDir : nameNodeDirs) {
    File curDir = new File(nameNodeDir, "current");
    dirs.add(curDir);
    // Sanity-check the layout of each NN "current" directory.
    FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
  }

  // All NN storage directories must carry byte-identical file sets.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());

  for (String dataNodeDir : dataNodeDirs) {
    // DN "current" must match the master copy captured before finalize.
    assertEquals(
        UpgradeUtilities.checksumContents(
            DATA_NODE, new File(dataNodeDir, "current")),
        UpgradeUtilities.checksumMasterDataNodeContents());
  }
  for (String nameNodeDir : nameNodeDirs) {
    // Finalize must have removed the NN "previous" directories.
    assertFalse(new File(nameNodeDir, "previous").isDirectory());
  }
}
项目:hadoop-plus    文件:HATestUtil.java   
/**
 * Block until the NameNode at {@code nnIdx} has a checkpoint for every txid
 * in {@code txids}, polling every 300 ms.  The most recent assertion
 * failure is rethrown once roughly 10 seconds elapse without success.
 */
public static void waitForCheckpoint(MiniDFSCluster cluster, int nnIdx,
    List<Integer> txids) throws InterruptedException {
  final long giveUpTime = Time.now() + 10000;
  while (true) {
    try {
      FSImageTestUtil.assertNNHasCheckpoints(cluster, nnIdx, txids);
      return;
    } catch (AssertionError err) {
      if (Time.now() > giveUpTime) {
        throw err;  // out of patience; surface the last failure
      }
      Thread.sleep(300);
    }
  }
}
项目:hadoop-plus    文件:TestStandbyCheckpoints.java   
/**
 * Test for the case when both of the NNs in the cluster are in the standby
 * state, and thus are both creating checkpoints and uploading them to each
 * other.  Each should receive the error from the other node indicating
 * that the other node already has a checkpoint for the given txid, but
 * this should not cause an abort, etc.
 */
@Test
public void testBothNodesInStandbyState() throws Exception {
  doEdits(0, 10);

  cluster.transitionToStandby(0);

  // Transitioning to standby closed the edit log on the active, so the
  // standby catches up.  Both NNs then sit in standby with enough
  // uncheckpointed txns to trigger a checkpoint, and each will try to
  // checkpoint and upload to the other.
  HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
  HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));

  // Both images must record txid 12 as the latest checkpoint.
  assertEquals(12, nn0.getNamesystem().getFSImage()
      .getMostRecentCheckpointTxId());
  assertEquals(12, nn1.getNamesystem().getFSImage()
      .getMostRecentCheckpointTxId());

  // The two NNs' current directories must end up byte-identical.
  List<File> dirs = Lists.newArrayList();
  for (int nnIdx = 0; nnIdx < 2; nnIdx++) {
    dirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, nnIdx));
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(dirs, ImmutableSet.<String>of());
}
项目:hadoop-plus    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous one.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    if (nodeType == NodeType.NAME_NODE) {
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
    } else if (nodeType == NodeType.DATA_NODE) {
      // Rolled-back DataNode contents must match the pre-upgrade master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, curDir),
          UpgradeUtilities.checksumMasterDataNodeContents());
    }
  }

  // All storage directories must carry byte-identical file sets.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());

  // Rollback renames "previous" back to "current", so none may remain.
  for (String baseDir : baseDirs) {
    assertFalse(new File(baseDir, "previous").isDirectory());
  }
}
项目:FlexMap    文件:TestDFSStorageStateRecovery.java   
/**
 * For NameNode, verify that the current and/or previous directories exist
 * as indicated by the method parameters.  If previous exists, verify that
 * it has not been modified by comparing the checksum of all its contained
 * files with their original checksum.  It is assumed that the server has
 * recovered.
 */
void checkResultNameNode(String[] baseDirs, boolean currentShouldExist,
    boolean previousShouldExist) throws IOException {
  if (currentShouldExist) {
    for (String baseDir : baseDirs) {
      // "current" must be a directory holding VERSION, at least one
      // fsimage file, and the seen_txid marker.
      assertTrue(new File(baseDir, "current").isDirectory());
      assertTrue(new File(baseDir, "current/VERSION").isFile());
      assertNotNull(FSImageTestUtil.findNewestImageFile(baseDir + "/current"));
      assertTrue(new File(baseDir, "current/seen_txid").isFile());
    }
  }
  if (previousShouldExist) {
    for (String baseDir : baseDirs) {
      File previousDir = new File(baseDir, "previous");
      assertTrue(previousDir.isDirectory());
      // "previous" must be byte-identical to the master copy captured
      // before the upgrade.
      assertEquals(
          UpgradeUtilities.checksumContents(NAME_NODE, previousDir, false),
          UpgradeUtilities.checksumMasterNameNodeContents());
    }
  }
}
项目:FlexMap    文件:HATestUtil.java   
/**
 * Block until the NameNode at {@code nnIdx} has a checkpoint for every txid
 * in {@code txids}, polling every 300 ms.  The most recent assertion
 * failure is rethrown once roughly 10 seconds elapse without success.
 */
public static void waitForCheckpoint(MiniDFSCluster cluster, int nnIdx,
    List<Integer> txids) throws InterruptedException {
  final long giveUpTime = Time.now() + 10000;
  while (true) {
    try {
      FSImageTestUtil.assertNNHasCheckpoints(cluster, nnIdx, txids);
      return;
    } catch (AssertionError err) {
      if (Time.now() > giveUpTime) {
        throw err;  // out of patience; surface the last failure
      }
      Thread.sleep(300);
    }
  }
}
项目:FlexMap    文件:TestBootstrapStandbyWithQJM.java   
/** BootstrapStandby when the existing NN is standby */
@Test
public void testBootstrapStandbyWithStandbyNN() throws Exception {
  // Put the first NN into standby state before bootstrapping from it.
  cluster.transitionToStandby(0);
  Configuration confNN1 = cluster.getConfiguration(1);

  // NN1 must be down while its storage is re-initialized.
  cluster.shutdownNameNode(1);

  // -force overwrites any existing storage directories on NN1.
  int rc = BootstrapStandby.run(new String[] { "-force" }, confNN1);
  assertEquals(0, rc);

  // NN1 should now hold the txid-0 checkpoint copied from the standby,
  // and its on-disk files should match the other NN's.
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
项目:FlexMap    文件:TestBootstrapStandbyWithQJM.java   
/** BootstrapStandby when the existing NN is active */
@Test
public void testBootstrapStandbyWithActiveNN() throws Exception {
  // Put the first NN into active state before bootstrapping from it.
  cluster.transitionToActive(0);
  Configuration confNN1 = cluster.getConfiguration(1);

  // NN1 must be down while its storage is re-initialized.
  cluster.shutdownNameNode(1);

  // -force overwrites any existing storage directories on NN1.
  int rc = BootstrapStandby.run(new String[] { "-force" }, confNN1);
  assertEquals(0, rc);

  // NN1 should now hold the txid-0 checkpoint copied from the active,
  // and its on-disk files should match the other NN's.
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
项目:FlexMap    文件:TestSnapshot.java   
/**
 * Test if the OfflineImageViewerPB can correctly parse a fsimage containing
 * snapshots.
 */
@Test
public void testOfflineImageViewer() throws Exception {
  runTestSnapshot(1);

  // retrieve the fsimage. Note that we already save namespace to fsimage at
  // the end of each iteration of runTestSnapshot.
  File originalFsimage = FSImageTestUtil.findLatestImageFile(
      FSImageTestUtil.getFSImage(
      cluster.getNameNode()).getStorage().getStorageDir(0));
  assertNotNull("Didn't generate or can't find fsimage", originalFsimage);
  StringWriter output = new StringWriter();
  PrintWriter o = new PrintWriter(output);
  PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
  // Fix: the original leaked this RandomAccessFile; close it even when
  // visit() throws so the fsimage file is not held open for the rest of
  // the test JVM's lifetime.
  try (RandomAccessFile raf = new RandomAccessFile(originalFsimage, "r")) {
    v.visit(raf);
  }
}
项目:FlexMap    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous one.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    if (nodeType == NodeType.NAME_NODE) {
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
    } else if (nodeType == NodeType.DATA_NODE) {
      // Rolled-back DataNode contents must match the pre-upgrade master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, curDir, false),
          UpgradeUtilities.checksumMasterDataNodeContents());
    }
  }

  // All storage directories must carry byte-identical file sets.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());

  // Rollback renames "previous" back to "current", so none may remain.
  for (String baseDir : baseDirs) {
    assertFalse(new File(baseDir, "previous").isDirectory());
  }
}
项目:hadoop-TCP    文件:TestDFSStorageStateRecovery.java   
/**
 * For NameNode, verify that the current and/or previous directories exist
 * as indicated by the method parameters.  If previous exists, verify that
 * it has not been modified by comparing the checksum of all its contained
 * files with their original checksum.  It is assumed that the server has
 * recovered.
 */
void checkResultNameNode(String[] baseDirs, boolean currentShouldExist,
    boolean previousShouldExist) throws IOException {
  if (currentShouldExist) {
    for (String baseDir : baseDirs) {
      // "current" must be a directory holding VERSION, at least one
      // fsimage file, and the seen_txid marker.
      assertTrue(new File(baseDir, "current").isDirectory());
      assertTrue(new File(baseDir, "current/VERSION").isFile());
      assertNotNull(FSImageTestUtil.findNewestImageFile(baseDir + "/current"));
      assertTrue(new File(baseDir, "current/seen_txid").isFile());
    }
  }
  if (previousShouldExist) {
    for (String baseDir : baseDirs) {
      File previousDir = new File(baseDir, "previous");
      assertTrue(previousDir.isDirectory());
      // "previous" must be byte-identical to the master copy captured
      // before the upgrade.
      assertEquals(
          UpgradeUtilities.checksumContents(NAME_NODE, previousDir),
          UpgradeUtilities.checksumMasterNameNodeContents());
    }
  }
}
项目:hadoop-TCP    文件:TestDFSFinalize.java   
/**
 * Verify that the current directory exists and that the previous directory
 * does not exist.  Verify that current has not been modified by comparing
 * the checksum of all its contained files with their original checksum.
 * Note that we do not check that previous is removed on the DataNode
 * because its removal is asynchronous, therefore we have no reliable
 * way to know when it will happen.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws Exception {
  List<File> dirs = Lists.newArrayList();
  for (String nameNodeDir : nameNodeDirs) {
    File curDir = new File(nameNodeDir, "current");
    dirs.add(curDir);
    // Sanity-check the layout of each NN "current" directory.
    FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
  }

  // All NN storage directories must carry byte-identical file sets.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());

  for (String dataNodeDir : dataNodeDirs) {
    // DN "current" must match the master copy captured before finalize.
    assertEquals(
        UpgradeUtilities.checksumContents(
            DATA_NODE, new File(dataNodeDir, "current")),
        UpgradeUtilities.checksumMasterDataNodeContents());
  }
  for (String nameNodeDir : nameNodeDirs) {
    // Finalize must have removed the NN "previous" directories.
    assertFalse(new File(nameNodeDir, "previous").isDirectory());
  }
}
项目:hadoop-TCP    文件:HATestUtil.java   
/**
 * Block until the NameNode at {@code nnIdx} has a checkpoint for every txid
 * in {@code txids}, polling every 300 ms.  The most recent assertion
 * failure is rethrown once roughly 10 seconds elapse without success.
 */
public static void waitForCheckpoint(MiniDFSCluster cluster, int nnIdx,
    List<Integer> txids) throws InterruptedException {
  final long giveUpTime = Time.now() + 10000;
  while (true) {
    try {
      FSImageTestUtil.assertNNHasCheckpoints(cluster, nnIdx, txids);
      return;
    } catch (AssertionError err) {
      if (Time.now() > giveUpTime) {
        throw err;  // out of patience; surface the last failure
      }
      Thread.sleep(300);
    }
  }
}
项目:hadoop-TCP    文件:TestBootstrapStandbyWithQJM.java   
/** BootstrapStandby when the existing NN is standby */
@Test
public void testBootstrapStandbyWithStandbyNN() throws Exception {
  // Put the first NN into standby state before bootstrapping from it.
  cluster.transitionToStandby(0);
  Configuration confNN1 = cluster.getConfiguration(1);

  // NN1 must be down while its storage is re-initialized.
  cluster.shutdownNameNode(1);

  // -force overwrites any existing storage directories on NN1.
  int rc = BootstrapStandby.run(new String[] { "-force" }, confNN1);
  assertEquals(0, rc);

  // NN1 should now hold the txid-0 checkpoint copied from the standby,
  // and its on-disk files should match the other NN's.
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
项目:hadoop-TCP    文件:TestBootstrapStandbyWithQJM.java   
/** BootstrapStandby when the existing NN is active */
@Test
public void testBootstrapStandbyWithActiveNN() throws Exception {
  // Put the first NN into active state before bootstrapping from it.
  cluster.transitionToActive(0);
  Configuration confNN1 = cluster.getConfiguration(1);

  // NN1 must be down while its storage is re-initialized.
  cluster.shutdownNameNode(1);

  // -force overwrites any existing storage directories on NN1.
  int rc = BootstrapStandby.run(new String[] { "-force" }, confNN1);
  assertEquals(0, rc);

  // NN1 should now hold the txid-0 checkpoint copied from the active,
  // and its on-disk files should match the other NN's.
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
项目:hadoop-TCP    文件:TestStandbyCheckpoints.java   
/**
 * Test for the case when both of the NNs in the cluster are in the standby
 * state, and thus are both creating checkpoints and uploading them to each
 * other.  Each should receive the error from the other node indicating
 * that the other node already has a checkpoint for the given txid, but
 * this should not cause an abort, etc.
 */
@Test
public void testBothNodesInStandbyState() throws Exception {
  doEdits(0, 10);

  cluster.transitionToStandby(0);

  // Transitioning to standby closed the edit log on the active, so the
  // standby catches up.  Both NNs then sit in standby with enough
  // uncheckpointed txns to trigger a checkpoint, and each will try to
  // checkpoint and upload to the other.
  HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
  HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));

  // Both images must record txid 12 as the latest checkpoint.
  assertEquals(12, nn0.getNamesystem().getFSImage()
      .getMostRecentCheckpointTxId());
  assertEquals(12, nn1.getNamesystem().getFSImage()
      .getMostRecentCheckpointTxId());

  // The two NNs' current directories must end up byte-identical.
  List<File> dirs = Lists.newArrayList();
  for (int nnIdx = 0; nnIdx < 2; nnIdx++) {
    dirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, nnIdx));
  }
  FSImageTestUtil.assertParallelFilesAreIdentical(dirs, ImmutableSet.<String>of());
}
项目:hadoop-TCP    文件:TestDFSRollback.java   
/**
 * Verify that the new current directory is the old previous one.
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    if (nodeType == NodeType.NAME_NODE) {
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
    } else if (nodeType == NodeType.DATA_NODE) {
      // Rolled-back DataNode contents must match the pre-upgrade master copy.
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, curDir),
          UpgradeUtilities.checksumMasterDataNodeContents());
    }
  }

  // All storage directories must carry byte-identical file sets.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());

  // Rollback renames "previous" back to "current", so none may remain.
  for (String baseDir : baseDirs) {
    assertFalse(new File(baseDir, "previous").isDirectory());
  }
}
项目:hardfs    文件:TestDFSStorageStateRecovery.java   
/**
 * For NameNode, verify that the current and/or previous directories exist
 * as indicated by the method parameters.  If previous exists, verify that
 * it has not been modified by comparing the checksum of all its contained
 * files with their original checksum.  It is assumed that the server has
 * recovered.
 */
void checkResultNameNode(String[] baseDirs, boolean currentShouldExist,
    boolean previousShouldExist) throws IOException {
  if (currentShouldExist) {
    for (String baseDir : baseDirs) {
      // "current" must be a directory holding VERSION, at least one
      // fsimage file, and the seen_txid marker.
      assertTrue(new File(baseDir, "current").isDirectory());
      assertTrue(new File(baseDir, "current/VERSION").isFile());
      assertNotNull(FSImageTestUtil.findNewestImageFile(baseDir + "/current"));
      assertTrue(new File(baseDir, "current/seen_txid").isFile());
    }
  }
  if (previousShouldExist) {
    for (String baseDir : baseDirs) {
      File previousDir = new File(baseDir, "previous");
      assertTrue(previousDir.isDirectory());
      // "previous" must be byte-identical to the master copy captured
      // before the upgrade.
      assertEquals(
          UpgradeUtilities.checksumContents(NAME_NODE, previousDir),
          UpgradeUtilities.checksumMasterNameNodeContents());
    }
  }
}
项目:hardfs    文件:TestDFSFinalize.java   
/**
 * Verify that the current directory exists and that the previous directory
 * does not exist.  Verify that current has not been modified by comparing
 * the checksum of all its contained files with their original checksum.
 * Note that we do not check that previous is removed on the DataNode
 * because its removal is asynchronous, therefore we have no reliable
 * way to know when it will happen.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws Exception {
  List<File> dirs = Lists.newArrayList();
  for (String nameNodeDir : nameNodeDirs) {
    File curDir = new File(nameNodeDir, "current");
    dirs.add(curDir);
    // Sanity-check the layout of each NN "current" directory.
    FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
  }

  // All NN storage directories must carry byte-identical file sets.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());

  for (String dataNodeDir : dataNodeDirs) {
    // DN "current" must match the master copy captured before finalize.
    assertEquals(
        UpgradeUtilities.checksumContents(
            DATA_NODE, new File(dataNodeDir, "current")),
        UpgradeUtilities.checksumMasterDataNodeContents());
  }
  for (String nameNodeDir : nameNodeDirs) {
    // Finalize must have removed the NN "previous" directories.
    assertFalse(new File(nameNodeDir, "previous").isDirectory());
  }
}