Java class org.apache.hadoop.hdfs.server.namenode.TestGenericJournalConf.DummyJournalManager example source code

Project: hadoop-EAR    File: TestAllowFormat.java
/**
 * Test to ensure that format is called for non-file journals.
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new Configuration();
  File nameDir = new File(hdfsDir, "name");
  if (nameDir.exists()) {
    FileUtil.fullyDelete(nameDir);
  }

  conf.setBoolean("dfs.namenode.support.allowformat", true);
  conf.set("dfs.name.edits.journal-plugin" + ".dummy",
      DummyJournalManager.class.getName());
  conf.set("dfs.name.edits.dir",
      "dummy://test");
  conf.set("dfs.name.dir", nameDir.getPath());

  NameNode.format(conf, false, true);
  assertTrue(DummyJournalManager.formatCalled);
  assertTrue(DummyJournalManager.shouldPromptCalled);
}
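
The assertions above check static flags that the dummy journal plugin flips when the NameNode formats it. The DummyJournalManager class itself is not shown on this page; below is a minimal, hypothetical sketch of its shape (the real class lives in TestGenericJournalConf and implements the HDFS JournalManager interface of the corresponding branch, so the class name, constructor, and method signatures here are illustrative assumptions, not the actual source):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;

/**
 * Illustrative stand-in for TestGenericJournalConf.DummyJournalManager.
 * Only the parts the tests above rely on are sketched here.
 */
class DummyJournalManagerSketch {
  // Static flags the tests assert on after NameNode.format(conf, ...) runs.
  static volatile boolean formatCalled = false;
  static volatile boolean shouldPromptCalled = false;

  // Journal plugins are created reflectively from the edits URI, so a
  // constructor taking the Configuration and the journal URI is expected.
  DummyJournalManagerSketch(Configuration conf, URI uri) {
  }

  // Invoked when the NameNode formats this journal.
  void format() {
    formatCalled = true;
  }

  // Invoked when the NameNode asks whether existing data should trigger a
  // prompt before reformatting.
  boolean hasSomeData() {
    shouldPromptCalled = true;
    return false;
  }
}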
Project: hadoop    File: TestAllowFormat.java
/**
 * Test that format is skipped when a non-file-scheme directory is configured.
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  String logicalName = "mycluster";

  // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify whether the NameNode
  // is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
  // considered.
  String localhost = "127.0.0.1";
  InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
  InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
  HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

  conf.set(DFS_NAMENODE_NAME_DIR_KEY,
      new File(DFS_BASE_DIR, "name").getAbsolutePath());
  conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
      DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
      + localhost + ":2181/ledgers");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");

  // An internal assert verifies that the test behaves as expected
  NameNode.format(conf);
}
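
In this HA variant the shared edits directory uses a dummy:// URI, and the plugin class is registered under a key built from DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX plus the URI scheme. The following sketch illustrates that mapping; it is an assumption about how the lookup works, not the actual FSEditLog code, and the helper class and method names are hypothetical:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class JournalPluginLookupSketch {
  // Rough sketch of mapping a non-file edits URI to a JournalManager class
  // via the configuration key set in the test above.
  public static Class<?> resolvePlugin(Configuration conf, URI editsUri) {
    // e.g. "dfs.namenode.edits.journal-plugin.dummy" for dummy://... URIs
    String pluginKey = DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX
        + "." + editsUri.getScheme();
    // The NameNode instantiates the returned class reflectively instead of a
    // file-based journal, so formatting never touches local disk for it.
    return conf.getClass(pluginKey, null);
  }
}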
Project: aliyun-oss-hadoop-fs    File: TestAllowFormat.java
/**
 * Test that format is skipped when a non-file-scheme directory is configured.
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  String logicalName = "mycluster";

  // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify whether the NameNode
  // is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
  // considered.
  String localhost = "127.0.0.1";
  InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
  InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
  HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

  conf.set(DFS_NAMENODE_NAME_DIR_KEY,
      new File(DFS_BASE_DIR, "name").getAbsolutePath());
  conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
      DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
      + localhost + ":2181/ledgers");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");

  // An internal assert verifies that the test behaves as expected
  NameNode.format(conf);
}
Project: big-c    File: TestAllowFormat.java
/**
 * Test that format is skipped when a non-file-scheme directory is configured.
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  String logicalName = "mycluster";

  // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify whether the NameNode
  // is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
  // considered.
  String localhost = "127.0.0.1";
  InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
  InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
  HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

  conf.set(DFS_NAMENODE_NAME_DIR_KEY,
      new File(DFS_BASE_DIR, "name").getAbsolutePath());
  conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
      DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
      + localhost + ":2181/ledgers");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");

  // An internal assert verifies that the test behaves as expected
  NameNode.format(conf);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestAllowFormat.java
/**
 * Test that format is skipped when a non-file-scheme directory is configured.
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  String logicalName = "mycluster";

  // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify whether the NameNode
  // is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
  // considered.
  String localhost = "127.0.0.1";
  InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
  InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
  HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

  conf.set(DFS_NAMENODE_NAME_DIR_KEY,
      new File(DFS_BASE_DIR, "name").getAbsolutePath());
  conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
      DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
      + localhost + ":2181/ledgers");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");

  // An internal assert verifies that the test behaves as expected
  NameNode.format(conf);
}
Project: hadoop-plus    File: TestAllowFormat.java
/**
 * Test that format is skipped when a non-file-scheme directory is configured.
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  String logicalName = "mycluster";

  // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify whether the NameNode
  // is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
  // considered.
  String localhost = "127.0.0.1";
  InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
  InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
  HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

  conf.set(DFS_NAMENODE_NAME_DIR_KEY,
      new File(hdfsDir, "name").getAbsolutePath());
  conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
      DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
      + localhost + ":2181/ledgers");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");

  // An internal assert verifies that the test behaves as expected
  NameNode.format(conf);
}
Project: FlexMap    File: TestAllowFormat.java
/**
 * Test that format is skipped when a non-file-scheme directory is configured.
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  String logicalName = "mycluster";

  // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify whether the NameNode
  // is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
  // considered.
  String localhost = "127.0.0.1";
  InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
  InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
  HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

  conf.set(DFS_NAMENODE_NAME_DIR_KEY,
      new File(DFS_BASE_DIR, "name").getAbsolutePath());
  conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
      DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
      + localhost + ":2181/ledgers");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");

  // An internal assert verifies that the test behaves as expected
  NameNode.format(conf);
}
Project: hadoop-TCP    File: TestAllowFormat.java
/**
 * Test that format is skipped when a non-file-scheme directory is configured.
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  String logicalName = "mycluster";

  // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify whether the NameNode
  // is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
  // considered.
  String localhost = "127.0.0.1";
  InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
  InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
  HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

  conf.set(DFS_NAMENODE_NAME_DIR_KEY,
      new File(hdfsDir, "name").getAbsolutePath());
  conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
      DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
      + localhost + ":2181/ledgers");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");

  // An internal assert verifies that the test behaves as expected
  NameNode.format(conf);
}
Project: hardfs    File: TestAllowFormat.java
/**
 * Test that format is skipped when a non-file-scheme directory is configured.
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  String logicalName = "mycluster";

  // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify whether the NameNode
  // is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
  // considered.
  String localhost = "127.0.0.1";
  InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
  InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
  HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

  conf.set(DFS_NAMENODE_NAME_DIR_KEY,
      new File(hdfsDir, "name").getAbsolutePath());
  conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
      DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
      + localhost + ":2181/ledgers");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");

  // An internal assert verifies that the test behaves as expected
  NameNode.format(conf);
}
Project: hadoop-on-lustre2    File: TestAllowFormat.java
/**
 * Test that format is skipped when a non-file-scheme directory is configured.
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  String logicalName = "mycluster";

  // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify whether the NameNode
  // is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
  // considered.
  String localhost = "127.0.0.1";
  InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
  InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
  HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

  conf.set(DFS_NAMENODE_NAME_DIR_KEY,
      new File(DFS_BASE_DIR, "name").getAbsolutePath());
  conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
      DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
      + localhost + ":2181/ledgers");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");

  // An internal assert verifies that the test behaves as expected
  NameNode.format(conf);
}