Java class org.apache.hadoop.hdfs.MiniDFSNNTopology usage examples (source code)
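MiniDFSNNTopology describes the NameNode layout (the nameservices, the NameNodes within each, and optionally fixed HTTP or IPC ports) that a MiniDFSCluster should be built with. Before the per-project examples, here is a minimal sketch, assembled only from calls that appear in the snippets below, of the three common ways to construct one:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;

private static void demoTopologies() throws Exception {
  // One nameservice with an active/standby NameNode pair:
  MiniDFSNNTopology ha = MiniDFSNNTopology.simpleHATopology();

  // Three independent (federated) nameservices:
  MiniDFSNNTopology federated = MiniDFSNNTopology.simpleFederatedTopology(3);

  // An explicit topology with fixed ports, used below when the
  // NameNodes must be reachable on known addresses:
  MiniDFSNNTopology manual = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
          .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(20001))
          .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(20002)));

  // Any of these is handed to the MiniDFSCluster builder:
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
      .nnTopology(ha)
      .numDataNodes(1)
      .build();
  cluster.waitActive();
  cluster.shutdown();
}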

Project: hadoop    File: TestWebHdfsWithMultipleNameNodes.java
private static void setupCluster(final int nNameNodes, final int nDataNodes)
    throws Exception {
  LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);

  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
      .numDataNodes(nDataNodes)
      .build();
  cluster.waitActive();

  webhdfs = new WebHdfsFileSystem[nNameNodes];
  for(int i = 0; i < webhdfs.length; i++) {
    final InetSocketAddress addr = cluster.getNameNode(i).getHttpAddress();
    final String uri = WebHdfsFileSystem.SCHEME + "://"
        + addr.getHostName() + ":" + addr.getPort() + "/";
    webhdfs[i] = (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
  }
}
Project: hadoop    File: TestMover.java
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestMover.java
@Test
public void testMoverCliWithFederationHA() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(3))
      .numDataNodes(0).build();
  final Configuration conf = new HdfsConfiguration();
  DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
  try {
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(3, namenodes.size());

    Iterator<URI> iter = namenodes.iterator();
    URI nn1 = iter.next();
    URI nn2 = iter.next();
    URI nn3 = iter.next();
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar", nn3 + "/foobar");
    Assert.assertEquals(3, movePaths.size());
    checkMovePaths(movePaths.get(nn1), new Path("/foo"), new Path("/bar"));
    checkMovePaths(movePaths.get(nn2), new Path("/foo/bar"));
    checkMovePaths(movePaths.get(nn3), new Path("/foobar"));
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestXAttrsWithHA.java
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Project: hadoop    File: TestBootstrapStandby.java
@Before
public void setupCluster() throws IOException {
  Configuration conf = new Configuration();

  MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(20001))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(20002)));

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(0)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  cluster.transitionToActive(0);
  cluster.shutdownNameNode(1);
}
Project: hadoop    File: TestRetryCacheWithHA.java
@Before
public void setup() throws Exception {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(DataNodes).build();
  cluster.waitActive();
  cluster.transitionToActive(0);
  // setup the configuration
  HATestUtil.setFailoverConfigurations(cluster, conf);
  dfs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
}
Project: hadoop    File: TestInitializeSharedEdits.java
@Before
public void setupCluster() throws IOException {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);

  MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(0)
    .build();
  cluster.waitActive();

  shutdownClusterAndRemoveSharedEditsDir();
}
Project: hadoop    File: TestHarFileSystemWithHA.java
/**
 * Test that the HarFileSystem works with underlying HDFS URIs that have no
 * port specified, as is often the case with an HA setup.
 */
@Test
public void testHarUriWithHaUriWithNoPort() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .build();
    cluster.transitionToActive(0);
    HATestUtil.setFailoverConfigurations(cluster, conf);

    createEmptyHarArchive(HATestUtil.configureFailoverFs(cluster, conf),
        TEST_HAR_PATH);

    URI failoverUri = FileSystem.getDefaultUri(conf);
    Path p = new Path("har://hdfs-" + failoverUri.getAuthority() + TEST_HAR_PATH);
    p.getFileSystem(conf);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop    File: TestHASafeMode.java
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Project: hadoop    File: TestLossyRetryInvocationHandler.java
@Test
public void testStartNNWithTrashEmptier() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new HdfsConfiguration();

  // enable both trash emptier and dropping response
  conf.setLong("fs.trash.interval", 360);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);

  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
        .build();
    cluster.waitActive();
    cluster.transitionToActive(0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop    File: TestEditLogTailer.java
private static void testStandbyTriggersLogRolls(int activeIndex)
    throws Exception {
  Configuration conf = new Configuration();
  // Roll every 1s
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

  // Have to specify IPC ports so the NNs can talk to each other.
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032)));

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(0)
    .build();
  try {
    cluster.transitionToActive(activeIndex);
    waitForLogRollInSharedDir(cluster, 3);
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestDFSUpgradeWithHA.java
/**
 * Make sure that an HA NN will start if a previous upgrade was in progress.
 */
@Test
public void testStartingWithUpgradeInProgressSucceeds() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();

    // Simulate an upgrade having started.
    for (int i = 0; i < 2; i++) {
      for (URI uri : cluster.getNameDirs(i)) {
        File prevTmp = new File(new File(uri), Storage.STORAGE_TMP_PREVIOUS);
        LOG.info("creating previous tmp dir: " + prevTmp);
        assertTrue(prevTmp.mkdirs());
      }
    }

    cluster.restartNameNodes();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop    File: TestQuotasWithHA.java
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  HAUtil.setAllowStandbyReads(conf, true);

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Project: hadoop    File: TestNameNodeRetryCacheMetrics.java
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
Project: hadoop    File: TestDataNodeHotSwapVolumes.java
private void startDFSCluster(int numNameNodes, int numDataNodes)
    throws IOException {
  shutdown();
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  /* Allow 1 volume failure */
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);

  MiniDFSNNTopology nnTopology =
      MiniDFSNNTopology.simpleFederatedTopology(numNameNodes);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(nnTopology)
      .numDataNodes(numDataNodes)
      .build();
  cluster.waitActive();
}
Project: hadoop    File: TestBootstrapStandbyWithBKJM.java
@Before
public void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
      .createJournalURI("/bootstrapStandby").toString());
  BKJMUtil.addJournalManagerDefinition(conf);
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      SlowCodec.class.getCanonicalName());
  CompressionCodecFactory.setCodecClasses(conf,
      ImmutableList.<Class> of(SlowCodec.class));
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(
          new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)).addNN(
          new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
  cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
      .numDataNodes(1).manageNameDfsSharedDirs(false).build();
  cluster.waitActive();
}
Project: aliyun-oss-hadoop-fs    File: TestWebHdfsWithMultipleNameNodes.java
private static void setupCluster(final int nNameNodes, final int nDataNodes)
    throws Exception {
  LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
      .numDataNodes(nDataNodes)
      .build();
  cluster.waitActive();

  webhdfs = new WebHdfsFileSystem[nNameNodes];
  for(int i = 0; i < webhdfs.length; i++) {
    final InetSocketAddress addr = cluster.getNameNode(i).getHttpAddress();
    final String uri = WebHdfsConstants.WEBHDFS_SCHEME + "://"
        + addr.getHostName() + ":" + addr.getPort() + "/";
    webhdfs[i] = (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
  }
}
Project: aliyun-oss-hadoop-fs    File: TestMover.java
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
Project: aliyun-oss-hadoop-fs    File: TestXAttrsWithHA.java
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Project: aliyun-oss-hadoop-fs    File: TestBootstrapStandby.java
@Before
public void setupCluster() throws IOException {
  Configuration conf = new Configuration();

  // duplicate code with MiniQJMHACluster#createDefaultTopology, but don't want to cross
  // dependencies or munge too much code to support it all correctly
  MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf("ns1");
  for (int i = 0; i < maxNNCount; i++) {
    nameservice.addNN(new MiniDFSNNTopology.NNConf("nn" + i).setHttpPort(STARTING_PORT + i + 1));
  }

  MiniDFSNNTopology topology = new MiniDFSNNTopology().addNameservice(nameservice);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  cluster.transitionToActive(0);
  // shutdown the other NNs
  for (int i = 1; i < maxNNCount; i++) {
    cluster.shutdownNameNode(i);
  }
}
Project: big-c    File: TestDataNodeHotSwapVolumes.java
private void startDFSCluster(int numNameNodes, int numDataNodes)
    throws IOException {
  shutdown();
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  /* Allow 1 volume failure */
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);

  MiniDFSNNTopology nnTopology =
      MiniDFSNNTopology.simpleFederatedTopology(numNameNodes);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(nnTopology)
      .numDataNodes(numDataNodes)
      .build();
  cluster.waitActive();
}
Project: aliyun-oss-hadoop-fs    File: TestInitializeSharedEdits.java
@Before
public void setupCluster() throws IOException {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);

  MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(0)
    .build();
  cluster.waitActive();

  shutdownClusterAndRemoveSharedEditsDir();
}
Project: aliyun-oss-hadoop-fs    File: TestHarFileSystemWithHA.java
/**
 * Test that the HarFileSystem works with underlying HDFS URIs that have no
 * port specified, as is often the case with an HA setup.
 */
@Test
public void testHarUriWithHaUriWithNoPort() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .build();
    cluster.transitionToActive(0);
    HATestUtil.setFailoverConfigurations(cluster, conf);

    createEmptyHarArchive(HATestUtil.configureFailoverFs(cluster, conf),
        TEST_HAR_PATH);

    URI failoverUri = FileSystem.getDefaultUri(conf);
    Path p = new Path("har://hdfs-" + failoverUri.getAuthority() + TEST_HAR_PATH);
    p.getFileSystem(conf);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestHASafeMode.java
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Project: aliyun-oss-hadoop-fs    File: TestLossyRetryInvocationHandler.java
@Test
public void testStartNNWithTrashEmptier() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new HdfsConfiguration();

  // enable both trash emptier and dropping response
  conf.setLong("fs.trash.interval", 360);
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);

  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
        .build();
    cluster.waitActive();
    cluster.transitionToActive(0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestDFSUpgradeWithHA.java
/**
 * Make sure that an HA NN will start if a previous upgrade was in progress.
 */
@Test
public void testStartingWithUpgradeInProgressSucceeds() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();

    // Simulate an upgrade having started.
    for (int i = 0; i < 2; i++) {
      for (URI uri : cluster.getNameDirs(i)) {
        File prevTmp = new File(new File(uri), Storage.STORAGE_TMP_PREVIOUS);
        LOG.info("creating previous tmp dir: " + prevTmp);
        assertTrue(prevTmp.mkdirs());
      }
    }

    cluster.restartNameNodes();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: big-c    File: TestQuotasWithHA.java
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  HAUtil.setAllowStandbyReads(conf, true);

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Project: aliyun-oss-hadoop-fs    File: TestQuotasWithHA.java
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  HAUtil.setAllowStandbyReads(conf, true);

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Project: big-c    File: TestEditLogTailer.java
private static void testStandbyTriggersLogRolls(int activeIndex)
    throws Exception {
  Configuration conf = new Configuration();
  // Roll every 1s
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

  // Have to specify IPC ports so the NNs can talk to each other.
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032)));

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(0)
    .build();
  try {
    cluster.transitionToActive(activeIndex);
    waitForLogRollInSharedDir(cluster, 3);
  } finally {
    cluster.shutdown();
  }
}
Project: aliyun-oss-hadoop-fs    File: TestDataNodeHotSwapVolumes.java
private void startDFSCluster(int numNameNodes, int numDataNodes)
    throws IOException {
  shutdown();
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  /* Allow 1 volume failure */
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);

  MiniDFSNNTopology nnTopology =
      MiniDFSNNTopology.simpleFederatedTopology(numNameNodes);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(nnTopology)
      .numDataNodes(numDataNodes)
      .build();
  cluster.waitActive();
}
Project: aliyun-oss-hadoop-fs    File: TestBookKeeperHACheckpoints.java
@SuppressWarnings("rawtypes")
@Override
@Before
public void setupCluster() throws Exception {
  Configuration conf = setupCommonConfig();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
           BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
           .toString());
  BKJMUtil.addJournalManagerDefinition(conf);
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(1)
    .manageNameDfsSharedDirs(false)
    .build();
  cluster.waitActive();

  setNNs();
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Project: aliyun-oss-hadoop-fs    File: TestBootstrapStandbyWithBKJM.java
@Before
public void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
      .createJournalURI("/bootstrapStandby").toString());
  BKJMUtil.addJournalManagerDefinition(conf);
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      SlowCodec.class.getCanonicalName());
  CompressionCodecFactory.setCodecClasses(conf,
      ImmutableList.<Class> of(SlowCodec.class));
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(
          new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)).addNN(
          new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
  cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
      .numDataNodes(1).manageNameDfsSharedDirs(false).build();
  cluster.waitActive();
}
Project: big-c    File: TestMover.java
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
Project: big-c    File: TestMover.java
@Test
public void testMoverCliWithFederationHA() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(3))
      .numDataNodes(0).build();
  final Configuration conf = new HdfsConfiguration();
  DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
  try {
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(3, namenodes.size());

    Iterator<URI> iter = namenodes.iterator();
    URI nn1 = iter.next();
    URI nn2 = iter.next();
    URI nn3 = iter.next();
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar", nn3 + "/foobar");
    Assert.assertEquals(3, movePaths.size());
    checkMovePaths(movePaths.get(nn1), new Path("/foo"), new Path("/bar"));
    checkMovePaths(movePaths.get(nn2), new Path("/foo/bar"));
    checkMovePaths(movePaths.get(nn3), new Path("/foobar"));
  } finally {
    cluster.shutdown();
  }
}
Project: big-c    File: TestXAttrsWithHA.java
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Project: big-c    File: TestBootstrapStandby.java
@Before
public void setupCluster() throws IOException {
  Configuration conf = new Configuration();

  MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(20001))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(20002)));

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(0)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  cluster.transitionToActive(0);
  cluster.shutdownNameNode(1);
}
Project: big-c    File: TestRetryCacheWithHA.java
@Before
public void setup() throws Exception {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(DataNodes).build();
  cluster.waitActive();
  cluster.transitionToActive(0);
  // setup the configuration
  HATestUtil.setFailoverConfigurations(cluster, conf);
  dfs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
}
Project: big-c    File: TestInitializeSharedEdits.java
@Before
public void setupCluster() throws IOException {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);

  MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(0)
    .build();
  cluster.waitActive();

  shutdownClusterAndRemoveSharedEditsDir();
}
Project: big-c    File: TestHarFileSystemWithHA.java
/**
 * Test that the HarFileSystem works with underlying HDFS URIs that have no
 * port specified, as is often the case with an HA setup.
 */
@Test
public void testHarUriWithHaUriWithNoPort() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .build();
    cluster.transitionToActive(0);
    HATestUtil.setFailoverConfigurations(cluster, conf);

    createEmptyHarArchive(HATestUtil.configureFailoverFs(cluster, conf),
        TEST_HAR_PATH);

    URI failoverUri = FileSystem.getDefaultUri(conf);
    Path p = new Path("har://hdfs-" + failoverUri.getAuthority() + TEST_HAR_PATH);
    p.getFileSystem(conf);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: big-c    File: TestHASafeMode.java
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}