Java class org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil: example source code

Project: hadoop    File: TestDFSClientFailover.java
/**
 * Test that even a non-idempotent method will properly fail-over if the
 * first IPC attempt times out trying to connect. Regression test for
 * HDFS-4404. 
 */
@Test
public void testFailoverOnConnectTimeout() throws Exception {
  conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      InjectingSocketFactory.class, SocketFactory.class);
  // Set up the InjectingSocketFactory to throw a ConnectTimeoutException
  // when connecting to the first NN.
  InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);

  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  // Make the second NN the active one.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  // Call a non-idempotent method, and ensure the failover of the call proceeds
  // successfully.
  IOUtils.closeStream(fs.create(TEST_FILE));
}
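
InjectingSocketFactory is a private helper inside TestDFSClientFailover. As a rough, hypothetical sketch of its shape (inferred only from how the test uses it: a static portToInjectOn field plus a simulated ConnectTimeoutException), it might look like this:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;

import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.StandardSocketFactory;

/** Hypothetical sketch; the real InjectingSocketFactory is a Hadoop test helper. */
public class InjectingSocketFactory extends StandardSocketFactory {

  /** Connections to this port fail with a simulated timeout; -1 disables injection. */
  public static int portToInjectOn = -1;

  @Override
  public Socket createSocket() throws IOException {
    return new Socket() {
      @Override
      public void connect(SocketAddress endpoint, int timeout) throws IOException {
        InetSocketAddress addr = (InetSocketAddress) endpoint;
        if (addr.getPort() == portToInjectOn) {
          // Simulate a connect timeout so the client's IPC attempt fails over.
          throw new ConnectTimeoutException("Injected timeout connecting to " + addr);
        }
        super.connect(endpoint, timeout);
      }
    };
  }
}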
Project: hadoop    File: TestDFSClientFailover.java
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.createFile(fs, TEST_FILE,
      FILE_LENGTH_TO_VERIFY, (short)1, 1L);

  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);

  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" +
      HATestUtil.getLogicalHostname(cluster) + ":" +
      NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
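
HATestUtil.configureFailoverFs and setFailoverConfigurations encapsulate the standard HDFS HA client wiring. For readers who want the equivalent by hand, here is a minimal sketch, assuming a logical nameservice called "mycluster" with two NameNodes; the helper's exact internals vary between the branches shown on this page:

Configuration conf = new Configuration();
String logical = "mycluster";  // assumed logical nameservice name

// Declare the nameservice and its two NameNodes.
conf.set("dfs.nameservices", logical);
conf.set("dfs.ha.namenodes." + logical, "nn1,nn2");
conf.set("dfs.namenode.rpc-address." + logical + ".nn1", "127.0.0.1:8020");
conf.set("dfs.namenode.rpc-address." + logical + ".nn2", "127.0.0.1:8021");

// Client-side failover proxy provider for the logical URI.
conf.set("dfs.client.failover.proxy.provider." + logical,
    "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");

// Make the logical URI the default filesystem.
conf.set("fs.defaultFS", "hdfs://" + logical);
FileSystem fs = FileSystem.get(conf);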
Project: hadoop    File: TestEncryptionZonesWithHA.java
/**
 * Test that encryption zones are properly tracked by the standby.
 */
@Test(timeout = 60000)
public void testEncryptionZonesTrackedOnStandby() throws Exception {
  final int len = 8196;
  final Path dir = new Path("/enc");
  final Path dirChild = new Path(dir, "child");
  final Path dirFile = new Path(dir, "file");
  fs.mkdir(dir, FsPermission.getDirDefault());
  dfsAdmin0.createEncryptionZone(dir, TEST_KEY);
  fs.mkdir(dirChild, FsPermission.getDirDefault());
  DFSTestUtil.createFile(fs, dirFile, len, (short) 1, 0xFEED);
  String contents = DFSTestUtil.readFile(fs, dirFile);

  // Failover the current standby to active.
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString());
  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString());
  Assert.assertEquals("File contents after failover were changed",
      contents, DFSTestUtil.readFile(fs, dirFile));
}
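
The test assumes fixtures (cluster, fs, nn0, nn1, dfsAdmin0, dfsAdmin1, TEST_KEY) built in a @Before method that the excerpt omits. A condensed sketch of such a setup, assuming a file-based JavaKeyStore key provider; the key-provider wiring and helper overloads are illustrative and vary by branch:

// Hedged sketch of the setup this test assumes; details vary by branch.
conf = new Configuration();
conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
    "jceks://file" + new Path(testRootDir.toString(), "test.jks").toUri());
cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .build();
cluster.waitActive();
cluster.transitionToActive(0);

fs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
nn0 = cluster.getNameNode(0);
nn1 = cluster.getNameNode(1);
dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);  // register the test key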
Project: hadoop    File: TestWebHDFSForHA.java
@Test
public void testMultipleNamespacesConfigured() throws Exception {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();
    DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
    DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");

    fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
    Assert.assertEquals(2, fs.getResolvedNNAddr().length);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
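
LOGICAL_NAME, topo, and WEBHDFS_URI are class-level fixtures of TestWebHDFSForHA that the excerpt omits. They are plausibly declared along these lines (the literal nameservice name is an assumption):

private static final String LOGICAL_NAME = "minidfs";
private static final URI WEBHDFS_URI = URI.create("webhdfs://" + LOGICAL_NAME);
private static final MiniDFSNNTopology topo = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf(LOGICAL_NAME)
        .addNN(new MiniDFSNNTopology.NNConf("nn1"))
        .addNN(new MiniDFSNNTopology.NNConf("nn2")));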
Project: hadoop    File: TestDFSClientFailover.java
/**
 * Same as testDoesntDnsResolveLogicalURI, but for FileContext.
 */
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  NameService spyNS = spyOnNameService();
  String logicalHost = fs.getUri().getHost();
  Configuration haClientConf = fs.getConf();

  FileContext fc = FileContext.getFileContext(haClientConf);
  Path root = new Path("/");
  fc.listStatus(root);
  fc.listStatus(fc.makeQualified(root));
  fc.getDefaultFileSystem().getCanonicalServiceName();

  // Ensure that the logical hostname was never resolved.
  Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
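
spyOnNameService() is a reflection-based helper from TestDFSClientFailover that swaps a Mockito spy into the JDK's resolver list so the test can verify that no DNS lookup ever happened. A sketch, assuming the JDK-internal sun.net.spi.nameservice.NameService API (removed in later JDKs) and the private InetAddress.nameServices field:

@SuppressWarnings("unchecked")
private NameService spyOnNameService() throws Exception {
  // InetAddress keeps its resolvers in a private static list; swap in a spy
  // so the test can later verify which hostnames were looked up.
  Field f = InetAddress.class.getDeclaredField("nameServices");
  f.setAccessible(true);
  List<NameService> nsList = (List<NameService>) f.get(null);

  NameService spy = Mockito.spy(nsList.get(0));
  nsList.set(0, spy);
  return spy;
}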
Project: hadoop    File: TestDFSClientFailover.java
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
  // setup the config with the dummy provider class
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");

  // Don't use the IP address for the token service.
  SecurityUtil.setTokenServiceUseIp(false);

  // Logical URI should be used.
  assertTrue("Legacy proxy providers should use logical URI.",
      HAUtil.useLogicalUri(config, p.toUri()));
}
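
DummyLegacyFailoverProxyProvider is a test-local class: any provider that implements the plain org.apache.hadoop.io.retry.FailoverProxyProvider interface, rather than an HDFS-aware base class, gets wrapped by the client and treated as addressing the NameNode by logical URI. A minimal sketch of such a legacy provider (the no-op bodies are illustrative, not the real test class):

public static class DummyLegacyFailoverProxyProvider<T>
    implements FailoverProxyProvider<T> {
  private final Class<T> iface;

  public DummyLegacyFailoverProxyProvider(Configuration conf, URI uri,
      Class<T> iface) {
    this.iface = iface;
  }

  @Override
  public Class<T> getInterface() {
    return iface;
  }

  @Override
  public ProxyInfo<T> getProxy() {
    return new ProxyInfo<T>(null, null);  // a real provider would return an RPC proxy
  }

  @Override
  public void performFailover(T currentProxy) {
    // no-op: a single-proxy legacy provider has nothing to fail over to
  }

  @Override
  public void close() throws IOException {
  }
}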
Project: hadoop    File: TestMover.java
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
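
checkMovePaths is a small local assertion helper in TestMover; a plausible sketch:

private void checkMovePaths(List<Path> actual, Path... expected) {
  // Verify the Mover CLI parsed exactly the expected target paths.
  Assert.assertEquals(expected.length, actual.size());
  for (Path p : expected) {
    Assert.assertTrue(actual.contains(p));
  }
}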
Project: hadoop    File: TestNameNodeRetryCacheMetrics.java
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
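
With DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY set to 2, the NameNode's first two responses to a call are dropped, forcing client retries that should be answered from the retry cache. A hedged sketch of a test built on the setup above; the RetryCacheMetrics getter names are assumptions:

@Test
public void testRetryCacheMetrics() throws IOException {
  // rename is an at-most-once operation: the dropped responses force
  // client retries, which the NameNode answers from its retry cache.
  DFSTestUtil.createFile(filesystem, new Path("/src"), 1024, (short) 3, 0L);
  filesystem.rename(new Path("/src"), new Path("/dst"));

  assertTrue("expected retry-cache hits from the retried calls",
      metrics.getCacheHit() > 0);
  assertTrue("expected retry-cache updates from the first attempts",
      metrics.getCacheUpdated() > 0);
}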
Project: aliyun-oss-hadoop-fs    File: TestEncryptionZonesWithHA.java
/**
 * Test that encryption zones are properly tracked by the standby.
 */
@Test(timeout = 60000)
public void testEncryptionZonesTrackedOnStandby() throws Exception {
  final int len = 8196;
  final Path dir = new Path("/enc");
  final Path dirChild = new Path(dir, "child");
  final Path dirFile = new Path(dir, "file");
  fs.mkdir(dir, FsPermission.getDirDefault());
  dfsAdmin0.createEncryptionZone(dir, TEST_KEY);
  fs.mkdir(dirChild, FsPermission.getDirDefault());
  DFSTestUtil.createFile(fs, dirFile, len, (short) 1, 0xFEED);
  String contents = DFSTestUtil.readFile(fs, dirFile);

  // Failover the current standby to active.
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString());
  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString());
  Assert.assertEquals("File contents after failover were changed",
      contents, DFSTestUtil.readFile(fs, dirFile));
}
Project: aliyun-oss-hadoop-fs    File: TestWebHDFSForHA.java
@Test
public void testMultipleNamespacesConfigured() throws Exception {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();
    DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
    DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");

    fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
    Assert.assertEquals(2, fs.getResolvedNNAddr().length);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestDFSClientFailover.java
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.createFile(fs, TEST_FILE,
      FILE_LENGTH_TO_VERIFY, (short)1, 1L);

  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);

  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" +
      HATestUtil.getLogicalHostname(cluster) + ":" +
      HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT + "/" +
      TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
Project: aliyun-oss-hadoop-fs    File: TestDFSClientFailover.java
/**
 * Test that even a non-idempotent method will properly fail-over if the
 * first IPC attempt times out trying to connect. Regression test for
 * HDFS-4404. 
 */
@Test
public void testFailoverOnConnectTimeout() throws Exception {
  conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      InjectingSocketFactory.class, SocketFactory.class);
  // Set up the InjectingSocketFactory to throw a ConnectTimeoutException
  // when connecting to the first NN.
  InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);

  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  // Make the second NN the active one.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  // Call a non-idempotent method, and ensure the failover of the call proceeds
  // successfully.
  IOUtils.closeStream(fs.create(TEST_FILE));
}
Project: aliyun-oss-hadoop-fs    File: TestDFSClientFailover.java
/**
 * Same as testDoesntDnsResolveLogicalURI, but for FileContext.
 */
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  NameService spyNS = spyOnNameService();
  String logicalHost = fs.getUri().getHost();
  Configuration haClientConf = fs.getConf();

  FileContext fc = FileContext.getFileContext(haClientConf);
  Path root = new Path("/");
  fc.listStatus(root);
  fc.listStatus(fc.makeQualified(root));
  fc.getDefaultFileSystem().getCanonicalServiceName();

  // Ensure that the logical hostname was never resolved.
  Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
Project: aliyun-oss-hadoop-fs    File: TestDFSClientFailover.java
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
  // setup the config with the dummy provider class
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");

  // Don't use the IP address for the token service.
  SecurityUtil.setTokenServiceUseIp(false);

  // Logical URI should be used.
  assertTrue("Legacy proxy providers should use logical URI.",
      HAUtil.useLogicalUri(config, p.toUri()));
}
Project: aliyun-oss-hadoop-fs    File: TestMover.java
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
Project: aliyun-oss-hadoop-fs    File: TestNameNodeRetryCacheMetrics.java
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
Project: aliyun-oss-hadoop-fs    File: MiniQJMHACluster.java
private Configuration initHAConf(URI journalURI, Configuration conf, int numNNs) {
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      journalURI.toString());

  List<String> nns = new ArrayList<String>(numNNs);
  int port = basePort;
  for (int i = 0; i < numNNs; i++) {
    nns.add("127.0.0.1:" + port);
    // increment by 2 each time to account for the http port in the config setting
    port += 2;
  }

  // use standard failover configurations
  HATestUtil.setFailoverConfigurations(conf, NAMESERVICE, nns);
  return conf;
}
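
The RPC port advances by 2 per NameNode because the matching MiniDFSNNTopology claims the intervening port for HTTP. A sketch of the companion topology builder this layout assumes:

// Sketch: pair each NN's RPC port with the next port for HTTP,
// matching the "port += 2" stride above.
private MiniDFSNNTopology createTopology(int numNNs) {
  MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf(NAMESERVICE);
  int port = basePort;
  for (int i = 0; i < numNNs; i++) {
    nameservice.addNN(new MiniDFSNNTopology.NNConf("nn" + i)
        .setIpcPort(port++)      // RPC address: 127.0.0.1:(basePort + 2*i)
        .setHttpPort(port++));   // HTTP address: the following port
  }
  return new MiniDFSNNTopology().addNameservice(nameservice);
}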
Project: aliyun-oss-hadoop-fs    File: TestBookKeeperHACheckpoints.java
@SuppressWarnings("rawtypes")
@Override
@Before
public void setupCluster() throws Exception {
  Configuration conf = setupCommonConfig();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
           BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
           .toString());
  BKJMUtil.addJournalManagerDefinition(conf);
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(1)
    .manageNameDfsSharedDirs(false)
    .build();
  cluster.waitActive();

  setNNs();
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
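
setNNs() comes from the checkpointing test base class; presumably it just caches the NameNode handles, along the lines of:

// Sketch of the assumed helper: cache the NameNode handles for the test.
private void setNNs() {
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
}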
Project: big-c    File: TestEncryptionZonesWithHA.java
/**
 * Test that encryption zones are properly tracked by the standby.
 */
@Test(timeout = 60000)
public void testEncryptionZonesTrackedOnStandby() throws Exception {
  final int len = 8196;
  final Path dir = new Path("/enc");
  final Path dirChild = new Path(dir, "child");
  final Path dirFile = new Path(dir, "file");
  fs.mkdir(dir, FsPermission.getDirDefault());
  dfsAdmin0.createEncryptionZone(dir, TEST_KEY);
  fs.mkdir(dirChild, FsPermission.getDirDefault());
  DFSTestUtil.createFile(fs, dirFile, len, (short) 1, 0xFEED);
  String contents = DFSTestUtil.readFile(fs, dirFile);

  // Failover the current standby to active.
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString());
  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString());
  Assert.assertEquals("File contents after failover were changed",
      contents, DFSTestUtil.readFile(fs, dirFile));
}
Project: big-c    File: TestWebHDFSForHA.java
@Test
public void testMultipleNamespacesConfigured() throws Exception {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();
    DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
    DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");

    fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
    Assert.assertEquals(2, fs.getResolvedNNAddr().length);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: big-c    File: TestDFSClientFailover.java
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.createFile(fs, TEST_FILE,
      FILE_LENGTH_TO_VERIFY, (short)1, 1L);

  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);

  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" +
      HATestUtil.getLogicalHostname(cluster) + ":" +
      NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
Project: big-c    File: TestDFSClientFailover.java
/**
 * Test that even a non-idempotent method will properly fail-over if the
 * first IPC attempt times out trying to connect. Regression test for
 * HDFS-4404. 
 */
@Test
public void testFailoverOnConnectTimeout() throws Exception {
  conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      InjectingSocketFactory.class, SocketFactory.class);
  // Set up the InjectingSocketFactory to throw a ConnectTimeoutException
  // when connecting to the first NN.
  InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);

  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  // Make the second NN the active one.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  // Call a non-idempotent method, and ensure the failover of the call proceeds
  // successfully.
  IOUtils.closeStream(fs.create(TEST_FILE));
}
Project: big-c    File: TestDFSClientFailover.java
/**
 * Same as testDoesntDnsResolveLogicalURI, but for FileContext.
 */
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  NameService spyNS = spyOnNameService();
  String logicalHost = fs.getUri().getHost();
  Configuration haClientConf = fs.getConf();

  FileContext fc = FileContext.getFileContext(haClientConf);
  Path root = new Path("/");
  fc.listStatus(root);
  fc.listStatus(fc.makeQualified(root));
  fc.getDefaultFileSystem().getCanonicalServiceName();

  // Ensure that the logical hostname was never resolved.
  Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
Project: big-c    File: TestDFSClientFailover.java
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
  // setup the config with the dummy provider class
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");

  // Don't use the IP address for the token service.
  SecurityUtil.setTokenServiceUseIp(false);

  // Logical URI should be used.
  assertTrue("Legacy proxy providers should use logical URI.",
      HAUtil.useLogicalUri(config, p.toUri()));
}
Project: big-c    File: TestMover.java
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
Project: big-c    File: TestNameNodeRetryCacheMetrics.java
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestEncryptionZonesWithHA.java
/**
 * Test that encryption zones are properly tracked by the standby.
 */
@Test(timeout = 60000)
public void testEncryptionZonesTrackedOnStandby() throws Exception {
  final int len = 8196;
  final Path dir = new Path("/enc");
  final Path dirChild = new Path(dir, "child");
  final Path dirFile = new Path(dir, "file");
  fs.mkdir(dir, FsPermission.getDirDefault());
  dfsAdmin0.createEncryptionZone(dir, TEST_KEY);
  fs.mkdir(dirChild, FsPermission.getDirDefault());
  DFSTestUtil.createFile(fs, dirFile, len, (short) 1, 0xFEED);
  String contents = DFSTestUtil.readFile(fs, dirFile);

  // Failover the current standby to active.
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString());
  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString());
  Assert.assertEquals("File contents after failover were changed",
      contents, DFSTestUtil.readFile(fs, dirFile));
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestWebHDFSForHA.java
@Test
public void testMultipleNamespacesConfigured() throws Exception {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();
    DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
    DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");

    fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
    Assert.assertEquals(2, fs.getResolvedNNAddr().length);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDFSClientFailover.java
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.createFile(fs, TEST_FILE,
      FILE_LENGTH_TO_VERIFY, (short)1, 1L);

  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);

  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" +
      HATestUtil.getLogicalHostname(cluster) + ":" +
      NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDFSClientFailover.java
/**
 * Test that even a non-idempotent method will properly fail-over if the
 * first IPC attempt times out trying to connect. Regression test for
 * HDFS-4404. 
 */
@Test
public void testFailoverOnConnectTimeout() throws Exception {
  conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      InjectingSocketFactory.class, SocketFactory.class);
  // Set up the InjectingSocketFactory to throw a ConnectTimeoutException
  // when connecting to the first NN.
  InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);

  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  // Make the second NN the active one.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  // Call a non-idempotent method, and ensure the failover of the call proceeds
  // successfully.
  IOUtils.closeStream(fs.create(TEST_FILE));
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDFSClientFailover.java
/**
 * Same as testDoesntDnsResolveLogicalURI, but for FileContext.
 */
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  NameService spyNS = spyOnNameService();
  String logicalHost = fs.getUri().getHost();
  Configuration haClientConf = fs.getConf();

  FileContext fc = FileContext.getFileContext(haClientConf);
  Path root = new Path("/");
  fc.listStatus(root);
  fc.listStatus(fc.makeQualified(root));
  fc.getDefaultFileSystem().getCanonicalServiceName();

  // Ensure that the logical hostname was never resolved.
  Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDFSClientFailover.java
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
  // setup the config with the dummy provider class
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");

  // Don't use the IP address for the token service.
  SecurityUtil.setTokenServiceUseIp(false);

  // Logical URI should be used.
  assertTrue("Legacy proxy providers should use logical URI.",
      HAUtil.useLogicalUri(config, p.toUri()));
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestMover.java
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestNameNodeRetryCacheMetrics.java
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestHarFileSystemWithHA.java
/**
 * Test that the HarFileSystem works with underlying HDFS URIs that have no
 * port specified, as is often the case with an HA setup.
 */
@Test
public void testHarUriWithHaUriWithNoPort() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .build();
    cluster.transitionToActive(0);
    HATestUtil.setFailoverConfigurations(cluster, conf);

    createEmptyHarArchive(HATestUtil.configureFailoverFs(cluster, conf),
        TEST_HAR_PATH);

    URI failoverUri = FileSystem.getDefaultUri(conf);
    Path p = new Path("har://hdfs-" + failoverUri.getAuthority() + TEST_HAR_PATH);
    p.getFileSystem(conf);
  } finally {
    cluster.shutdown();
  }
}
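
createEmptyHarArchive is a local helper; a plausible sketch writes only the two HAR metadata files so HarFileSystem will accept the directory as an archive (the exact index contents are assumptions):

// Sketch: an empty HAR needs only its two metadata files so that
// HarFileSystem accepts the directory as an archive.
private void createEmptyHarArchive(FileSystem fs, Path p) throws IOException {
  fs.mkdirs(p);
  OutputStream out = fs.create(new Path(p, "_masterindex"));
  out.write(Integer.toString(HarFileSystem.VERSION).getBytes(StandardCharsets.UTF_8));
  out.close();
  fs.create(new Path(p, "_index")).close();
}

The har://hdfs-<authority> form layers HarFileSystem over an hdfs:// URI with the same authority, which is why the port-free logical authority has to round-trip intact.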
Project: hadoop-plus    File: TestDFSClientFailover.java
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.createFile(fs, TEST_FILE,
      FILE_LENGTH_TO_VERIFY, (short)1, 1L);

  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);

  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" +
      HATestUtil.getLogicalHostname(cluster) + ":" +
      NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
Project: hadoop-plus    File: TestDFSClientFailover.java
/**
 * Test that even a non-idempotent method will properly fail-over if the
 * first IPC attempt times out trying to connect. Regression test for
 * HDFS-4404. 
 */
@Test
public void testFailoverOnConnectTimeout() throws Exception {
  conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      InjectingSocketFactory.class, SocketFactory.class);
  // Set up the InjectingSocketFactory to throw a ConnectTimeoutException
  // when connecting to the first NN.
  InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);

  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  // Make the second NN the active one.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  // Call a non-idempotent method, and ensure the failover of the call proceeds
  // successfully.
  IOUtils.closeStream(fs.create(TEST_FILE));
}
Project: hadoop-plus    File: TestDFSClientFailover.java
/**
 * Test that the client doesn't ever try to DNS-resolve the logical URI.
 * Regression test for HADOOP-9150.
 */
@Test
public void testDoesntDnsResolveLogicalURI() throws Exception {
  NameService spyNS = spyOnNameService();

  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  String logicalHost = fs.getUri().getHost();
  Path qualifiedRoot = fs.makeQualified(new Path("/"));

  // Make a few calls against the filesystem.
  fs.getCanonicalServiceName();
  fs.listStatus(qualifiedRoot);

  // Ensure that the logical hostname was never resolved.
  Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
Project: hadoop-plus    File: TestDFSClientFailover.java
/**
 * Same test as above, but for FileContext.
 */
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
  NameService spyNS = spyOnNameService();
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  String logicalHost = fs.getUri().getHost();
  Configuration haClientConf = fs.getConf();

  FileContext fc = FileContext.getFileContext(haClientConf);
  Path root = new Path("/");
  fc.listStatus(root);
  fc.listStatus(fc.makeQualified(root));
  fc.getDefaultFileSystem().getCanonicalServiceName();

  // Ensure that the logical hostname was never resolved.
  Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
Project: FlexMap    File: TestEncryptionZonesWithHA.java
/**
 * Test that encryption zones are properly tracked by the standby.
 */
@Test(timeout = 60000)
public void testEncryptionZonesTrackedOnStandby() throws Exception {
  final int len = 8196;
  final Path dir = new Path("/enc");
  final Path dirChild = new Path(dir, "child");
  final Path dirFile = new Path(dir, "file");
  fs.mkdir(dir, FsPermission.getDirDefault());
  dfsAdmin0.createEncryptionZone(dir, TEST_KEY);
  fs.mkdir(dirChild, FsPermission.getDirDefault());
  DFSTestUtil.createFile(fs, dirFile, len, (short) 1, 0xFEED);
  String contents = DFSTestUtil.readFile(fs, dirFile);

  // Failover the current standby to active.
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString());
  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString());
  Assert.assertEquals("File contents after failover were changed",
      contents, DFSTestUtil.readFile(fs, dirFile));
}