Java class org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider example source code
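ConfiguredFailoverProxyProvider is the standard client-side failover proxy provider for HDFS NameNode HA: it resolves a logical nameservice name to the statically configured NameNode RPC addresses and fails over between them. The examples below show how the Hadoop test suites and several downstream forks wire it up. As a quick orientation, here is a minimal sketch (not taken from any of the projects below) of the programmatic client configuration those snippets rely on; the nameservice id "mycluster" and the NameNode host names are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;

import java.net.URI;

public class HaClientConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Logical nameservice and its two NameNodes (placeholder hosts/ports).
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.mycluster.nn1", "nn-host1:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn2", "nn-host2:8020");
    // Tell the client which failover proxy provider to use for this nameservice.
    conf.set("dfs.client.failover.proxy.provider.mycluster",
        ConfiguredFailoverProxyProvider.class.getName());
    conf.set("fs.defaultFS", "hdfs://mycluster");

    // A FileSystem obtained for the logical URI routes its RPCs through the provider.
    FileSystem fs = FileSystem.get(new URI("hdfs://mycluster"), conf);
    System.out.println(fs.exists(new Path("/")));
  }
}

The string keys above are the values behind the constants used throughout the snippets below (DFS_NAMESERVICES, DFS_HA_NAMENODES_KEY_PREFIX, DFS_NAMENODE_RPC_ADDRESS_KEY, and the client failover proxy provider key prefix from DFSConfigKeys / HdfsClientConfigKeys).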

Project: hadoop    File: TestDFSClientFailover.java
/**
 * Make sure that a helpful error message is shown if a proxy provider is
 * configured for a given URI, but no actual addresses are configured for that
 * URI.
 */
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
  String logicalHost = "misconfigured-ha-uri";
  Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost,
      ConfiguredFailoverProxyProvider.class.getName());

  URI uri = new URI("hdfs://" + logicalHost + "/test");
  try {
    FileSystem.get(uri, conf).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    assertTrue("expected exception did not contain helpful message",
        StringUtils.stringifyException(ioe).contains(
        "Could not find any configured addresses for URI " + uri));
  }
}
Project: hadoop    File: MiniQJMHACluster.java
private Configuration initHAConf(URI journalURI, Configuration conf) {
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      journalURI.toString());

  String address1 = "127.0.0.1:" + basePort;
  String address2 = "127.0.0.1:" + (basePort + 2);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN1), address1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN2), address2);
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NAMESERVICE);
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NAMESERVICE),
      NN1 + "," + NN2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + NAMESERVICE,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.set("fs.defaultFS", "hdfs://" + NAMESERVICE);

  return conf;
}
Project: aliyun-oss-hadoop-fs    File: DFSTestUtil.java
/**
 * Add a new HA configuration.
 */
public static void addHAConfiguration(Configuration conf,
    final String logicalName) {
  String nsIds = conf.get(DFSConfigKeys.DFS_NAMESERVICES);
  if (nsIds == null) {
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, logicalName);
  } else { // append the nsid
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, nsIds + "," + logicalName);
  }
  conf.set(DFSUtil.addKeySuffixes(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
          logicalName), "nn1,nn2");
  conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX +
          "." + logicalName,
          ConfiguredFailoverProxyProvider.class.getName());
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
}
Project: aliyun-oss-hadoop-fs    File: TestDFSClientFailover.java
/**
 * Make sure that a helpful error message is shown if a proxy provider is
 * configured for a given URI, but no actual addresses are configured for that
 * URI.
 */
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
  String logicalHost = "misconfigured-ha-uri";
  Configuration conf = new Configuration();
  conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost,
      ConfiguredFailoverProxyProvider.class.getName());

  URI uri = new URI("hdfs://" + logicalHost + "/test");
  try {
    FileSystem.get(uri, conf).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    assertTrue("expected exception did not contain helpful message",
        StringUtils.stringifyException(ioe).contains(
        "Could not find any configured addresses for URI " + uri));
  }
}
Project: big-c    File: TestDFSClientFailover.java
/**
 * Make sure that a helpful error message is shown if a proxy provider is
 * configured for a given URI, but no actual addresses are configured for that
 * URI.
 */
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
  String logicalHost = "misconfigured-ha-uri";
  Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost,
      ConfiguredFailoverProxyProvider.class.getName());

  URI uri = new URI("hdfs://" + logicalHost + "/test");
  try {
    FileSystem.get(uri, conf).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    assertTrue("expected exception did not contain helpful message",
        StringUtils.stringifyException(ioe).contains(
        "Could not find any configured addresses for URI " + uri));
  }
}
Project: big-c    File: MiniQJMHACluster.java
private Configuration initHAConf(URI journalURI, Configuration conf) {
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      journalURI.toString());

  String address1 = "127.0.0.1:" + basePort;
  String address2 = "127.0.0.1:" + (basePort + 2);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN1), address1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN2), address2);
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NAMESERVICE);
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NAMESERVICE),
      NN1 + "," + NN2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + NAMESERVICE,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.set("fs.defaultFS", "hdfs://" + NAMESERVICE);

  return conf;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDFSClientFailover.java
/**
 * Make sure that a helpful error message is shown if a proxy provider is
 * configured for a given URI, but no actual addresses are configured for that
 * URI.
 */
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
  String logicalHost = "misconfigured-ha-uri";
  Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost,
      ConfiguredFailoverProxyProvider.class.getName());

  URI uri = new URI("hdfs://" + logicalHost + "/test");
  try {
    FileSystem.get(uri, conf).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    assertTrue("expected exception did not contain helpful message",
        StringUtils.stringifyException(ioe).contains(
        "Could not find any configured addresses for URI " + uri));
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: MiniQJMHACluster.java
private Configuration initHAConf(URI journalURI, Configuration conf) {
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      journalURI.toString());

  String address1 = "127.0.0.1:" + basePort;
  String address2 = "127.0.0.1:" + (basePort + 2);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN1), address1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN2), address2);
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NAMESERVICE);
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NAMESERVICE),
      NN1 + "," + NN2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + NAMESERVICE,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.set("fs.defaultFS", "hdfs://" + NAMESERVICE);

  return conf;
}
Project: hadoop-plus    File: TestDFSClientFailover.java
/**
 * Make sure that a helpful error message is shown if a proxy provider is
 * configured for a given URI, but no actual addresses are configured for that
 * URI.
 */
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
  String logicalHost = "misconfigured-ha-uri";
  Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost,
      ConfiguredFailoverProxyProvider.class.getName());

  URI uri = new URI("hdfs://" + logicalHost + "/test");
  try {
    FileSystem.get(uri, conf).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    assertTrue("expected exception did not contain helpful message",
        StringUtils.stringifyException(ioe).contains(
        "Could not find any configured addresses for URI " + uri));
  }
}
Project: FlexMap    File: TestDFSClientFailover.java
/**
 * Make sure that a helpful error message is shown if a proxy provider is
 * configured for a given URI, but no actual addresses are configured for that
 * URI.
 */
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
  String logicalHost = "misconfigured-ha-uri";
  Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost,
      ConfiguredFailoverProxyProvider.class.getName());

  URI uri = new URI("hdfs://" + logicalHost + "/test");
  try {
    FileSystem.get(uri, conf).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    assertTrue("expected exception did not contain helpful message",
        StringUtils.stringifyException(ioe).contains(
        "Could not find any configured addresses for URI " + uri));
  }
}
Project: FlexMap    File: MiniQJMHACluster.java
private Configuration initHAConf(URI journalURI, Configuration conf) {
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      journalURI.toString());

  String address1 = "127.0.0.1:" + basePort;
  String address2 = "127.0.0.1:" + (basePort + 2);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN1), address1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN2), address2);
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NAMESERVICE);
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NAMESERVICE),
      NN1 + "," + NN2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + NAMESERVICE,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.set("fs.defaultFS", "hdfs://" + NAMESERVICE);

  return conf;
}
Project: hadoop-TCP    File: TestDFSClientFailover.java
/**
 * Make sure that a helpful error message is shown if a proxy provider is
 * configured for a given URI, but no actual addresses are configured for that
 * URI.
 */
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
  String logicalHost = "misconfigured-ha-uri";
  Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost,
      ConfiguredFailoverProxyProvider.class.getName());

  URI uri = new URI("hdfs://" + logicalHost + "/test");
  try {
    FileSystem.get(uri, conf).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    assertTrue("expected exception did not contain helpful message",
        StringUtils.stringifyException(ioe).contains(
        "Could not find any configured addresses for URI " + uri));
  }
}
Project: hardfs    File: TestDFSClientFailover.java
/**
 * Make sure that a helpful error message is shown if a proxy provider is
 * configured for a given URI, but no actual addresses are configured for that
 * URI.
 */
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
  String logicalHost = "misconfigured-ha-uri";
  Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost,
      ConfiguredFailoverProxyProvider.class.getName());

  URI uri = new URI("hdfs://" + logicalHost + "/test");
  try {
    FileSystem.get(uri, conf).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    assertTrue("expected exception did not contain helpful message",
        StringUtils.stringifyException(ioe).contains(
        "Could not find any configured addresses for URI " + uri));
  }
}
Project: hadoop-on-lustre2    File: TestDFSClientFailover.java
/**
 * Make sure that a helpful error message is shown if a proxy provider is
 * configured for a given URI, but no actual addresses are configured for that
 * URI.
 */
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
  String logicalHost = "misconfigured-ha-uri";
  Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost,
      ConfiguredFailoverProxyProvider.class.getName());

  URI uri = new URI("hdfs://" + logicalHost + "/test");
  try {
    FileSystem.get(uri, conf).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    assertTrue("expected exception did not contain helpful message",
        StringUtils.stringifyException(ioe).contains(
        "Could not find any configured addresses for URI " + uri));
  }
}
Project: hadoop-on-lustre2    File: MiniQJMHACluster.java
private Configuration initHAConf(URI journalURI, Configuration conf) {
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      journalURI.toString());

  String address1 = "127.0.0.1:" + NN1_IPC_PORT;
  String address2 = "127.0.0.1:" + NN2_IPC_PORT;
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN1), address1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN2), address2);
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NAMESERVICE);
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NAMESERVICE),
      NN1 + "," + NN2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + NAMESERVICE,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.set("fs.defaultFS", "hdfs://" + NAMESERVICE);

  return conf;
}
Project: hadoop    File: TestDFSUtil.java
private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
  HdfsConfiguration conf = new HdfsConfiguration();

  conf.set(DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"), nnaddr1);
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"), nnaddr2);

  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
      ConfiguredFailoverProxyProvider.class.getName());
  return conf;
}
Project: aliyun-oss-hadoop-fs    File: DFSTestUtil.java
public static void setFederatedHAConfiguration(MiniDFSCluster cluster,
    Configuration conf) {
  Map<String, List<String>> nameservices = Maps.newHashMap();
  for (NameNodeInfo info : cluster.getNameNodeInfos()) {
    Preconditions.checkState(info.nameserviceId != null);
    List<String> nns = nameservices.get(info.nameserviceId);
    if (nns == null) {
      nns = Lists.newArrayList();
      nameservices.put(info.nameserviceId, nns);
    }
    nns.add(info.nnId);

    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
        info.nameserviceId, info.nnId),
        DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
        info.nameNode.getNameNodeAddress()).toString());
    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        info.nameserviceId, info.nnId),
        DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
        info.nameNode.getNameNodeAddress()).toString());
  }
  for (Map.Entry<String, List<String>> entry : nameservices.entrySet()) {
    conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX,
        entry.getKey()), Joiner.on(",").join(entry.getValue()));
    conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "."
        + entry.getKey(), ConfiguredFailoverProxyProvider.class.getName());
  }
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, Joiner.on(",")
      .join(nameservices.keySet()));
}
Project: aliyun-oss-hadoop-fs    File: TestDFSUtil.java
private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
  HdfsConfiguration conf = new HdfsConfiguration();

  conf.set(DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"), nnaddr1);
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"), nnaddr2);

  conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
      ConfiguredFailoverProxyProvider.class.getName());
  return conf;
}
Project: gemfirexd-oss    File: HdfsSortedOplogOrganizerJUnitTest.java
private void initClientHAConf(int nn1port, int nn2port) throws Exception {
  hsf.setHomeDir("test-case");
  hsf.setNameNodeURL("hdfs://ns1");
  File confFile = new File(getName());
  String conf = "<configuration>\n             "
      + "  <property>\n                                    "
      + "    <name>dfs.nameservices</name>\n               "
      + "    <value>ns1</value>\n                          "
      + "  </property>\n                                   "
      + "  <property>\n                                    "
      + "    <name>dfs.ha.namenodes.ns1</name>\n           "
      + "    <value>nn1,nn2</value>\n                      "
      + "  </property>\n                                   "
      + "  <property>\n                                    "
      + "    <name>dfs.namenode.rpc-address.ns1.nn1</name>\n"
      + "    <value>hdfs://127.0.0.1:" + nn1port + "</value>\n"
      + "  </property>\n                                   "
      + "  <property>\n                                    "
      + "    <name>dfs.namenode.rpc-address.ns1.nn2</name>\n"
      + "    <value>hdfs://127.0.0.1:" + nn2port + "</value>\n"
      + "  </property>\n                                   "
      + "  <property>\n                                    "
      + "    <name>dfs.client.failover.proxy.provider.ns1</name>\n"
      + "    <value>" + ConfiguredFailoverProxyProvider.class.getName() + "</value>\n"
      + "  </property>\n                                   "
      + "</configuration>";
  setConfigFile(hsf, confFile, conf);
}
Project: big-c    File: TestDFSUtil.java
private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
  HdfsConfiguration conf = new HdfsConfiguration();

  conf.set(DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"), nnaddr1);
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"), nnaddr2);

  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
      ConfiguredFailoverProxyProvider.class.getName());
  return conf;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDFSUtil.java
private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
  HdfsConfiguration conf = new HdfsConfiguration();

  conf.set(DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"), nnaddr1);
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"), nnaddr2);

  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
      ConfiguredFailoverProxyProvider.class.getName());
  return conf;
}
Project: hadoop-plus    File: TestDFSClientFailover.java
/**
 * Regression test for HDFS-2683.
 */
@Test
public void testLogicalUriShouldNotHavePorts() {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + ".foo",
      ConfiguredFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://foo:12345/");
  try {
    p.getFileSystem(conf).exists(p);
    fail("Did not fail with fake FS");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "does not use port information", ioe);
  }
}
Project: FlexMap    File: TestDFSUtil.java
private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
  HdfsConfiguration conf = new HdfsConfiguration();

  conf.set(DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"), nnaddr1);
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"), nnaddr2);

  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
      ConfiguredFailoverProxyProvider.class.getName());
  return conf;
}
Project: hadoop-TCP    File: TestDFSClientFailover.java
/**
 * Regression test for HDFS-2683.
 */
@Test
public void testLogicalUriShouldNotHavePorts() {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + ".foo",
      ConfiguredFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://foo:12345/");
  try {
    p.getFileSystem(conf).exists(p);
    fail("Did not fail with fake FS");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "does not use port information", ioe);
  }
}
Project: hardfs    File: TestDFSClientFailover.java
/**
 * Regression test for HDFS-2683.
 */
@Test
public void testLogicalUriShouldNotHavePorts() {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + ".foo",
      ConfiguredFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://foo:12345/");
  try {
    p.getFileSystem(conf).exists(p);
    fail("Did not fail with fake FS");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "does not use port information", ioe);
  }
}
Project: hadoop-on-lustre2    File: TestDFSUtil.java
private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
  HdfsConfiguration conf = new HdfsConfiguration();

  conf.set(DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"), nnaddr1);
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"), nnaddr2);

  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
      ConfiguredFailoverProxyProvider.class.getName());
  return conf;
}
Project: hadoop-on-lustre2    File: TestDFSClientFailover.java
/**
 * Regression test for HDFS-2683.
 */
@Test
public void testLogicalUriShouldNotHavePorts() {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + ".foo",
      ConfiguredFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://foo:12345/");
  try {
    p.getFileSystem(conf).exists(p);
    fail("Did not fail with fake FS");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "does not use port information", ioe);
  }
}