Java class org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods example source code

Project: hadoop    File: NameNodeHttpServer.java
private void initWebHdfs(Configuration conf) throws IOException {
  if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
    // set user pattern based on configuration file
    UserParam.setUserPattern(conf.get(
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

    // add authentication filter for webhdfs
    final String className = conf.get(
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
    final String name = className;

    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
    Map<String, String> params = getAuthFilterParams(conf);
    HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
        params, new String[] { pathSpec });
    HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
        + ")");

    // add webhdfs packages
    httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
        .getPackage().getName() + ";" + Param.class.getPackage().getName(),
        pathSpec);
  }
}
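For context, the method above reads its WebHDFS settings from the Configuration passed in. The following is a minimal sketch (not taken from any of the projects listed here) of building such a Configuration programmatically; it assumes the 2.x-style DFSConfigKeys constants used in the snippet, including DFS_WEBHDFS_ENABLED_KEY, which WebHdfsFileSystem.isEnabled(conf, LOG) consults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

// Sketch only: a Configuration that enables WebHDFS and pins the settings
// read by initWebHdfs(conf) above. Values shown are the shipped defaults.
static Configuration webHdfsEnabledConf() {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);    // checked by WebHdfsFileSystem.isEnabled(..)
  conf.set(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
      DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT);             // user-name pattern applied via UserParam.setUserPattern(..)
  conf.set(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
      DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);    // servlet filter class installed on the WebHDFS path spec
  return conf;
}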
Project: aliyun-oss-hadoop-fs    File: NameNodeHttpServer.java
private void initWebHdfs(Configuration conf) throws IOException {
  // set user pattern based on configuration file
  UserParam.setUserPattern(conf.get(
      HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
      HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

  // add authentication filter for webhdfs
  final String className = conf.get(
      DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
      DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
  final String name = className;

  final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
  Map<String, String> params = getAuthFilterParams(conf);
  HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
      params, new String[] { pathSpec });
  HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
      + ")");

  // add webhdfs packages
  httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
      .getPackage().getName() + ";" + Param.class.getPackage().getName(),
      pathSpec);
}
Project: big-c    File: NameNodeHttpServer.java
private void initWebHdfs(Configuration conf) throws IOException {
  if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
    // set user pattern based on configuration file
    UserParam.setUserPattern(conf.get(
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

    // add authentication filter for webhdfs
    final String className = conf.get(
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
    final String name = className;

    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
    Map<String, String> params = getAuthFilterParams(conf);
    HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
        params, new String[] { pathSpec });
    HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
        + ")");

    // add webhdfs packages
    httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
        .getPackage().getName() + ";" + Param.class.getPackage().getName(),
        pathSpec);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: NameNodeHttpServer.java
private void initWebHdfs(Configuration conf) throws IOException {
  if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
    // set user pattern based on configuration file
    UserParam.setUserPattern(conf.get(
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

    // add authentication filter for webhdfs
    final String className = conf.get(
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
    final String name = className;

    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
    Map<String, String> params = getAuthFilterParams(conf);
    HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
        params, new String[] { pathSpec });
    HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
        + ")");

    // add webhdfs packages
    httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
        .getPackage().getName() + ";" + Param.class.getPackage().getName(),
        pathSpec);
  }
}
Project: FlexMap    File: NameNodeHttpServer.java
private void initWebHdfs(Configuration conf) throws IOException {
  if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
    // set user pattern based on configuration file
    UserParam.setUserPattern(conf.get(
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

    // add authentication filter for webhdfs
    final String className = conf.get(
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
    final String name = className;

    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
    Map<String, String> params = getAuthFilterParams(conf);
    HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
        params, new String[] { pathSpec });
    HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
        + ")");

    // add webhdfs packages
    httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
        .getPackage().getName() + ";" + Param.class.getPackage().getName(),
        pathSpec);
  }
}
Project: hadoop-on-lustre2    File: NameNodeHttpServer.java
private void initWebHdfs(Configuration conf) throws IOException {
  if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
    // set user pattern based on configuration file
    UserParam.setUserPattern(conf.get(
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

    // add authentication filter for webhdfs
    final String className = conf.get(
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
    final String name = className;

    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
    Map<String, String> params = getAuthFilterParams(conf);
    HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
        params, new String[] { pathSpec });
    HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
        + ")");

    // add webhdfs packages
    httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
        .getPackage().getName() + ";" + Param.class.getPackage().getName(),
        pathSpec);
  }
}
Project: hadoop    File: FSNamesystem.java
private static InetAddress getRemoteIp() {
  InetAddress ip = Server.getRemoteIp();
  if (ip != null) {
    return ip;
  }
  return NamenodeWebHdfsMethods.getRemoteIp();
}
Project: hadoop    File: NameNodeRpcServer.java
private static String getClientMachine() {
  String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
  if (clientMachine == null) { //not a web client
    clientMachine = Server.getRemoteAddress();
  }
  if (clientMachine == null) { //not a RPC client
    clientMachine = "";
  }
  return clientMachine;
}
Project: hadoop    File: TestWebHDFS.java
/** Test client retry with namenode restarting. */
@Test(timeout=300000)
public void testNamenodeRestart() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final Configuration conf = WebHdfsTestUtil.createConf();
  TestDFSClientRetries.namenodeRestartTest(conf, true);
}
Project: hadoop    File: TestDelegationToken.java
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME  + "://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      "JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs = ugi.doAs(
      new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
    }
  });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
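As a hedged follow-on (not part of the test above), the Credentials object filled by addDelegationTokens(..) could be persisted with Hadoop's standard token-storage helpers and reloaded later; the local file path below is hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;

// Sketch only: persist the WebHDFS delegation tokens collected above and read them back.
static void roundTripTokens(Credentials creds, Configuration conf) throws IOException {
  final Path tokenFile = new Path("file:///tmp/webhdfs.tokens"); // hypothetical location
  creds.writeTokenStorageFile(tokenFile, conf);
  Credentials reloaded = Credentials.readTokenStorageFile(tokenFile, conf);
  assert reloaded.numberOfTokens() == creds.numberOfTokens();
}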
Project: aliyun-oss-hadoop-fs    File: FSNamesystem.java
private static InetAddress getRemoteIp() {
  InetAddress ip = Server.getRemoteIp();
  if (ip != null) {
    return ip;
  }
  return NamenodeWebHdfsMethods.getRemoteIp();
}
Project: aliyun-oss-hadoop-fs    File: NameNodeRpcServer.java
private static String getClientMachine() {
  String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
  if (clientMachine == null) { //not a web client
    clientMachine = Server.getRemoteAddress();
  }
  if (clientMachine == null) { //not a RPC client
    clientMachine = "";
  }
  return clientMachine;
}
Project: aliyun-oss-hadoop-fs    File: TestWebHDFS.java
/** Test client retry with namenode restarting. */
@Test(timeout=300000)
public void testNamenodeRestart() throws Exception {
  GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);
  final Configuration conf = WebHdfsTestUtil.createConf();
  TestDFSClientRetries.namenodeRestartTest(conf, true);
}
Project: aliyun-oss-hadoop-fs    File: TestDelegationToken.java
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);
  final String uri = WebHdfsConstants.WEBHDFS_SCHEME + "://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      "JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs = ugi.doAs(
      new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
    }
  });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
Project: big-c    File: FSNamesystem.java
private static InetAddress getRemoteIp() {
  InetAddress ip = Server.getRemoteIp();
  if (ip != null) {
    return ip;
  }
  return NamenodeWebHdfsMethods.getRemoteIp();
}
Project: big-c    File: NameNodeRpcServer.java
private static String getClientMachine() {
  String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
  if (clientMachine == null) { //not a web client
    clientMachine = Server.getRemoteAddress();
  }
  if (clientMachine == null) { //not a RPC client
    clientMachine = "";
  }
  return clientMachine;
}
Project: big-c    File: TestWebHDFS.java
/** Test client retry with namenode restarting. */
@Test(timeout=300000)
public void testNamenodeRestart() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final Configuration conf = WebHdfsTestUtil.createConf();
  TestDFSClientRetries.namenodeRestartTest(conf, true);
}
Project: big-c    File: TestDelegationToken.java
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME  + "://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      "JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs = ugi.doAs(
      new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
    }
  });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSNamesystem.java
private static InetAddress getRemoteIp() {
  InetAddress ip = Server.getRemoteIp();
  if (ip != null) {
    return ip;
  }
  return NamenodeWebHdfsMethods.getRemoteIp();
}
Project: hadoop-2.6.0-cdh5.4.3    File: NameNodeRpcServer.java
private static String getClientMachine() {
  String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
  if (clientMachine == null) { //not a web client
    clientMachine = Server.getRemoteAddress();
  }
  if (clientMachine == null) { //not a RPC client
    clientMachine = "";
  }
  return clientMachine;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestWebHDFS.java
/** Test client retry with namenode restarting. */
@Test(timeout=300000)
public void testNamenodeRestart() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final Configuration conf = WebHdfsTestUtil.createConf();
  TestDFSClientRetries.namenodeRestartTest(conf, true);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDelegationToken.java
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME  + "://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      "JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs = ugi.doAs(
      new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
    }
  });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
Project: hadoop-plus    File: FSNamesystem.java
private static InetAddress getRemoteIp() {
  InetAddress ip = Server.getRemoteIp();
  if (ip != null) {
    return ip;
  }
  return NamenodeWebHdfsMethods.getRemoteIp();
}
Project: hadoop-plus    File: NameNodeRpcServer.java
private static String getClientMachine() {
  String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
  if (clientMachine == null) { //not a web client
    clientMachine = Server.getRemoteAddress();
  }
  if (clientMachine == null) { //not a RPC client
    clientMachine = "";
  }
  return clientMachine;
}
Project: hadoop-plus    File: TestWebHdfsWithMultipleNameNodes.java
static private void setLogLevel() {
  ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);

  ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
}
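For comparison only: a minimal sketch of the same log-level setup using GenericTestUtils.setLogLevel, as shown in the aliyun-oss-hadoop-fs snippets above; it assumes that utility's overload accepts the commons-logging Log instances used in this branch.

// Sketch only: equivalent configuration via the test utility instead of raw Log4JLogger casts.
GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.OFF);
GenericTestUtils.setLogLevel(LeaseManager.LOG, Level.OFF);
GenericTestUtils.setLogLevel(LogFactory.getLog(FSNamesystem.class), Level.OFF);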
Project: hadoop-plus    File: TestWebHDFS.java
/** Test client retry with namenode restarting. */
@Test(timeout=300000)
public void testNamenodeRestart() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final Configuration conf = WebHdfsTestUtil.createConf();
  TestDFSClientRetries.namenodeRestartTest(conf, true);
}
Project: hadoop-plus    File: TestDelegationToken.java
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME  + "://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      "JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs = ugi.doAs(
      new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
    }
  });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
Project: FlexMap    File: FSNamesystem.java
private static InetAddress getRemoteIp() {
  InetAddress ip = Server.getRemoteIp();
  if (ip != null) {
    return ip;
  }
  return NamenodeWebHdfsMethods.getRemoteIp();
}
Project: FlexMap    File: NameNodeRpcServer.java
private static String getClientMachine() {
  String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
  if (clientMachine == null) { //not a web client
    clientMachine = Server.getRemoteAddress();
  }
  if (clientMachine == null) { //not a RPC client
    clientMachine = "";
  }
  return clientMachine;
}
Project: FlexMap    File: TestWebHdfsWithMultipleNameNodes.java
static private void setLogLevel() {
  ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);

  ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
}
Project: FlexMap    File: TestWebHDFS.java
/** Test client retry with namenode restarting. */
@Test(timeout=300000)
public void testNamenodeRestart() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final Configuration conf = WebHdfsTestUtil.createConf();
  TestDFSClientRetries.namenodeRestartTest(conf, true);
}
Project: FlexMap    File: TestDelegationToken.java
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME  + "://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      "JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs = ugi.doAs(
      new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
    }
  });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
Project: hops    File: FSNamesystem.java
private static InetAddress getRemoteIp() {
  InetAddress ip = Server.getRemoteIp();
  if (ip != null) {
    return ip;
  }
  return NamenodeWebHdfsMethods.getRemoteIp();
}
Project: hops    File: NameNodeRpcServer.java
private static String getClientMachine() {
  String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
  if (clientMachine == null) { //not a web client
    clientMachine = Server.getRemoteAddress();
  }
  if (clientMachine == null) { //not a RPC client
    clientMachine = "";
  }
  return clientMachine;
}
Project: hops    File: TestWebHdfsWithMultipleNameNodes.java
static private void setLogLevel() {
  ((Log4JLogger) LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);

  ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
  ((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.OFF);
  ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger()
      .setLevel(Level.OFF);
}
Project: hops    File: TestWebHDFS.java
/**
 * Test client retry with namenode restarting.
 */
@Test(timeout = 900000)
public void testNamenodeRestart() throws Exception {
  ((Log4JLogger) NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final Configuration conf = WebHdfsTestUtil.createConf();
  TestDFSClientRetries.namenodeRestartTest(conf, true);
}
Project: hops    File: TestDelegationToken.java
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger) NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME + "://" +
      config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation
      .createUserForTesting("JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs =
      ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
            @Override
            public WebHdfsFileSystem run() throws Exception {
              return (WebHdfsFileSystem) FileSystem.get(new URI(uri), config);
            }
          });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] =
        webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?> tokens2[] =
        webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
Project: hadoop-TCP    File: FSNamesystem.java
private static InetAddress getRemoteIp() {
  InetAddress ip = Server.getRemoteIp();
  if (ip != null) {
    return ip;
  }
  return NamenodeWebHdfsMethods.getRemoteIp();
}
Project: hadoop-TCP    File: NameNodeRpcServer.java
private static String getClientMachine() {
  String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
  if (clientMachine == null) { //not a web client
    clientMachine = Server.getRemoteAddress();
  }
  if (clientMachine == null) { //not a RPC client
    clientMachine = "";
  }
  return clientMachine;
}
Project: hadoop-TCP    File: TestWebHdfsWithMultipleNameNodes.java
static private void setLogLevel() {
  ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);

  ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
}