Example source code for the Java class org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo

Project: hadoop    File: DFSAdmin.java
/**
 * Dumps DFS data structures into the specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log " +
        "directory of namenode " + dfs.getUri());
  }
  return 0;
}
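
The HA branch above hinges on one check: with HA enabled, fs.defaultFS is a logical URI such as hdfs://mycluster, and its host component is the nameservice ID rather than a resolvable hostname. A minimal standalone sketch of that check (an illustration, not code from the projects listed here; the class name and "mycluster" are assumptions):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.HAUtil;

public class HaUriCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    URI dfsUri = FileSystem.getDefaultUri(conf);
    if (HAUtil.isLogicalUri(conf, dfsUri)) {
      // With a logical URI, the "host" is the nameservice ID, e.g. "mycluster".
      String nsId = dfsUri.getHost();
      System.out.println("HA nameservice: " + nsId);
    } else {
      System.out.println("Single NameNode at " + dfsUri);
    }
  }
}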
Project: hadoop    File: HAUtil.java
/**
 * Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
 * call should be made on every NN in an HA nameservice, not just the active.
 *
 * @param conf configuration
 * @param nsId the nameservice to get all of the proxies for.
 * @param xface the protocol class.
 * @return a list of RPC proxies for each NN in the nameservice.
 * @throws IOException in the event of error.
 */
public static <T> List<ProxyAndInfo<T>> getProxiesForAllNameNodesInNameservice(
    Configuration conf, String nsId, Class<T> xface) throws IOException {
  Map<String, InetSocketAddress> nnAddresses =
      DFSUtil.getRpcAddressesForNameserviceId(conf, nsId, null);

  List<ProxyAndInfo<T>> proxies = new ArrayList<ProxyAndInfo<T>>(
      nnAddresses.size());
  for (InetSocketAddress nnAddress : nnAddresses.values()) {
    NameNodeProxies.ProxyAndInfo<T> proxyInfo = NameNodeProxies.createNonHAProxy(
        conf, nnAddress, xface, UserGroupInformation.getCurrentUser(), false);
    proxies.add(proxyInfo);
  }
  return proxies;
}
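
A hedged usage sketch for the helper above: fetch a ClientProtocol proxy for every NameNode in a nameservice, invoke one RPC on each, and release the proxies afterwards. This is an illustration rather than project code; the nameservice ID "mycluster" and the filename are assumptions, and DFSAdmin itself does not stop its proxies explicitly.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.ipc.RPC;

public class MetaSaveAllNameNodes {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical nameservice ID; in DFSAdmin it comes from dfsUri.getHost().
    String nsId = "mycluster";
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      try {
        // Each NameNode writes the file into its own log directory.
        proxy.getProxy().metaSave("meta.txt");
        System.out.println("metasave done on " + proxy.getAddress());
      } finally {
        RPC.stopProxy(proxy.getProxy()); // release the RPC connection
      }
    }
  }
}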
Project: big-c    File: DFSAdmin.java
/**
 * Dumps DFS data structures into the specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log " +
        "directory of namenode " + dfs.getUri());
  }
  return 0;
}
Project: big-c    File: HAUtil.java
/**
 * Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
 * call should be made on every NN in an HA nameservice, not just the active.
 *
 * @param conf configuration
 * @param nsId the nameservice to get all of the proxies for.
 * @param xface the protocol class.
 * @return a list of RPC proxies for each NN in the nameservice.
 * @throws IOException in the event of error.
 */
public static <T> List<ProxyAndInfo<T>> getProxiesForAllNameNodesInNameservice(
    Configuration conf, String nsId, Class<T> xface) throws IOException {
  Map<String, InetSocketAddress> nnAddresses =
      DFSUtil.getRpcAddressesForNameserviceId(conf, nsId, null);

  List<ProxyAndInfo<T>> proxies = new ArrayList<ProxyAndInfo<T>>(
      nnAddresses.size());
  for (InetSocketAddress nnAddress : nnAddresses.values()) {
    NameNodeProxies.ProxyAndInfo<T> proxyInfo = NameNodeProxies.createNonHAProxy(
        conf, nnAddress, xface, UserGroupInformation.getCurrentUser(), false);
    proxies.add(proxyInfo);
  }
  return proxies;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSAdmin.java
/**
 * Dumps DFS data structures into the specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log " +
        "directory of namenode " + dfs.getUri());
  }
  return 0;
}
Project: hadoop-2.6.0-cdh5.4.3    File: HAUtil.java
/**
 * Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
 * call should be made on every NN in an HA nameservice, not just the active.
 *
 * @param conf configuration
 * @param nsId the nameservice to get all of the proxies for.
 * @param xface the protocol class.
 * @return a list of RPC proxies for each NN in the nameservice.
 * @throws IOException in the event of error.
 */
public static <T> List<ProxyAndInfo<T>> getProxiesForAllNameNodesInNameservice(
    Configuration conf, String nsId, Class<T> xface) throws IOException {
  Map<String, InetSocketAddress> nnAddresses =
      DFSUtil.getRpcAddressesForNameserviceId(conf, nsId, null);

  List<ProxyAndInfo<T>> proxies = new ArrayList<ProxyAndInfo<T>>(
      nnAddresses.size());
  for (InetSocketAddress nnAddress : nnAddresses.values()) {
    NameNodeProxies.ProxyAndInfo<T> proxyInfo = NameNodeProxies.createNonHAProxy(
        conf, nnAddress, xface, UserGroupInformation.getCurrentUser(), false);
    proxies.add(proxyInfo);
  }
  return proxies;
}
Project: FlexMap    File: DFSAdmin.java
/**
 * Dumps DFS data structures into the specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log " +
        "directory of namenode " + dfs.getUri());
  }
  return 0;
}
Project: FlexMap    File: HAUtil.java
/**
 * Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
 * call should be made on every NN in an HA nameservice, not just the active.
 *
 * @param conf configuration
 * @param nsId the nameservice to get all of the proxies for.
 * @param xface the protocol class.
 * @return a list of RPC proxies for each NN in the nameservice.
 * @throws IOException in the event of error.
 */
public static <T> List<ProxyAndInfo<T>> getProxiesForAllNameNodesInNameservice(
    Configuration conf, String nsId, Class<T> xface) throws IOException {
  Map<String, InetSocketAddress> nnAddresses =
      DFSUtil.getRpcAddressesForNameserviceId(conf, nsId, null);

  List<ProxyAndInfo<T>> proxies = new ArrayList<ProxyAndInfo<T>>(
      nnAddresses.size());
  for (InetSocketAddress nnAddress : nnAddresses.values()) {
    NameNodeProxies.ProxyAndInfo<T> proxyInfo = NameNodeProxies.createNonHAProxy(
        conf, nnAddress, xface, UserGroupInformation.getCurrentUser(), false);
    proxies.add(proxyInfo);
  }
  return proxies;
}
Project: hadoop    File: DFSAdmin.java
/**
 * Command to ask the namenode to save the namespace.
 * Usage: hdfs dfsadmin -saveNamespace
 * @exception IOException 
 * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
 */
public int saveNamespace() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().saveNamespace();
      System.out.println("Save namespace successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.saveNamespace();
    System.out.println("Save namespace successful");
  }
  exitCode = 0;

  return exitCode;
}
Project: hadoop    File: DFSAdmin.java
/**
 * Command to ask the namenode to reread the hosts and excluded hosts 
 * file.
 * Usage: hdfs dfsadmin -refreshNodes
 * @exception IOException 
 */
public int refreshNodes() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
      proxy.getProxy().refreshNodes();
      System.out.println("Refresh nodes successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.refreshNodes();
    System.out.println("Refresh nodes successful");
  }
  exitCode = 0;

  return exitCode;
}
Project: hadoop    File: DFSAdmin.java
/**
 * Command to ask the namenode to finalize previously performed upgrade.
 * Usage: hdfs dfsadmin -finalizeUpgrade
 * @exception IOException 
 */
public int finalizeUpgrade() throws IOException {
  DistributedFileSystem dfs = getDFS();

  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaAndLogicalUri = HAUtil.isLogicalUri(dfsConf, dfsUri);
  if (isHaAndLogicalUri) {
    // In the case of HA and logical URI, run finalizeUpgrade for all
    // NNs in this nameservice.
    String nsId = dfsUri.getHost();
    List<ClientProtocol> namenodes =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
    if (!HAUtil.isAtLeastOneActive(namenodes)) {
      throw new IOException("Cannot finalize with no NameNode active");
    }

    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().finalizeUpgrade();
      System.out.println("Finalize upgrade successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.finalizeUpgrade();
    System.out.println("Finalize upgrade successful");
  }

  return 0;
}
Project: hadoop    File: DFSAdmin.java
/**
 * Refresh the authorization policy on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshServiceAcl() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshServiceAcl for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshAuthorizationPolicyProtocol.class);
    for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
      proxy.getProxy().refreshServiceAcl();
      System.out.println("Refresh service acl successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshAuthorizationPolicyProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshAuthorizationPolicyProtocol.class).getProxy();
    // Refresh the authorization policy in-effect
    refreshProtocol.refreshServiceAcl();
    System.out.println("Refresh service acl successful");
  }

  return 0;
}
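
The non-HA branch above relies on NameNodeProxies.createProxy, which returns a ProxyAndInfo pairing the typed RPC proxy with the address it talks to. Below is a minimal sketch of the same pattern for RefreshUserMappingsProtocol, an illustration under the assumption of a non-HA fs.defaultFS, not code from the projects listed here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;

public class RefreshMappingsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Non-HA path: one proxy to the NameNode named by fs.defaultFS.
    ProxyAndInfo<RefreshUserMappingsProtocol> proxyAndInfo =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class);
    try {
      proxyAndInfo.getProxy().refreshUserToGroupsMappings();
      System.out.println("Refreshed mappings on " + proxyAndInfo.getAddress());
    } finally {
      RPC.stopProxy(proxyAndInfo.getProxy()); // release the RPC connection
    }
  }
}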
Project: hadoop    File: DFSAdmin.java
/**
 * Refresh the user-to-groups mappings on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshUserToGroupsMappings() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshUserToGroupsMapings for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshUserToGroupsMappings();
      System.out.println("Refresh user to groups mapping successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();

    // Refresh the user-to-groups mappings
    refreshProtocol.refreshUserToGroupsMappings();
    System.out.println("Refresh user to groups mapping successful");
  }

  return 0;
}
Project: hadoop    File: DFSAdmin.java
/**
 * Refresh the superuser groups configuration on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshSuperUserGroupsConfiguration() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call 
  // should be NAMENODE's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshSuperUserGroupsConfiguration for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshSuperUserGroupsConfiguration();
      System.out.println("Refresh super user groups configuration " +
          "successful for " + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();

    // Refresh the superuser groups configuration
    refreshProtocol.refreshSuperUserGroupsConfiguration();
    System.out.println("Refresh super user groups configuration successful");
  }

  return 0;
}
Project: hadoop    File: DFSAdmin.java
public int refreshCallQueue() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshCallQueue for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshCallQueueProtocol.class);
    for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
      proxy.getProxy().refreshCallQueue();
      System.out.println("Refresh call queue successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshCallQueueProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshCallQueueProtocol.class).getProxy();

    // Refresh the call queue
    refreshProtocol.refreshCallQueue();
    System.out.println("Refresh call queue successful");
  }

  return 0;
}
Project: hadoop    File: HAUtil.java
/**
 * Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
 * call should be made on every NN in an HA nameservice, not just the active.
 * 
 * @param conf configuration
 * @param nsId the nameservice to get all of the proxies for.
 * @return a list of RPC proxies for each NN in the nameservice.
 * @throws IOException in the event of error.
 */
public static List<ClientProtocol> getProxiesForAllNameNodesInNameservice(
    Configuration conf, String nsId) throws IOException {
  List<ProxyAndInfo<ClientProtocol>> proxies =
      getProxiesForAllNameNodesInNameservice(conf, nsId, ClientProtocol.class);

  List<ClientProtocol> namenodes = new ArrayList<ClientProtocol>(
      proxies.size());
  for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
    namenodes.add(proxy.getProxy());
  }
  return namenodes;
}
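
A short sketch combining this bare-proxy overload with HAUtil.isAtLeastOneActive, the precondition finalizeUpgrade applies above before contacting any NameNode. An illustration only; the nameservice ID "mycluster" and the class name are hypothetical:

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class ActiveNameNodeCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Bare ClientProtocol proxies, one per NameNode in the nameservice.
    List<ClientProtocol> namenodes =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, "mycluster");
    // Same check finalizeUpgrade performs before touching any NN.
    if (!HAUtil.isAtLeastOneActive(namenodes)) {
      throw new IllegalStateException("no active NameNode in the nameservice");
    }
    System.out.println("At least one NameNode is active");
  }
}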
Project: big-c    File: DFSAdmin.java
/**
 * Command to ask the namenode to save the namespace.
 * Usage: hdfs dfsadmin -saveNamespace
 * @exception IOException 
 * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
 */
public int saveNamespace() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().saveNamespace();
      System.out.println("Save namespace successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.saveNamespace();
    System.out.println("Save namespace successful");
  }
  exitCode = 0;

  return exitCode;
}
Project: big-c    File: DFSAdmin.java
/**
 * Command to ask the namenode to reread the hosts and excluded hosts 
 * file.
 * Usage: hdfs dfsadmin -refreshNodes
 * @exception IOException 
 */
public int refreshNodes() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
      proxy.getProxy().refreshNodes();
      System.out.println("Refresh nodes successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.refreshNodes();
    System.out.println("Refresh nodes successful");
  }
  exitCode = 0;

  return exitCode;
}
Project: big-c    File: DFSAdmin.java
/**
 * Command to ask the namenode to finalize previously performed upgrade.
 * Usage: hdfs dfsadmin -finalizeUpgrade
 * @exception IOException 
 */
public int finalizeUpgrade() throws IOException {
  DistributedFileSystem dfs = getDFS();

  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaAndLogicalUri = HAUtil.isLogicalUri(dfsConf, dfsUri);
  if (isHaAndLogicalUri) {
    // In the case of HA and logical URI, run finalizeUpgrade for all
    // NNs in this nameservice.
    String nsId = dfsUri.getHost();
    List<ClientProtocol> namenodes =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
    if (!HAUtil.isAtLeastOneActive(namenodes)) {
      throw new IOException("Cannot finalize with no NameNode active");
    }

    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().finalizeUpgrade();
      System.out.println("Finalize upgrade successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.finalizeUpgrade();
    System.out.println("Finalize upgrade successful");
  }

  return 0;
}
Project: big-c    File: DFSAdmin.java
/**
 * Refresh the authorization policy on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshServiceAcl() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshServiceAcl for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshAuthorizationPolicyProtocol.class);
    for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
      proxy.getProxy().refreshServiceAcl();
      System.out.println("Refresh service acl successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshAuthorizationPolicyProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshAuthorizationPolicyProtocol.class).getProxy();
    // Refresh the authorization policy in-effect
    refreshProtocol.refreshServiceAcl();
    System.out.println("Refresh service acl successful");
  }

  return 0;
}
Project: big-c    File: DFSAdmin.java
/**
 * Refresh the user-to-groups mappings on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshUserToGroupsMappings() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshUserToGroupsMapings for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshUserToGroupsMappings();
      System.out.println("Refresh user to groups mapping successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();

    // Refresh the user-to-groups mappings
    refreshProtocol.refreshUserToGroupsMappings();
    System.out.println("Refresh user to groups mapping successful");
  }

  return 0;
}
Project: big-c    File: DFSAdmin.java
/**
 * Refresh the superuser groups configuration on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshSuperUserGroupsConfiguration() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call 
  // should be NAMENODE's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshSuperUserGroupsConfiguration for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshSuperUserGroupsConfiguration();
      System.out.println("Refresh super user groups configuration " +
          "successful for " + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();

    // Refresh the superuser groups configuration
    refreshProtocol.refreshSuperUserGroupsConfiguration();
    System.out.println("Refresh super user groups configuration successful");
  }

  return 0;
}
Project: big-c    File: DFSAdmin.java
public int refreshCallQueue() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshCallQueue for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshCallQueueProtocol.class);
    for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
      proxy.getProxy().refreshCallQueue();
      System.out.println("Refresh call queue successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshCallQueueProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshCallQueueProtocol.class).getProxy();

    // Refresh the call queue
    refreshProtocol.refreshCallQueue();
    System.out.println("Refresh call queue successful");
  }

  return 0;
}
Project: big-c    File: HAUtil.java
/**
 * Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
 * call should be made on every NN in an HA nameservice, not just the active.
 * 
 * @param conf configuration
 * @param nsId the nameservice to get all of the proxies for.
 * @return a list of RPC proxies for each NN in the nameservice.
 * @throws IOException in the event of error.
 */
public static List<ClientProtocol> getProxiesForAllNameNodesInNameservice(
    Configuration conf, String nsId) throws IOException {
  List<ProxyAndInfo<ClientProtocol>> proxies =
      getProxiesForAllNameNodesInNameservice(conf, nsId, ClientProtocol.class);

  List<ClientProtocol> namenodes = new ArrayList<ClientProtocol>(
      proxies.size());
  for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
    namenodes.add(proxy.getProxy());
  }
  return namenodes;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSAdmin.java
/**
 * Command to ask the namenode to save the namespace.
 * Usage: hdfs dfsadmin -saveNamespace
 * @exception IOException 
 * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
 */
public int saveNamespace() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().saveNamespace();
      System.out.println("Save namespace successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.saveNamespace();
    System.out.println("Save namespace successful");
  }
  exitCode = 0;

  return exitCode;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSAdmin.java
/**
 * Command to ask the namenode to reread the hosts and excluded hosts 
 * file.
 * Usage: hdfs dfsadmin -refreshNodes
 * @exception IOException 
 */
public int refreshNodes() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
      proxy.getProxy().refreshNodes();
      System.out.println("Refresh nodes successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.refreshNodes();
    System.out.println("Refresh nodes successful");
  }
  exitCode = 0;

  return exitCode;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSAdmin.java
/**
 * Command to ask the namenode to finalize previously performed upgrade.
 * Usage: hdfs dfsadmin -finalizeUpgrade
 * @exception IOException 
 */
public int finalizeUpgrade() throws IOException {
  DistributedFileSystem dfs = getDFS();

  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaAndLogicalUri = HAUtil.isLogicalUri(dfsConf, dfsUri);
  if (isHaAndLogicalUri) {
    // In the case of HA and logical URI, run finalizeUpgrade for all
    // NNs in this nameservice.
    String nsId = dfsUri.getHost();
    List<ClientProtocol> namenodes =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
    if (!HAUtil.isAtLeastOneActive(namenodes)) {
      throw new IOException("Cannot finalize with no NameNode active");
    }

    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().finalizeUpgrade();
      System.out.println("Finalize upgrade successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.finalizeUpgrade();
    System.out.println("Finalize upgrade successful");
  }

  return 0;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSAdmin.java
/**
 * Refresh the authorization policy on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshServiceAcl() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshServiceAcl for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshAuthorizationPolicyProtocol.class);
    for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
      proxy.getProxy().refreshServiceAcl();
      System.out.println("Refresh service acl successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshAuthorizationPolicyProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshAuthorizationPolicyProtocol.class).getProxy();
    // Refresh the authorization policy in-effect
    refreshProtocol.refreshServiceAcl();
    System.out.println("Refresh service acl successful");
  }

  return 0;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSAdmin.java
/**
 * Refresh the user-to-groups mappings on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshUserToGroupsMappings() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshUserToGroupsMapings for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshUserToGroupsMappings();
      System.out.println("Refresh user to groups mapping successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();

    // Refresh the user-to-groups mappings
    refreshProtocol.refreshUserToGroupsMappings();
    System.out.println("Refresh user to groups mapping successful");
  }

  return 0;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSAdmin.java
/**
 * Refresh the superuser groups configuration on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshSuperUserGroupsConfiguration() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call 
  // should be NAMENODE's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshSuperUserGroupsConfiguration for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshSuperUserGroupsConfiguration();
      System.out.println("Refresh super user groups configuration " +
          "successful for " + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();

    // Refresh the superuser groups configuration
    refreshProtocol.refreshSuperUserGroupsConfiguration();
    System.out.println("Refresh super user groups configuration successful");
  }

  return 0;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSAdmin.java
public int refreshCallQueue() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshCallQueue for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshCallQueueProtocol.class);
    for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
      proxy.getProxy().refreshCallQueue();
      System.out.println("Refresh call queue successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshCallQueueProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshCallQueueProtocol.class).getProxy();

    // Refresh the call queue
    refreshProtocol.refreshCallQueue();
    System.out.println("Refresh call queue successful");
  }

  return 0;
}
Project: hadoop-2.6.0-cdh5.4.3    File: HAUtil.java
/**
 * Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
 * call should be made on every NN in an HA nameservice, not just the active.
 * 
 * @param conf configuration
 * @param nsId the nameservice to get all of the proxies for.
 * @return a list of RPC proxies for each NN in the nameservice.
 * @throws IOException in the event of error.
 */
public static List<ClientProtocol> getProxiesForAllNameNodesInNameservice(
    Configuration conf, String nsId) throws IOException {
  List<ProxyAndInfo<ClientProtocol>> proxies =
      getProxiesForAllNameNodesInNameservice(conf, nsId, ClientProtocol.class);

  List<ClientProtocol> namenodes = new ArrayList<ClientProtocol>(
      proxies.size());
  for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
    namenodes.add(proxy.getProxy());
  }
  return namenodes;
}
Project: FlexMap    File: DFSAdmin.java
/**
 * Command to ask the namenode to save the namespace.
 * Usage: hdfs dfsadmin -saveNamespace
 * @exception IOException 
 * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
 */
public int saveNamespace() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().saveNamespace();
      System.out.println("Save namespace successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.saveNamespace();
    System.out.println("Save namespace successful");
  }
  exitCode = 0;

  return exitCode;
}
Project: FlexMap    File: DFSAdmin.java
/**
 * Command to ask the namenode to reread the hosts and excluded hosts 
 * file.
 * Usage: hdfs dfsadmin -refreshNodes
 * @exception IOException 
 */
public int refreshNodes() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
      proxy.getProxy().refreshNodes();
      System.out.println("Refresh nodes successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.refreshNodes();
    System.out.println("Refresh nodes successful");
  }
  exitCode = 0;

  return exitCode;
}
Project: FlexMap    File: DFSAdmin.java
/**
 * Command to ask the namenode to finalize previously performed upgrade.
 * Usage: hdfs dfsadmin -finalizeUpgrade
 * @exception IOException 
 */
public int finalizeUpgrade() throws IOException {
  DistributedFileSystem dfs = getDFS();

  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaAndLogicalUri = HAUtil.isLogicalUri(dfsConf, dfsUri);
  if (isHaAndLogicalUri) {
    // In the case of HA and logical URI, run finalizeUpgrade for all
    // NNs in this nameservice.
    String nsId = dfsUri.getHost();
    List<ClientProtocol> namenodes =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
    if (!HAUtil.isAtLeastOneActive(namenodes)) {
      throw new IOException("Cannot finalize with no NameNode active");
    }

    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().finalizeUpgrade();
      System.out.println("Finalize upgrade successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.finalizeUpgrade();
    System.out.println("Finalize upgrade successful");
  }

  return 0;
}
Project: FlexMap    File: DFSAdmin.java
/**
 * Refresh the authorization policy on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshServiceAcl() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshServiceAcl for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshAuthorizationPolicyProtocol.class);
    for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
      proxy.getProxy().refreshServiceAcl();
      System.out.println("Refresh service acl successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshAuthorizationPolicyProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshAuthorizationPolicyProtocol.class).getProxy();
    // Refresh the authorization policy in-effect
    refreshProtocol.refreshServiceAcl();
    System.out.println("Refresh service acl successful");
  }

  return 0;
}
Project: FlexMap    File: DFSAdmin.java
/**
 * Refresh the user-to-groups mappings on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshUserToGroupsMappings() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshUserToGroupsMapings for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshUserToGroupsMappings();
      System.out.println("Refresh user to groups mapping successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();

    // Refresh the user-to-groups mappings
    refreshProtocol.refreshUserToGroupsMappings();
    System.out.println("Refresh user to groups mapping successful");
  }

  return 0;
}
Project: FlexMap    File: DFSAdmin.java
/**
 * Refresh the superuser groups configuration on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshSuperUserGroupsConfiguration() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call 
  // should be NAMENODE's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshSuperUserGroupsConfiguration for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshSuperUserGroupsConfiguration();
      System.out.println("Refresh super user groups configuration " +
          "successful for " + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();

    // Refresh the superuser groups configuration
    refreshProtocol.refreshSuperUserGroupsConfiguration();
    System.out.println("Refresh super user groups configuration successful");
  }

  return 0;
}
Project: FlexMap    File: DFSAdmin.java
public int refreshCallQueue() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshCallQueue for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshCallQueueProtocol.class);
    for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
      proxy.getProxy().refreshCallQueue();
      System.out.println("Refresh call queue successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshCallQueueProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshCallQueueProtocol.class).getProxy();

    // Refresh the call queue
    refreshProtocol.refreshCallQueue();
    System.out.println("Refresh call queue successful");
  }

  return 0;
}
Project: FlexMap    File: HAUtil.java
/**
 * Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
 * call should be made on every NN in an HA nameservice, not just the active.
 * 
 * @param conf configuration
 * @param nsId the nameservice to get all of the proxies for.
 * @return a list of RPC proxies for each NN in the nameservice.
 * @throws IOException in the event of error.
 */
public static List<ClientProtocol> getProxiesForAllNameNodesInNameservice(
    Configuration conf, String nsId) throws IOException {
  List<ProxyAndInfo<ClientProtocol>> proxies =
      getProxiesForAllNameNodesInNameservice(conf, nsId, ClientProtocol.class);

  List<ClientProtocol> namenodes = new ArrayList<ClientProtocol>(
      proxies.size());
  for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
    namenodes.add(proxy.getProxy());
  }
  return namenodes;
}