Java class org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources usage examples (source code)

Project: hadoop    File: MiniDFSCluster.java
/**
 * Restart a datanode, on the same port if requested
 * @param dnprop the datanode to restart
 * @param keepPort whether to use the same port 
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop,
    boolean keepPort) throws IOException {
  Configuration conf = dnprop.conf;
  String[] args = dnprop.dnArgs;
  SecureResources secureResources = dnprop.secureResources;
  Configuration newconf = new HdfsConfiguration(conf); // save cloned config
  if (keepPort) {
    InetSocketAddress addr = dnprop.datanode.getXferAddress();
    conf.set(DFS_DATANODE_ADDRESS_KEY, 
        addr.getAddress().getHostAddress() + ":" + addr.getPort());
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,
        addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort); 
  }
  DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
  dataNodes.add(new DataNodeProperties(
      newDn, newconf, args, secureResources, newDn.getIpcPort()));
  numDataNodes++;
  return true;
}
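
A hedged usage sketch (not part of the aggregated sources above): in a test, restartDataNode is normally paired with stopDataNode, which returns the DataNodeProperties that are handed back in. The cluster variable and the datanode index are placeholders, and the snippet assumes test code that can see MiniDFSCluster.DataNodeProperties.

// Assumes a running MiniDFSCluster named 'cluster'.
DataNodeProperties dnprop = cluster.stopDataNode(0);        // stop the first datanode
boolean restarted = cluster.restartDataNode(dnprop, true);  // true = keep the same ports
cluster.waitActive();                                       // wait until it re-registers with the NameNode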
Project: hadoop    File: DataNode.java
/**
 * Checks if the DataNode has a secure configuration if security is enabled.
 * There are 2 possible configurations that are considered secure:
 * 1. The server has bound to privileged ports for RPC and HTTP via
 *   SecureDataNodeStarter.
 * 2. The configuration enables SASL on DataTransferProtocol and HTTPS (no
 *   plain HTTP) for the HTTP server.  The SASL handshake guarantees
 *   authentication of the RPC server before a client transmits a secret, such
 *   as a block access token.  Similarly, SSL guarantees authentication of the
 *   HTTP server before a client transmits a secret, such as a delegation
 *   token.
 * It is not possible to run with both privileged ports and SASL on
 * DataTransferProtocol.  For backwards-compatibility, the connection logic
 * must check if the target port is a privileged port, and if so, skip the
 * SASL handshake.
 *
 * @param dnConf DNConf to check
 * @param conf Configuration to check
 * @param resources SecureResources obtained for DataNode
 * @throws RuntimeException if security is enabled but the configuration is insecure
 */
private static void checkSecureConfig(DNConf dnConf, Configuration conf,
    SecureResources resources) throws RuntimeException {
  if (!UserGroupInformation.isSecurityEnabled()) {
    return;
  }
  SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
  if (resources != null && saslPropsResolver == null) {
    return;
  }
  if (dnConf.getIgnoreSecurePortsForTesting()) {
    return;
  }
  if (saslPropsResolver != null &&
      DFSUtil.getHttpPolicy(conf) == HttpConfig.Policy.HTTPS_ONLY &&
      resources == null) {
    return;
  }
  throw new RuntimeException("Cannot start secure DataNode without " +
    "configuring either privileged resources or SASL RPC data transfer " +
    "protection and SSL for HTTP.  Using privileged resources in " +
    "combination with SASL RPC data transfer protection is not supported.");
}
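
The second secure option described in the Javadoc corresponds to enabling SASL on DataTransferProtocol together with an HTTPS-only HTTP policy. A minimal sketch of the relevant keys, assuming the standard hdfs-site.xml/core-site.xml property names; the values shown are illustrative only.

// Option 2 from the Javadoc above: SASL data transfer protection + HTTPS-only, no privileged ports.
Configuration conf = new HdfsConfiguration();
conf.set("hadoop.security.authentication", "kerberos");     // makes UserGroupInformation.isSecurityEnabled() true
conf.set("dfs.data.transfer.protection", "authentication"); // activates the SaslPropertiesResolver in DNConf
conf.set("dfs.http.policy", "HTTPS_ONLY");                  // no plain HTTP endpoint
// With these set and resources == null, the check in checkSecureConfig() passes without throwing.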
Project: hadoop    File: DataNode.java
/**
 * Make an instance of DataNode after ensuring that at least one of the
 * given data directories (and their parent directories, if necessary)
 * can be created.
 * @param dataDirs List of directories where the new DataNode instance should
 * keep its files.
 * @param conf Configuration instance to use.
 * @param resources Secure resources needed to run under Kerberos
 * @return DataNode instance for given list of data dirs and conf, or null if
 * no directory from this directory list can be created.
 * @throws IOException
 */
static DataNode makeInstance(Collection<StorageLocation> dataDirs,
    Configuration conf, SecureResources resources) throws IOException {
  LocalFileSystem localFS = FileSystem.getLocal(conf);
  FsPermission permission = new FsPermission(
      conf.get(DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
               DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
  DataNodeDiskChecker dataNodeDiskChecker =
      new DataNodeDiskChecker(permission);
  List<StorageLocation> locations =
      checkStorageLocations(dataDirs, localFS, dataNodeDiskChecker);
  DefaultMetricsSystem.initialize("DataNode");

  assert locations.size() > 0 : "number of data directories should be > 0";
  return new DataNode(conf, locations, resources);
}
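
makeInstance only succeeds if at least one of the configured data directories passes the permission check. A minimal sketch of the configuration it relies on, with placeholder paths; the key names match the DFSConfigKeys constants used above.

// The directory list comes from dfs.datanode.data.dir (read by getStorageLocations in instantiateDataNode);
// makeInstance itself reads the permission key.
Configuration conf = new HdfsConfiguration();
conf.set("dfs.datanode.data.dir", "/data/1/dn,/data/2/dn"); // placeholder paths; becomes the StorageLocation list
conf.set("dfs.datanode.data.dir.perm", "700");              // DFS_DATANODE_DATA_DIR_PERMISSION_KEY, default "700"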
Project: hadoop    File: DataNode.java
public static void secureMain(String args[], SecureResources resources) {
  int errorCode = 0;
  try {
    StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
    DataNode datanode = createDataNode(args, null, resources);
    if (datanode != null) {
      datanode.join();
    } else {
      errorCode = 1;
    }
  } catch (Throwable e) {
    LOG.fatal("Exception in secureMain", e);
    terminate(1, e);
  } finally {
    // We need to terminate the process here because either shutdown was called
    // or some disk-related condition (such as the tolerated/required volume count)
    // was not met. Also, in secure mode, control returns to jsvc and the
    // DataNode process hangs if it does not exit.
    LOG.warn("Exiting Datanode");
    terminate(errorCode);
  }
}
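
secureMain is the entry point used when the DataNode is launched through jsvc via SecureDataNodeStarter, which binds the privileged ports first and then passes the resulting SecureResources in. A hedged outline of that hand-off (simplified, not a verbatim quote; the exact getSecureResources signature and checked exceptions vary between the forks listed on this page):

// Outline of the secure startup path; the caller must handle the declared exceptions.
Configuration conf = new HdfsConfiguration();
SecureResources resources = SecureDataNodeStarter.getSecureResources(conf); // binds privileged RPC/HTTP ports as root
DataNode.secureMain(args, resources);                                       // jsvc has dropped privileges by this point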
Project: aliyun-oss-hadoop-fs    File: DataNode.java
/** Instantiate a single datanode object, along with its secure resources. 
 * This must be run by invoking {@link DataNode#runDatanodeDaemon()}
 * subsequently. 
 */
public static DataNode instantiateDataNode(String args [], Configuration conf,
    SecureResources resources) throws IOException {
  if (conf == null)
    conf = new HdfsConfiguration();

  if (args != null) {
    // parse generic hadoop options
    GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
    args = hParser.getRemainingArgs();
  }

  if (!parseArguments(args, conf)) {
    printUsage(System.err);
    return null;
  }
  Collection<StorageLocation> dataLocations = getStorageLocations(conf);
  UserGroupInformation.setConfiguration(conf);
  SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,
      DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, getHostName(conf));
  return makeInstance(dataLocations, conf, resources);
}
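
As the Javadoc notes, instantiateDataNode only constructs the object; the service threads are started separately (createDataNode does both steps). A hedged usage sketch, assuming the caller handles the declared IOException:

DataNode dn = DataNode.instantiateDataNode(args, conf, resources);
if (dn != null) {
  dn.runDatanodeDaemon(); // start the actual datanode service threads, per the Javadoc above
  dn.join();              // block until the datanode shuts down
}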
Project: aliyun-oss-hadoop-fs    File: DataNode.java
/**
 * Make an instance of DataNode after ensuring that at least one of the
 * given data directories (and their parent directories, if necessary)
 * can be created.
 * @param dataDirs List of directories where the new DataNode instance should
 * keep its files.
 * @param conf Configuration instance to use.
 * @param resources Secure resources needed to run under Kerberos
 * @return DataNode instance for given list of data dirs and conf, or null if
 * no directory from this directory list can be created.
 * @throws IOException
 */
static DataNode makeInstance(Collection<StorageLocation> dataDirs,
    Configuration conf, SecureResources resources) throws IOException {
  LocalFileSystem localFS = FileSystem.getLocal(conf);
  FsPermission permission = new FsPermission(
      conf.get(DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
               DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
  DataNodeDiskChecker dataNodeDiskChecker =
      new DataNodeDiskChecker(permission);
  List<StorageLocation> locations =
      checkStorageLocations(dataDirs, localFS, dataNodeDiskChecker);
  DefaultMetricsSystem.initialize("DataNode");

  assert locations.size() > 0 : "number of data directories should be > 0";
  return new DataNode(conf, locations, resources);
}
Project: aliyun-oss-hadoop-fs    File: DataNode.java
public static void secureMain(String args[], SecureResources resources) {
  int errorCode = 0;
  try {
    StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
    DataNode datanode = createDataNode(args, null, resources);
    if (datanode != null) {
      datanode.join();
    } else {
      errorCode = 1;
    }
  } catch (Throwable e) {
    LOG.error("Exception in secureMain", e);
    terminate(1, e);
  } finally {
    // We need to terminate the process here because either shutdown was called
    // or some disk-related condition (such as the tolerated/required volume count)
    // was not met. Also, in secure mode, control returns to jsvc and the
    // DataNode process hangs if it does not exit.
    LOG.warn("Exiting Datanode");
    terminate(errorCode);
  }
}
Project: aliyun-oss-hadoop-fs    File: MiniDFSCluster.java
/**
 * Restart a datanode, on the same port if requested
 * @param dnprop the datanode to restart
 * @param keepPort whether to use the same port 
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop,
    boolean keepPort) throws IOException {
  Configuration conf = dnprop.conf;
  String[] args = dnprop.dnArgs;
  SecureResources secureResources = dnprop.secureResources;
  Configuration newconf = new HdfsConfiguration(conf); // save cloned config
  if (keepPort) {
    InetSocketAddress addr = dnprop.datanode.getXferAddress();
    conf.set(DFS_DATANODE_ADDRESS_KEY, 
        addr.getAddress().getHostAddress() + ":" + addr.getPort());
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,
        addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort); 
  }
  DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
  dataNodes.add(new DataNodeProperties(
      newDn, newconf, args, secureResources, newDn.getIpcPort()));
  numDataNodes++;
  return true;
}
Project: big-c    File: DataNode.java
/**
 * Checks if the DataNode has a secure configuration if security is enabled.
 * There are 2 possible configurations that are considered secure:
 * 1. The server has bound to privileged ports for RPC and HTTP via
 *   SecureDataNodeStarter.
 * 2. The configuration enables SASL on DataTransferProtocol and HTTPS (no
 *   plain HTTP) for the HTTP server.  The SASL handshake guarantees
 *   authentication of the RPC server before a client transmits a secret, such
 *   as a block access token.  Similarly, SSL guarantees authentication of the
 *   HTTP server before a client transmits a secret, such as a delegation
 *   token.
 * It is not possible to run with both privileged ports and SASL on
 * DataTransferProtocol.  For backwards-compatibility, the connection logic
 * must check if the target port is a privileged port, and if so, skip the
 * SASL handshake.
 *
 * @param dnConf DNConf to check
 * @param conf Configuration to check
 * @param resources SecureResources obtained for DataNode
 * @throws RuntimeException if security is enabled but the configuration is insecure
 */
private static void checkSecureConfig(DNConf dnConf, Configuration conf,
    SecureResources resources) throws RuntimeException {
  if (!UserGroupInformation.isSecurityEnabled()) {
    return;
  }
  SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
  if (resources != null && saslPropsResolver == null) {
    return;
  }
  if (dnConf.getIgnoreSecurePortsForTesting()) {
    return;
  }
  if (saslPropsResolver != null &&
      DFSUtil.getHttpPolicy(conf) == HttpConfig.Policy.HTTPS_ONLY &&
      resources == null) {
    return;
  }
  throw new RuntimeException("Cannot start secure DataNode without " +
    "configuring either privileged resources or SASL RPC data transfer " +
    "protection and SSL for HTTP.  Using privileged resources in " +
    "combination with SASL RPC data transfer protection is not supported.");
}
Project: big-c    File: DataNode.java
/** Instantiate a single datanode object, along with its secure resources. 
 * This must be run by invoking {@link DataNode#runDatanodeDaemon()}
 * subsequently. 
 */
public static DataNode instantiateDataNode(String args [], Configuration conf,
    SecureResources resources) throws IOException {
  if (conf == null)
    conf = new HdfsConfiguration();

  if (args != null) {
    // parse generic hadoop options
    GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
    args = hParser.getRemainingArgs();
  }

  if (!parseArguments(args, conf)) {
    printUsage(System.err);
    return null;
  }
  Collection<StorageLocation> dataLocations = getStorageLocations(conf);
  UserGroupInformation.setConfiguration(conf);
  SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,
      DFS_DATANODE_KERBEROS_PRINCIPAL_KEY);
  return makeInstance(dataLocations, conf, resources);
}
Project: big-c    File: DataNode.java
/**
 * Make an instance of DataNode after ensuring that at least one of the
 * given data directories (and their parent directories, if necessary)
 * can be created.
 * @param dataDirs List of directories where the new DataNode instance should
 * keep its files.
 * @param conf Configuration instance to use.
 * @param resources Secure resources needed to run under Kerberos
 * @return DataNode instance for given list of data dirs and conf, or null if
 * no directory from this directory list can be created.
 * @throws IOException
 */
static DataNode makeInstance(Collection<StorageLocation> dataDirs,
    Configuration conf, SecureResources resources) throws IOException {
  LocalFileSystem localFS = FileSystem.getLocal(conf);
  FsPermission permission = new FsPermission(
      conf.get(DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
               DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
  DataNodeDiskChecker dataNodeDiskChecker =
      new DataNodeDiskChecker(permission);
  List<StorageLocation> locations =
      checkStorageLocations(dataDirs, localFS, dataNodeDiskChecker);
  DefaultMetricsSystem.initialize("DataNode");

  assert locations.size() > 0 : "number of data directories should be > 0";
  return new DataNode(conf, locations, resources);
}
Project: big-c    File: DataNode.java
public static void secureMain(String args[], SecureResources resources) {
  int errorCode = 0;
  try {
    StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
    DataNode datanode = createDataNode(args, null, resources);
    if (datanode != null) {
      datanode.join();
    } else {
      errorCode = 1;
    }
  } catch (Throwable e) {
    LOG.fatal("Exception in secureMain", e);
    terminate(1, e);
  } finally {
    // We need to terminate the process here because either shutdown was called
    // or some disk-related condition (such as the tolerated/required volume count)
    // was not met. Also, in secure mode, control returns to jsvc and the
    // DataNode process hangs if it does not exit.
    LOG.warn("Exiting Datanode");
    terminate(errorCode);
  }
}
Project: big-c    File: MiniDFSCluster.java
/**
 * Restart a datanode, on the same port if requested
 * @param dnprop the datanode to restart
 * @param keepPort whether to use the same port 
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop,
    boolean keepPort) throws IOException {
  Configuration conf = dnprop.conf;
  String[] args = dnprop.dnArgs;
  SecureResources secureResources = dnprop.secureResources;
  Configuration newconf = new HdfsConfiguration(conf); // save cloned config
  if (keepPort) {
    InetSocketAddress addr = dnprop.datanode.getXferAddress();
    conf.set(DFS_DATANODE_ADDRESS_KEY, 
        addr.getAddress().getHostAddress() + ":" + addr.getPort());
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,
        addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort); 
  }
  DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
  dataNodes.add(new DataNodeProperties(
      newDn, newconf, args, secureResources, newDn.getIpcPort()));
  numDataNodes++;
  return true;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataNode.java
/**
 * Checks if the DataNode has a secure configuration if security is enabled.
 * There are 2 possible configurations that are considered secure:
 * 1. The server has bound to privileged ports for RPC and HTTP via
 *   SecureDataNodeStarter.
 * 2. The configuration enables SASL on DataTransferProtocol and HTTPS (no
 *   plain HTTP) for the HTTP server.  The SASL handshake guarantees
 *   authentication of the RPC server before a client transmits a secret, such
 *   as a block access token.  Similarly, SSL guarantees authentication of the
 *   HTTP server before a client transmits a secret, such as a delegation
 *   token.
 * It is not possible to run with both privileged ports and SASL on
 * DataTransferProtocol.  For backwards-compatibility, the connection logic
 * must check if the target port is a privileged port, and if so, skip the
 * SASL handshake.
 *
 * @param dnConf DNConf to check
 * @param conf Configuration to check
 * @param resources SecureResources obtained for DataNode
 * @throws RuntimeException if security is enabled but the configuration is insecure
 */
private static void checkSecureConfig(DNConf dnConf, Configuration conf,
    SecureResources resources) throws RuntimeException {
  if (!UserGroupInformation.isSecurityEnabled()) {
    return;
  }
  SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
  if (resources != null && saslPropsResolver == null) {
    return;
  }
  if (dnConf.getIgnoreSecurePortsForTesting()) {
    return;
  }
  if (saslPropsResolver != null &&
      DFSUtil.getHttpPolicy(conf) == HttpConfig.Policy.HTTPS_ONLY &&
      resources == null) {
    return;
  }
  throw new RuntimeException("Cannot start secure DataNode without " +
    "configuring either privileged resources or SASL RPC data transfer " +
    "protection and SSL for HTTP.  Using privileged resources in " +
    "combination with SASL RPC data transfer protection is not supported.");
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataNode.java
/** Instantiate a single datanode object, along with its secure resources. 
 * This must be run by invoking {@link DataNode#runDatanodeDaemon()}
 * subsequently. 
 */
public static DataNode instantiateDataNode(String args [], Configuration conf,
    SecureResources resources) throws IOException {
  if (conf == null)
    conf = new HdfsConfiguration();   // czhc: how does this locate the configuration files?

  if (args != null) {
    // parse generic hadoop options
    GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
    args = hParser.getRemainingArgs();
  }

  if (!parseArguments(args, conf)) {
    printUsage(System.err);
    return null;
  }
  Collection<StorageLocation> dataLocations = getStorageLocations(conf);
  UserGroupInformation.setConfiguration(conf);
  SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,
      DFS_DATANODE_KERBEROS_PRINCIPAL_KEY);
  return makeInstance(dataLocations, conf, resources);
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataNode.java
/**
 * Make an instance of DataNode after ensuring that at least one of the
 * given data directories (and their parent directories, if necessary)
 * can be created.
 * @param dataDirs List of directories where the new DataNode instance should
 * keep its files.
 * @param conf Configuration instance to use.
 * @param resources Secure resources needed to run under Kerberos
 * @return DataNode instance for given list of data dirs and conf, or null if
 * no directory from this directory list can be created.
 * @throws IOException
 */
static DataNode makeInstance(Collection<StorageLocation> dataDirs,
    Configuration conf, SecureResources resources) throws IOException {
  LocalFileSystem localFS = FileSystem.getLocal(conf);
  FsPermission permission = new FsPermission(
      conf.get(DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
               DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
  DataNodeDiskChecker dataNodeDiskChecker =
      new DataNodeDiskChecker(permission);
  List<StorageLocation> locations =
      checkStorageLocations(dataDirs, localFS, dataNodeDiskChecker);
  DefaultMetricsSystem.initialize("DataNode");

  assert locations.size() > 0 : "number of data directories should be > 0";
  return new DataNode(conf, locations, resources);
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataNode.java
public static void secureMain(String args[], SecureResources resources) {
  int errorCode = 0;
  try {
    StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
    DataNode datanode = createDataNode(args, null, resources);
    if (datanode != null) {
      datanode.join();
    } else {
      errorCode = 1;
    }
  } catch (Throwable e) {
    LOG.fatal("Exception in secureMain", e);
    terminate(1, e);
  } finally {
    // We need to terminate the process here because either shutdown was called
    // or some disk-related condition (such as the tolerated/required volume count)
    // was not met. Also, in secure mode, control returns to jsvc and the
    // DataNode process hangs if it does not exit.
    LOG.warn("Exiting Datanode");
    terminate(errorCode);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: MiniDFSCluster.java
/**
 * Restart a datanode, on the same port if requested
 * @param dnprop the datanode to restart
 * @param keepPort whether to use the same port 
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop,
    boolean keepPort) throws IOException {
  Configuration conf = dnprop.conf;
  String[] args = dnprop.dnArgs;
  SecureResources secureResources = dnprop.secureResources;
  Configuration newconf = new HdfsConfiguration(conf); // save cloned config
  if (keepPort) {
    InetSocketAddress addr = dnprop.datanode.getXferAddress();
    conf.set(DFS_DATANODE_ADDRESS_KEY, 
        addr.getAddress().getHostAddress() + ":" + addr.getPort());
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,
        addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort); 
  }
  DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
  dataNodes.add(new DataNodeProperties(
      newDn, newconf, args, secureResources, newDn.getIpcPort()));
  numDataNodes++;
  return true;
}
Project: hadoop-plus    File: DataNode.java
/** Instantiate a single datanode object, along with its secure resources. 
 * This must be run by invoking {@link DataNode#runDatanodeDaemon()}
 * subsequently. 
 */
public static DataNode instantiateDataNode(String args [], Configuration conf,
    SecureResources resources) throws IOException {
  if (conf == null)
    conf = new HdfsConfiguration();

  if (args != null) {
    // parse generic hadoop options
    GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
    args = hParser.getRemainingArgs();
  }

  if (!parseArguments(args, conf)) {
    printUsage(System.err);
    return null;
  }
  Collection<URI> dataDirs = getStorageDirs(conf);
  UserGroupInformation.setConfiguration(conf);
  SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,
      DFS_DATANODE_USER_NAME_KEY);
  return makeInstance(dataDirs, conf, resources);
}
Project: hadoop-plus    File: DataNode.java
/**
 * Make an instance of DataNode after ensuring that at least one of the
 * given data directories (and their parent directories, if necessary)
 * can be created.
 * @param dataDirs List of directories where the new DataNode instance should
 * keep its files.
 * @param conf Configuration instance to use.
 * @param resources Secure resources needed to run under Kerberos
 * @return DataNode instance for given list of data dirs and conf, or null if
 * no directory from this directory list can be created.
 * @throws IOException
 */
static DataNode makeInstance(Collection<URI> dataDirs, Configuration conf,
    SecureResources resources) throws IOException {
  LocalFileSystem localFS = FileSystem.getLocal(conf);
  FsPermission permission = new FsPermission(
      conf.get(DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
               DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
  DataNodeDiskChecker dataNodeDiskChecker =
      new DataNodeDiskChecker(permission);
  ArrayList<File> dirs =
      getDataDirsFromURIs(dataDirs, localFS, dataNodeDiskChecker);
  DefaultMetricsSystem.initialize("DataNode");

  assert dirs.size() > 0 : "number of data directories should be > 0";
  return new DataNode(conf, dirs, resources);
}
Project: hadoop-plus    File: DataNode.java
public static void secureMain(String args[], SecureResources resources) {
  try {
    StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
    DataNode datanode = createDataNode(args, null, resources);
    if (datanode != null)
      datanode.join();
  } catch (Throwable e) {
    LOG.fatal("Exception in secureMain", e);
    terminate(1, e);
  } finally {
    // We need to terminate the process here because either shutdown was called
    // or some disk-related condition (such as the tolerated/required volume count)
    // was not met. Also, in secure mode, control returns to jsvc and the
    // DataNode process hangs if it does not exit.
    LOG.warn("Exiting Datanode");
    terminate(0);
  }
}
Project: hadoop-plus    File: MiniDFSCluster.java
/**
 * Restart a datanode, on the same port if requested
 * @param dnprop the datanode to restart
 * @param keepPort whether to use the same port 
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop,
    boolean keepPort) throws IOException {
  Configuration conf = dnprop.conf;
  String[] args = dnprop.dnArgs;
  SecureResources secureResources = dnprop.secureResources;
  Configuration newconf = new HdfsConfiguration(conf); // save cloned config
  if (keepPort) {
    InetSocketAddress addr = dnprop.datanode.getXferAddress();
    conf.set(DFS_DATANODE_ADDRESS_KEY, 
        addr.getAddress().getHostAddress() + ":" + addr.getPort());
  }
  dataNodes.add(new DataNodeProperties(
      DataNode.createDataNode(args, conf, secureResources),
      newconf, args, secureResources));
  numDataNodes++;
  return true;
}
Project: PDHC    File: CheckerNode.java
/** Instantiate a single datanode object, along with its secure resources. 
 * This must be run by invoking {@link DataNode#runDatanodeDaemon()}
 * subsequently. 
 */
public static DataNode instantiateDataNode(String args [], Configuration conf,
    SecureResources resources) throws IOException {
  if (conf == null)
    conf = new HdfsConfiguration();

  if (args != null) {
    // parse generic hadoop options
    GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
    args = hParser.getRemainingArgs();
  }

  if (!parseArguments(args, conf)) {
    printUsage(System.err);
    return null;
  }
  Collection<URI> dataDirs = getStorageDirs(conf);
  UserGroupInformation.setConfiguration(conf);
  SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,
      DFS_DATANODE_USER_NAME_KEY);
  return makeInstance(dataDirs, conf, resources);
}
Project: PDHC    File: CheckerNode.java
/**
 * Make an instance of DataNode after ensuring that at least one of the
 * given data directories (and their parent directories, if necessary)
 * can be created.
 * @param dataDirs List of directories where the new DataNode instance should
 * keep its files.
 * @param conf Configuration instance to use.
 * @param resources Secure resources needed to run under Kerberos
 * @return DataNode instance for given list of data dirs and conf, or null if
 * no directory from this directory list can be created.
 * @throws IOException
 */
static DataNode makeInstance(Collection<URI> dataDirs, Configuration conf,
    SecureResources resources) throws IOException {
  LocalFileSystem localFS = FileSystem.getLocal(conf);
  FsPermission permission = new FsPermission(
      conf.get(DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
               DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
  DataNodeDiskChecker dataNodeDiskChecker =
      new DataNodeDiskChecker(permission);
  ArrayList<File> dirs =
      getDataDirsFromURIs(dataDirs, localFS, dataNodeDiskChecker);
  DefaultMetricsSystem.initialize("DataNode");

  assert dirs.size() > 0 : "number of data directories should be > 0";
  return new DataNode(conf, dirs, resources);
}
Project: PDHC    File: CheckerNode.java
public static void secureMain(String args[], SecureResources resources) {
  try {
    StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
    DataNode datanode = createDataNode(args, null, resources);
    if (datanode != null)
      datanode.join();
  } catch (Throwable e) {
    LOG.fatal("Exception in secureMain", e);
    terminate(1, e);
  } finally {
    // We need to terminate the process here because either shutdown was called
    // or some disk-related condition (such as the tolerated/required volume count)
    // was not met. Also, in secure mode, control returns to jsvc and the
    // DataNode process hangs if it does not exit.
    LOG.warn("Exiting Datanode");
    terminate(0);
  }
}
Project: FlexMap    File: DataNode.java
/**
 * Checks if the DataNode has a secure configuration if security is enabled.
 * There are 2 possible configurations that are considered secure:
 * 1. The server has bound to privileged ports for RPC and HTTP via
 *   SecureDataNodeStarter.
 * 2. The configuration enables SASL on DataTransferProtocol and HTTPS (no
 *   plain HTTP) for the HTTP server.  The SASL handshake guarantees
 *   authentication of the RPC server before a client transmits a secret, such
 *   as a block access token.  Similarly, SSL guarantees authentication of the
 *   HTTP server before a client transmits a secret, such as a delegation
 *   token.
 * It is not possible to run with both privileged ports and SASL on
 * DataTransferProtocol.  For backwards-compatibility, the connection logic
 * must check if the target port is a privileged port, and if so, skip the
 * SASL handshake.
 *
 * @param dnConf DNConf to check
 * @param conf Configuration to check
 * @param resources SecureResources obtained for DataNode
 * @throws RuntimeException if security is enabled but the configuration is insecure
 */
private static void checkSecureConfig(DNConf dnConf, Configuration conf,
    SecureResources resources) throws RuntimeException {
  if (!UserGroupInformation.isSecurityEnabled()) {
    return;
  }
  SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
  if (resources != null && saslPropsResolver == null) {
    return;
  }
  if (dnConf.getIgnoreSecurePortsForTesting()) {
    return;
  }
  if (saslPropsResolver != null &&
      DFSUtil.getHttpPolicy(conf) == HttpConfig.Policy.HTTPS_ONLY &&
      resources == null) {
    return;
  }
  throw new RuntimeException("Cannot start secure DataNode without " +
    "configuring either privileged resources or SASL RPC data transfer " +
    "protection and SSL for HTTP.  Using privileged resources in " +
    "combination with SASL RPC data transfer protection is not supported.");
}
Project: FlexMap    File: DataNode.java
/** Instantiate a single datanode object, along with its secure resources. 
 * This must be run by invoking {@link DataNode#runDatanodeDaemon()}
 * subsequently. 
 */
public static DataNode instantiateDataNode(String args [], Configuration conf,
    SecureResources resources) throws IOException {
  if (conf == null)
    conf = new HdfsConfiguration();

  if (args != null) {
    // parse generic hadoop options
    GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
    args = hParser.getRemainingArgs();
  }

  if (!parseArguments(args, conf)) {
    printUsage(System.err);
    return null;
  }
  Collection<StorageLocation> dataLocations = getStorageLocations(conf);
  UserGroupInformation.setConfiguration(conf);
  SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,
      DFS_DATANODE_KERBEROS_PRINCIPAL_KEY);
  return makeInstance(dataLocations, conf, resources);
}
Project: FlexMap    File: DataNode.java
/**
 * Make an instance of DataNode after ensuring that at least one of the
 * given data directories (and their parent directories, if necessary)
 * can be created.
 * @param dataDirs List of directories where the new DataNode instance should
 * keep its files.
 * @param conf Configuration instance to use.
 * @param resources Secure resources needed to run under Kerberos
 * @return DataNode instance for given list of data dirs and conf, or null if
 * no directory from this directory list can be created.
 * @throws IOException
 */
static DataNode makeInstance(Collection<StorageLocation> dataDirs,
    Configuration conf, SecureResources resources) throws IOException {
  LocalFileSystem localFS = FileSystem.getLocal(conf);
  FsPermission permission = new FsPermission(
      conf.get(DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
               DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
  DataNodeDiskChecker dataNodeDiskChecker =
      new DataNodeDiskChecker(permission);
  List<StorageLocation> locations =
      checkStorageLocations(dataDirs, localFS, dataNodeDiskChecker);
  DefaultMetricsSystem.initialize("DataNode");

  assert locations.size() > 0 : "number of data directories should be > 0";
  return new DataNode(conf, locations, resources);
}
Project: FlexMap    File: DataNode.java
public static void secureMain(String args[], SecureResources resources) {
  int errorCode = 0;
  try {
    StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
    DataNode datanode = createDataNode(args, null, resources);
    if (datanode != null) {
      datanode.join();
    } else {
      errorCode = 1;
    }
  } catch (Throwable e) {
    LOG.fatal("Exception in secureMain", e);
    terminate(1, e);
  } finally {
    // We need to terminate the process here because either shutdown was called
    // or some disk-related condition (such as the tolerated/required volume count)
    // was not met. Also, in secure mode, control returns to jsvc and the
    // DataNode process hangs if it does not exit.
    LOG.warn("Exiting Datanode");
    terminate(errorCode);
  }
}
Project: FlexMap    File: MiniDFSCluster.java
/**
 * Restart a datanode, on the same port if requested
 * @param dnprop the datanode to restart
 * @param keepPort whether to use the same port 
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop,
    boolean keepPort) throws IOException {
  Configuration conf = dnprop.conf;
  String[] args = dnprop.dnArgs;
  SecureResources secureResources = dnprop.secureResources;
  Configuration newconf = new HdfsConfiguration(conf); // save cloned config
  if (keepPort) {
    InetSocketAddress addr = dnprop.datanode.getXferAddress();
    conf.set(DFS_DATANODE_ADDRESS_KEY, 
        addr.getAddress().getHostAddress() + ":" + addr.getPort());
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,
        addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort); 
  }
  DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
  dataNodes.add(new DataNodeProperties(
      newDn, newconf, args, secureResources, newDn.getIpcPort()));
  numDataNodes++;
  return true;
}
Project: hops    File: DataNode.java
/**
 * Create the DataNode given a configuration, an array of dataDirs,
 * and a namenode proxy
 */
DataNode(final Configuration conf, final AbstractList<File> dataDirs,
    final SecureResources resources) throws IOException {
  super(conf);

  this.usersWithLocalPathAccess = Arrays.asList(conf.getTrimmedStrings(
      DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
  this.connectToDnViaHostname =
      conf.getBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
          DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
  this.getHdfsBlockLocationsEnabled =
      conf.getBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
          DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
  try {
    hostName = getHostName(conf);
    LOG.info("Configured hostname is " + hostName);
    startDataNode(conf, dataDirs, resources);
  } catch (IOException ie) {
    shutdown();
    throw ie;
  }
}
Project: hops    File: DataNode.java
/**
 * Instantiate a single datanode object, along with its secure resources.
 * This must be run by invoking {@link DataNode#runDatanodeDaemon()}
 * subsequently.
 */
public static DataNode instantiateDataNode(String args[], Configuration conf,
    SecureResources resources) throws IOException {
  if (conf == null) {
    conf = new HdfsConfiguration();
  }

  if (args != null) {
    // parse generic hadoop options
    GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
    args = hParser.getRemainingArgs();
  }

  if (!parseArguments(args, conf)) {
    printUsage(System.err);
    return null;
  }
  Collection<URI> dataDirs = getStorageDirs(conf);
  UserGroupInformation.setConfiguration(conf);
  SecurityUtil
      .login(conf, DFS_DATANODE_KEYTAB_FILE_KEY, DFS_DATANODE_USER_NAME_KEY);
  return makeInstance(dataDirs, conf, resources);
}
Project: hops    File: DataNode.java
public static void secureMain(String args[], SecureResources resources) {
  try {
    StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
    DataNode datanode = createDataNode(args, null, resources);
    if (datanode != null) {
      datanode.join();
    }
  } catch (Throwable e) {
    LOG.fatal("Exception in secureMain", e);
    terminate(1, e);
  } finally {
    // We need to terminate the process here because either shutdown was called
    // or some disk-related condition (such as the tolerated/required volume count)
    // was not met. Also, in secure mode, control returns to jsvc and the
    // DataNode process hangs if it does not exit.
    LOG.warn("Exiting Datanode");
    terminate(0);
  }
}
Project: hadoop-TCP    File: DataNode.java
/** Instantiate a single datanode object, along with its secure resources. 
 * This must be run by invoking {@link DataNode#runDatanodeDaemon()}
 * subsequently. 
 */
public static DataNode instantiateDataNode(String args [], Configuration conf,
    SecureResources resources) throws IOException {
  if (conf == null)
    conf = new HdfsConfiguration();

  if (args != null) {
    // parse generic hadoop options
    GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
    args = hParser.getRemainingArgs();
  }

  if (!parseArguments(args, conf)) {
    printUsage(System.err);
    return null;
  }
  Collection<URI> dataDirs = getStorageDirs(conf);
  UserGroupInformation.setConfiguration(conf);
  SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,
      DFS_DATANODE_USER_NAME_KEY);
  return makeInstance(dataDirs, conf, resources);
}
Project: hadoop-TCP    File: DataNode.java
/**
 * Make an instance of DataNode after ensuring that at least one of the
 * given data directories (and their parent directories, if necessary)
 * can be created.
 * @param dataDirs List of directories where the new DataNode instance should
 * keep its files.
 * @param conf Configuration instance to use.
 * @param resources Secure resources needed to run under Kerberos
 * @return DataNode instance for given list of data dirs and conf, or null if
 * no directory from this directory list can be created.
 * @throws IOException
 */
static DataNode makeInstance(Collection<URI> dataDirs, Configuration conf,
    SecureResources resources) throws IOException {
  LocalFileSystem localFS = FileSystem.getLocal(conf);
  FsPermission permission = new FsPermission(
      conf.get(DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
               DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
  DataNodeDiskChecker dataNodeDiskChecker =
      new DataNodeDiskChecker(permission);
  ArrayList<File> dirs =
      getDataDirsFromURIs(dataDirs, localFS, dataNodeDiskChecker);
  DefaultMetricsSystem.initialize("DataNode");

  assert dirs.size() > 0 : "number of data directories should be > 0";
  return new DataNode(conf, dirs, resources);
}
Project: hadoop-TCP    File: DataNode.java
public static void secureMain(String args[], SecureResources resources) {
  try {
    StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
    DataNode datanode = createDataNode(args, null, resources);
    if (datanode != null)
      datanode.join();
  } catch (Throwable e) {
    LOG.fatal("Exception in secureMain", e);
    terminate(1, e);
  } finally {
    // We need to terminate the process here because either shutdown was called
    // or some disk-related condition (such as the tolerated/required volume count)
    // was not met. Also, in secure mode, control returns to jsvc and the
    // DataNode process hangs if it does not exit.
    LOG.warn("Exiting Datanode");
    terminate(0);
  }
}
Project: hadoop-TCP    File: MiniDFSCluster.java
/**
 * Restart a datanode, on the same port if requested
 * @param dnprop the datanode to restart
 * @param keepPort whether to use the same port 
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop,
    boolean keepPort) throws IOException {
  Configuration conf = dnprop.conf;
  String[] args = dnprop.dnArgs;
  SecureResources secureResources = dnprop.secureResources;
  Configuration newconf = new HdfsConfiguration(conf); // save cloned config
  if (keepPort) {
    InetSocketAddress addr = dnprop.datanode.getXferAddress();
    conf.set(DFS_DATANODE_ADDRESS_KEY, 
        addr.getAddress().getHostAddress() + ":" + addr.getPort());
  }
  dataNodes.add(new DataNodeProperties(
      DataNode.createDataNode(args, conf, secureResources),
      newconf, args, secureResources));
  numDataNodes++;
  return true;
}
Project: hadoop-on-lustre    File: DataNode.java
/**
 * Start a Datanode with specified server sockets for secure environments
 * where they are run with privileged ports and injected from a higher
 * level of capability
 */
DataNode(final Configuration conf,
         final AbstractList<File> dataDirs, SecureResources resources) throws IOException {
  super(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, 
      DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY);

  datanodeObject = this;
  supportAppends = conf.getBoolean("dfs.support.append", false);
  this.userWithLocalPathAccess = conf
      .get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
  try {
    startDataNode(conf, dataDirs, resources);
  } catch (IOException ie) {
    shutdown();
    throw ie;
  }   
}
Project: hadoop-on-lustre    File: DataNode.java
/** Instantiate a single datanode object. This must be run by invoking
 *  {@link DataNode#runDatanodeDaemon(DataNode)} subsequently. 
 * @param resources Secure resources needed to run under Kerberos
 */
public static DataNode instantiateDataNode(String args[],
                                    Configuration conf, 
                                    SecureResources resources) throws IOException {
  if (conf == null)
    conf = new Configuration();
  if (!parseArguments(args, conf)) {
    printUsage();
    return null;
  }
  if (conf.get("dfs.network.script") != null) {
    LOG.error("This configuration for rack identification is not supported" +
        " anymore. RackID resolution is handled by the NameNode.");
    System.exit(-1);
  }
  String[] dataDirs = conf.getStrings(DATA_DIR_KEY);
  dnThreadName = "DataNode: [" +
                      StringUtils.arrayToString(dataDirs) + "]";
  DefaultMetricsSystem.initialize("DataNode");
  return makeInstance(dataDirs, conf, resources);
}
Project: hadoop-on-lustre    File: DataNode.java
/**
 * Make an instance of DataNode after ensuring that at least one of the
 * given data directories (and their parent directories, if necessary)
 * can be created.
 * @param dataDirs List of directories where the new DataNode instance should
 * keep its files.
 * @param conf Configuration instance to use.
 * @param resources Secure resources needed to run under Kerberos
 * @return DataNode instance for given list of data dirs and conf, or null if
 * no directory from this directory list can be created.
 * @throws IOException
 */
public static DataNode makeInstance(String[] dataDirs, Configuration conf, 
    SecureResources resources) throws IOException {
  UserGroupInformation.setConfiguration(conf);
  LocalFileSystem localFS = FileSystem.getLocal(conf);
  ArrayList<File> dirs = new ArrayList<File>();
  FsPermission dataDirPermission = 
    new FsPermission(conf.get(DATA_DIR_PERMISSION_KEY, 
                              DEFAULT_DATA_DIR_PERMISSION));
  for (String dir : dataDirs) {
    try {
      DiskChecker.checkDir(localFS, new Path(dir), dataDirPermission);
      dirs.add(new File(dir));
    } catch(IOException e) {
      LOG.warn("Invalid directory in " + DATA_DIR_KEY +  ": " + 
               e.getMessage());
    }
  }
  if (dirs.size() > 0) 
    return new DataNode(conf, dirs, resources);
  LOG.error("All directories in " + DATA_DIR_KEY + " are invalid.");
  return null;
}