Example source code for the Java class org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager

Project: hadoop-plus    File: DataTransferEncryptor.java
/**
 * Given a secret manager and a username encoded as described above, determine
 * the encryption key.
 * 
 * @param blockPoolTokenSecretManager to determine the encryption key.
 * @param userName containing the keyId, blockPoolId, and nonce.
 * @return secret encryption key.
 * @throws IOException
 */
private static byte[] getEncryptionKeyFromUserName(
    BlockPoolTokenSecretManager blockPoolTokenSecretManager, String userName)
    throws IOException {
  String[] nameComponents = userName.split(NAME_DELIMITER);
  if (nameComponents.length != 3) {
    throw new IOException("Provided name '" + userName + "' has " +
        nameComponents.length + " components instead of the expected 3.");
  }
  int keyId = Integer.parseInt(nameComponents[0]);
  String blockPoolId = nameComponents[1];
  byte[] nonce = Base64.decodeBase64(nameComponents[2]);
  return blockPoolTokenSecretManager.retrieveDataEncryptionKey(keyId,
      blockPoolId, nonce);
}
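For orientation, here is a minimal sketch of the inverse operation: building a userName in the layout this method parses (keyId, blockPoolId, Base64-encoded nonce, joined by a delimiter). The delimiter constant below is a stand-in for NAME_DELIMITER, whose actual value is not shown in this excerpt; treat the helper as illustrative, not as part of DataTransferEncryptor.

import org.apache.commons.codec.binary.Base64;

// Illustrative only: assembles the three components that
// getEncryptionKeyFromUserName() splits apart. DELIMITER is an assumption
// standing in for NAME_DELIMITER.
public class EncodedUserNameSketch {
  private static final String DELIMITER = " ";

  static String buildUserName(int keyId, String blockPoolId, byte[] nonce) {
    // Base64-encode the nonce so its binary bytes survive the string split.
    return keyId + DELIMITER + blockPoolId + DELIMITER
        + Base64.encodeBase64String(nonce);
  }
}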
Project: hadoop-plus    File: DataNode.java
/**
 * This method starts the data node with the specified conf.
 * 
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, 
                   AbstractList<File> dataDirs,
                  // DatanodeProtocol namenode,
                   SecureResources resources
                   ) throws IOException {
  if(UserGroupInformation.isSecurityEnabled() && resources == null) {
    if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
      throw new RuntimeException("Cannot start secure cluster without "
          + "privileged resources.");
    }
  }

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  this.dataDirs = dataDirs;
  this.conf = conf;
  this.dnConf = new DNConf(conf);

  storage = new DataStorage();

  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);

  // BlockPoolTokenSecretManager is required to create ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());

  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shutdown its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
}
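The BlockPoolTokenSecretManager constructed above starts out empty; a per-block-pool BlockTokenSecretManager is registered with it as each namespace comes online. The sketch below shows that wiring using addBlockPool and isBlockPoolRegistered, which is our reading of the BlockPoolTokenSecretManager API; the surrounding helper method is illustrative rather than the exact DataNode code.

import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;

// Sketch: register a per-pool secret manager once the block pool ID is known.
// The helper itself is hypothetical; the calls on BlockPoolTokenSecretManager
// reflect the upstream API as we understand it.
static void registerBlockPool(BlockPoolTokenSecretManager bpMgr,
    String blockPoolId, BlockTokenSecretManager perPoolMgr) {
  if (!bpMgr.isBlockPoolRegistered(blockPoolId)) {
    bpMgr.addBlockPool(blockPoolId, perPoolMgr);
  }
}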
Project: PDHC    File: CheckerNode.java
/**
 * This method starts the data node with the specified conf.
 * 
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, 
                   AbstractList<File> dataDirs,
                  // DatanodeProtocol namenode,
                   SecureResources resources
                   ) throws IOException {
  if(UserGroupInformation.isSecurityEnabled() && resources == null) {
    if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
      throw new RuntimeException("Cannot start secure cluster without "
          + "privileged resources.");
    }
  }

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  this.dataDirs = dataDirs;
  this.conf = conf;
  this.dnConf = new DNConf(conf);

  storage = new DataStorage();

  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);

  // BlockPoolTokenSecretManager is required to create ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());

  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shutdown its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
}
Project: hops    File: DataTransferEncryptor.java
/**
 * Given a secret manager and a username encoded as described above,
 * determine
 * the encryption key.
 *
 * @param blockPoolTokenSecretManager
 *     to determine the encryption key.
 * @param userName
 *     containing the keyId, blockPoolId, and nonce.
 * @return secret encryption key.
 * @throws IOException
 */
private static byte[] getEncryptionKeyFromUserName(
    BlockPoolTokenSecretManager blockPoolTokenSecretManager, String userName)
    throws IOException {
  String[] nameComponents = userName.split(NAME_DELIMITER);
  if (nameComponents.length != 3) {
    throw new IOException("Provided name '" + userName + "' has " +
        nameComponents.length + " components instead of the expected 3.");
  }
  int keyId = Integer.parseInt(nameComponents[0]);
  String blockPoolId = nameComponents[1];
  byte[] nonce = Base64.decodeBase64(nameComponents[2]);
  return blockPoolTokenSecretManager
      .retrieveDataEncryptionKey(keyId, blockPoolId, nonce);
}
Project: hops    File: DataNode.java
/**
 * This method starts the data node with the specified conf.
 *
 * @param conf
 *     - the configuration
 *     if conf's CONFIG_PROPERTY_SIMULATED property is set
 *     then a simulated storage based data node is created.
 * @param dataDirs
 *     - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs,
    // DatanodeProtocol namenode,
    SecureResources resources) throws IOException {
  if (UserGroupInformation.isSecurityEnabled() && resources == null) {
    if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
      throw new RuntimeException(
          "Cannot start secure cluster without " + "privileged resources.");
    }
  }

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  this.dataDirs = dataDirs;
  this.conf = conf;
  this.dnConf = new DNConf(conf);

  storage = new DataStorage();

  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);

  // BlockPoolTokenSecretManager is required to create ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());

  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shutdown its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
}
Project: hadoop-TCP    File: DataTransferEncryptor.java
/**
 * Given a secret manager and a username encoded as described above, determine
 * the encryption key.
 * 
 * @param blockPoolTokenSecretManager to determine the encryption key.
 * @param userName containing the keyId, blockPoolId, and nonce.
 * @return secret encryption key.
 * @throws IOException
 */
private static byte[] getEncryptionKeyFromUserName(
    BlockPoolTokenSecretManager blockPoolTokenSecretManager, String userName)
    throws IOException {
  String[] nameComponents = userName.split(NAME_DELIMITER);
  if (nameComponents.length != 3) {
    throw new IOException("Provided name '" + userName + "' has " +
        nameComponents.length + " components instead of the expected 3.");
  }
  int keyId = Integer.parseInt(nameComponents[0]);
  String blockPoolId = nameComponents[1];
  byte[] nonce = Base64.decodeBase64(nameComponents[2]);
  return blockPoolTokenSecretManager.retrieveDataEncryptionKey(keyId,
      blockPoolId, nonce);
}
Project: hardfs    File: DataTransferEncryptor.java
/**
 * Given a secret manager and a username encoded as described above, determine
 * the encryption key.
 * 
 * @param blockPoolTokenSecretManager to determine the encryption key.
 * @param userName containing the keyId, blockPoolId, and nonce.
 * @return secret encryption key.
 * @throws IOException
 */
private static byte[] getEncryptionKeyFromUserName(
    BlockPoolTokenSecretManager blockPoolTokenSecretManager, String userName)
    throws IOException {
  String[] nameComponents = userName.split(NAME_DELIMITER);
  if (nameComponents.length != 3) {
    throw new IOException("Provided name '" + userName + "' has " +
        nameComponents.length + " components instead of the expected 3.");
  }
  int keyId = Integer.parseInt(nameComponents[0]);
  String blockPoolId = nameComponents[1];
  byte[] nonce = Base64.decodeBase64(nameComponents[2]);
  return blockPoolTokenSecretManager.retrieveDataEncryptionKey(keyId,
      blockPoolId, nonce);
}
Project: hadoop-on-lustre2    File: DataTransferEncryptor.java
/**
 * Given a secret manager and a username encoded as described above, determine
 * the encryption key.
 * 
 * @param blockPoolTokenSecretManager to determine the encryption key.
 * @param userName containing the keyId, blockPoolId, and nonce.
 * @return secret encryption key.
 * @throws IOException
 */
private static byte[] getEncryptionKeyFromUserName(
    BlockPoolTokenSecretManager blockPoolTokenSecretManager, String userName)
    throws IOException {
  String[] nameComponents = userName.split(NAME_DELIMITER);
  if (nameComponents.length != 3) {
    throw new IOException("Provided name '" + userName + "' has " +
        nameComponents.length + " components instead of the expected 3.");
  }
  int keyId = Integer.parseInt(nameComponents[0]);
  String blockPoolId = nameComponents[1];
  byte[] nonce = Base64.decodeBase64(nameComponents[2]);
  return blockPoolTokenSecretManager.retrieveDataEncryptionKey(keyId,
      blockPoolId, nonce);
}
Project: hadoop-plus    File: DataTransferEncryptor.java
/**
 * Factory method for DNs, where the nonce, keyId, and encryption key are not
 * yet known. The nonce and keyId will be sent by the client, and the DN
 * will then use those pieces of info and the secret key shared with the NN
 * to determine the encryptionKey used for the SASL handshake/encryption.
 * 
 * Establishes a secure connection assuming that the party on the other end
 * has the same shared secret. This does a SASL connection handshake, but not
 * a general-purpose one. It's specific to the MD5-DIGEST SASL mechanism with
 * auth-conf enabled. In particular, it doesn't support an arbitrary number of
 * challenge/response rounds, and we know that the client will never have an
 * initial response, so we don't check for one.
 *
 * @param underlyingOut output stream to write to the other party
 * @param underlyingIn input stream to read from the other party
 * @param blockPoolTokenSecretManager secret manager capable of constructing
 *        encryption key based on keyId, blockPoolId, and nonce
 * @return a pair of streams which wrap the given streams and encrypt/decrypt
 *         all data read/written
 * @throws IOException in the event of error
 */
public static IOStreamPair getEncryptedStreams(
    OutputStream underlyingOut, InputStream underlyingIn,
    BlockPoolTokenSecretManager blockPoolTokenSecretManager,
    String encryptionAlgorithm) throws IOException {

  DataInputStream in = new DataInputStream(underlyingIn);
  DataOutputStream out = new DataOutputStream(underlyingOut);

  Map<String, String> saslProps = Maps.newHashMap(SASL_PROPS);
  saslProps.put("com.sun.security.sasl.digest.cipher", encryptionAlgorithm);

  if (LOG.isDebugEnabled()) {
    LOG.debug("Server using encryption algorithm " + encryptionAlgorithm);
  }

  SaslParticipant sasl = new SaslParticipant(Sasl.createSaslServer(MECHANISM,
      PROTOCOL, SERVER_NAME, saslProps,
      new SaslServerCallbackHandler(blockPoolTokenSecretManager)));

  int magicNumber = in.readInt();
  if (magicNumber != ENCRYPTED_TRANSFER_MAGIC_NUMBER) {
    throw new InvalidMagicNumberException(magicNumber);
  }
  try {
    // step 1
    performSaslStep1(out, in, sasl);

    // step 2 (server-side only)
    byte[] remoteResponse = readSaslMessage(in);
    byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
    sendSaslMessage(out, localResponse);

    // SASL handshake is complete
    checkSaslComplete(sasl);

    return sasl.createEncryptedStreamPair(out, in);
  } catch (IOException ioe) {
    if (ioe instanceof SaslException &&
        ioe.getCause() != null &&
        ioe.getCause() instanceof InvalidEncryptionKeyException) {
      // This could just be because the client is long-lived and hasn't gotten
      // a new encryption key from the NN in a while. Upon receiving this
      // error, the client will get a new encryption key from the NN and retry
      // connecting to this DN.
      sendInvalidKeySaslErrorMessage(out, ioe.getCause().getMessage());
    } else {
      sendGenericSaslErrorMessage(out, ioe.getMessage());
    }
    throw ioe;
  }
}
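The catch block above reports InvalidEncryptionKeyException back to the caller precisely so that a long-lived client can refresh its key and retry, as the inline comment describes. A hedged sketch of that client-side pattern follows; KeySource and its methods are illustrative placeholders (e.g. freshKey() would ask the NameNode for a new DataEncryptionKey), not HDFS APIs.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;

// Hypothetical client-side counterpart of the error path above: if the DN
// rejects the cached key, obtain a fresh one and redo the handshake once.
interface KeySource {
  DataEncryptionKey freshKey() throws IOException;                  // placeholder
  IOStreamPair handshake(DataEncryptionKey key) throws IOException; // placeholder
}

static IOStreamPair connectWithRetry(KeySource source) throws IOException {
  DataEncryptionKey key = source.freshKey();
  try {
    return source.handshake(key);
  } catch (InvalidEncryptionKeyException e) {
    // Stale key: fetch a new one from the NameNode and retry once.
    return source.handshake(source.freshKey());
  }
}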
Project: hadoop-plus    File: DataTransferEncryptor.java
public SaslServerCallbackHandler(BlockPoolTokenSecretManager
    blockPoolTokenSecretManager) {
  this.blockPoolTokenSecretManager = blockPoolTokenSecretManager;
}
Project: hops    File: DataTransferEncryptor.java
/**
 * Factory method for DNs, where the nonce, keyId, and encryption key are not
 * yet known. The nonce and keyId will be sent by the client, and the DN
 * will then use those pieces of info and the secret key shared with the NN
 * to determine the encryptionKey used for the SASL handshake/encryption.
 * <p/>
 * Establishes a secure connection assuming that the party on the other end
 * has the same shared secret. This does a SASL connection handshake, but not
 * a general-purpose one. It's specific to the MD5-DIGEST SASL mechanism with
 * auth-conf enabled. In particular, it doesn't support an arbitrary number
 * of
 * challenge/response rounds, and we know that the client will never have an
 * initial response, so we don't check for one.
 *
 * @param underlyingOut
 *     output stream to write to the other party
 * @param underlyingIn
 *     input stream to read from the other party
 * @param blockPoolTokenSecretManager
 *     secret manager capable of constructing
 *     encryption key based on keyId, blockPoolId, and nonce
 * @return a pair of streams which wrap the given streams and encrypt/decrypt
 * all data read/written
 * @throws IOException
 *     in the event of error
 */
public static IOStreamPair getEncryptedStreams(OutputStream underlyingOut,
    InputStream underlyingIn,
    BlockPoolTokenSecretManager blockPoolTokenSecretManager,
    String encryptionAlgorithm) throws IOException {

  DataInputStream in = new DataInputStream(underlyingIn);
  DataOutputStream out = new DataOutputStream(underlyingOut);

  Map<String, String> saslProps = Maps.newHashMap(SASL_PROPS);
  saslProps.put("com.sun.security.sasl.digest.cipher", encryptionAlgorithm);

  if (LOG.isDebugEnabled()) {
    LOG.debug("Server using encryption algorithm " + encryptionAlgorithm);
  }

  SaslParticipant sasl = new SaslParticipant(
      Sasl.createSaslServer(MECHANISM, PROTOCOL, SERVER_NAME, saslProps,
          new SaslServerCallbackHandler(blockPoolTokenSecretManager)));

  int magicNumber = in.readInt();
  if (magicNumber != ENCRYPTED_TRANSFER_MAGIC_NUMBER) {
    throw new InvalidMagicNumberException(magicNumber);
  }
  try {
    // step 1
    performSaslStep1(out, in, sasl);

    // step 2 (server-side only)
    byte[] remoteResponse = readSaslMessage(in);
    byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
    sendSaslMessage(out, localResponse);

    // SASL handshake is complete
    checkSaslComplete(sasl);

    return sasl.createEncryptedStreamPair(out, in);
  } catch (IOException ioe) {
    if (ioe instanceof SaslException &&
        ioe.getCause() != null &&
        ioe.getCause() instanceof InvalidEncryptionKeyException) {
      // This could just be because the client is long-lived and hasn't gotten
      // a new encryption key from the NN in a while. Upon receiving this
      // error, the client will get a new encryption key from the NN and retry
      // connecting to this DN.
      sendInvalidKeySaslErrorMessage(out, ioe.getCause().getMessage());
    } else {
      sendGenericSaslErrorMessage(out, ioe.getMessage());
    }
    throw ioe;
  }
}
Project: hops    File: DataTransferEncryptor.java
public SaslServerCallbackHandler(
    BlockPoolTokenSecretManager blockPoolTokenSecretManager) {
  this.blockPoolTokenSecretManager = blockPoolTokenSecretManager;
}
Project: hadoop-TCP    File: DataTransferEncryptor.java
/**
 * Factory method for DNs, where the nonce, keyId, and encryption key are not
 * yet known. The nonce and keyId will be sent by the client, and the DN
 * will then use those pieces of info and the secret key shared with the NN
 * to determine the encryptionKey used for the SASL handshake/encryption.
 * 
 * Establishes a secure connection assuming that the party on the other end
 * has the same shared secret. This does a SASL connection handshake, but not
 * a general-purpose one. It's specific to the MD5-DIGEST SASL mechanism with
 * auth-conf enabled. In particular, it doesn't support an arbitrary number of
 * challenge/response rounds, and we know that the client will never have an
 * initial response, so we don't check for one.
 *
 * @param underlyingOut output stream to write to the other party
 * @param underlyingIn input stream to read from the other party
 * @param blockPoolTokenSecretManager secret manager capable of constructing
 *        encryption key based on keyId, blockPoolId, and nonce
 * @return a pair of streams which wrap the given streams and encrypt/decrypt
 *         all data read/written
 * @throws IOException in the event of error
 */
public static IOStreamPair getEncryptedStreams(
    OutputStream underlyingOut, InputStream underlyingIn,
    BlockPoolTokenSecretManager blockPoolTokenSecretManager,
    String encryptionAlgorithm) throws IOException {

  DataInputStream in = new DataInputStream(underlyingIn);
  DataOutputStream out = new DataOutputStream(underlyingOut);

  Map<String, String> saslProps = Maps.newHashMap(SASL_PROPS);
  saslProps.put("com.sun.security.sasl.digest.cipher", encryptionAlgorithm);

  if (LOG.isDebugEnabled()) {
    LOG.debug("Server using encryption algorithm " + encryptionAlgorithm);
  }

  SaslParticipant sasl = new SaslParticipant(Sasl.createSaslServer(MECHANISM,
      PROTOCOL, SERVER_NAME, saslProps,
      new SaslServerCallbackHandler(blockPoolTokenSecretManager)));

  int magicNumber = in.readInt();
  if (magicNumber != ENCRYPTED_TRANSFER_MAGIC_NUMBER) {
    throw new InvalidMagicNumberException(magicNumber);
  }
  try {
    // step 1
    performSaslStep1(out, in, sasl);

    // step 2 (server-side only)
    byte[] remoteResponse = readSaslMessage(in);
    byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
    sendSaslMessage(out, localResponse);

    // SASL handshake is complete
    checkSaslComplete(sasl);

    return sasl.createEncryptedStreamPair(out, in);
  } catch (IOException ioe) {
    if (ioe instanceof SaslException &&
        ioe.getCause() != null &&
        ioe.getCause() instanceof InvalidEncryptionKeyException) {
      // This could just be because the client is long-lived and hasn't gotten
      // a new encryption key from the NN in a while. Upon receiving this
      // error, the client will get a new encryption key from the NN and retry
      // connecting to this DN.
      sendInvalidKeySaslErrorMessage(out, ioe.getCause().getMessage());
    } else {
      sendGenericSaslErrorMessage(out, ioe.getMessage());
    }
    throw ioe;
  }
}
Project: hadoop-TCP    File: DataTransferEncryptor.java
public SaslServerCallbackHandler(BlockPoolTokenSecretManager
    blockPoolTokenSecretManager) {
  this.blockPoolTokenSecretManager = blockPoolTokenSecretManager;
}
Project: hardfs    File: DataTransferEncryptor.java
/**
 * Factory method for DNs, where the nonce, keyId, and encryption key are not
 * yet known. The nonce and keyId will be sent by the client, and the DN
 * will then use those pieces of info and the secret key shared with the NN
 * to determine the encryptionKey used for the SASL handshake/encryption.
 * 
 * Establishes a secure connection assuming that the party on the other end
 * has the same shared secret. This does a SASL connection handshake, but not
 * a general-purpose one. It's specific to the MD5-DIGEST SASL mechanism with
 * auth-conf enabled. In particular, it doesn't support an arbitrary number of
 * challenge/response rounds, and we know that the client will never have an
 * initial response, so we don't check for one.
 *
 * @param underlyingOut output stream to write to the other party
 * @param underlyingIn input stream to read from the other party
 * @param blockPoolTokenSecretManager secret manager capable of constructing
 *        encryption key based on keyId, blockPoolId, and nonce
 * @return a pair of streams which wrap the given streams and encrypt/decrypt
 *         all data read/written
 * @throws IOException in the event of error
 */
public static IOStreamPair getEncryptedStreams(
    OutputStream underlyingOut, InputStream underlyingIn,
    BlockPoolTokenSecretManager blockPoolTokenSecretManager,
    String encryptionAlgorithm) throws IOException {

  DataInputStream in = new DataInputStream(underlyingIn);
  DataOutputStream out = new DataOutputStream(underlyingOut);

  Map<String, String> saslProps = Maps.newHashMap(SASL_PROPS);
  saslProps.put("com.sun.security.sasl.digest.cipher", encryptionAlgorithm);

  if (LOG.isDebugEnabled()) {
    LOG.debug("Server using encryption algorithm " + encryptionAlgorithm);
  }

  SaslParticipant sasl = new SaslParticipant(Sasl.createSaslServer(MECHANISM,
      PROTOCOL, SERVER_NAME, saslProps,
      new SaslServerCallbackHandler(blockPoolTokenSecretManager)));

  int magicNumber = in.readInt();
  if (magicNumber != ENCRYPTED_TRANSFER_MAGIC_NUMBER) {
    throw new InvalidMagicNumberException(magicNumber);
  }
  try {
    // step 1
    performSaslStep1(out, in, sasl);

    // step 2 (server-side only)
    byte[] remoteResponse = readSaslMessage(in);
    byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
    sendSaslMessage(out, localResponse);

    // SASL handshake is complete
    checkSaslComplete(sasl);

    return sasl.createEncryptedStreamPair(out, in);
  } catch (IOException ioe) {
    if (ioe instanceof SaslException &&
        ioe.getCause() != null &&
        ioe.getCause() instanceof InvalidEncryptionKeyException) {
      // This could just be because the client is long-lived and hasn't gotten
      // a new encryption key from the NN in a while. Upon receiving this
      // error, the client will get a new encryption key from the NN and retry
      // connecting to this DN.
      sendInvalidKeySaslErrorMessage(out, ioe.getCause().getMessage());
    } else {
      sendGenericSaslErrorMessage(out, ioe.getMessage());
    }
    throw ioe;
  }
}
Project: hardfs    File: DataTransferEncryptor.java
public SaslServerCallbackHandler(BlockPoolTokenSecretManager
    blockPoolTokenSecretManager) {
  this.blockPoolTokenSecretManager = blockPoolTokenSecretManager;
}
Project: hadoop-on-lustre2    File: DataTransferEncryptor.java
/**
 * Factory method for DNs, where the nonce, keyId, and encryption key are not
 * yet known. The nonce and keyId will be sent by the client, and the DN
 * will then use those pieces of info and the secret key shared with the NN
 * to determine the encryptionKey used for the SASL handshake/encryption.
 * 
 * Establishes a secure connection assuming that the party on the other end
 * has the same shared secret. This does a SASL connection handshake, but not
 * a general-purpose one. It's specific to the MD5-DIGEST SASL mechanism with
 * auth-conf enabled. In particular, it doesn't support an arbitrary number of
 * challenge/response rounds, and we know that the client will never have an
 * initial response, so we don't check for one.
 *
 * @param underlyingOut output stream to write to the other party
 * @param underlyingIn input stream to read from the other party
 * @param blockPoolTokenSecretManager secret manager capable of constructing
 *        encryption key based on keyId, blockPoolId, and nonce
 * @return a pair of streams which wrap the given streams and encrypt/decrypt
 *         all data read/written
 * @throws IOException in the event of error
 */
public static IOStreamPair getEncryptedStreams(
    OutputStream underlyingOut, InputStream underlyingIn,
    BlockPoolTokenSecretManager blockPoolTokenSecretManager,
    String encryptionAlgorithm) throws IOException {

  DataInputStream in = new DataInputStream(underlyingIn);
  DataOutputStream out = new DataOutputStream(underlyingOut);

  Map<String, String> saslProps = Maps.newHashMap(SASL_PROPS);
  saslProps.put("com.sun.security.sasl.digest.cipher", encryptionAlgorithm);

  if (LOG.isDebugEnabled()) {
    LOG.debug("Server using encryption algorithm " + encryptionAlgorithm);
  }

  SaslParticipant sasl = new SaslParticipant(Sasl.createSaslServer(MECHANISM,
      PROTOCOL, SERVER_NAME, saslProps,
      new SaslServerCallbackHandler(blockPoolTokenSecretManager)));

  int magicNumber = in.readInt();
  if (magicNumber != ENCRYPTED_TRANSFER_MAGIC_NUMBER) {
    throw new InvalidMagicNumberException(magicNumber);
  }
  try {
    // step 1
    performSaslStep1(out, in, sasl);

    // step 2 (server-side only)
    byte[] remoteResponse = readSaslMessage(in);
    byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
    sendSaslMessage(out, localResponse);

    // SASL handshake is complete
    checkSaslComplete(sasl);

    return sasl.createEncryptedStreamPair(out, in);
  } catch (IOException ioe) {
    if (ioe instanceof SaslException &&
        ioe.getCause() != null &&
        ioe.getCause() instanceof InvalidEncryptionKeyException) {
      // This could just be because the client is long-lived and hasn't gotten
      // a new encryption key from the NN in a while. Upon receiving this
      // error, the client will get a new encryption key from the NN and retry
      // connecting to this DN.
      sendInvalidKeySaslErrorMessage(out, ioe.getCause().getMessage());
    } else {
      sendGenericSaslErrorMessage(out, ioe.getMessage());
    }
    throw ioe;
  }
}
Project: hadoop-on-lustre2    File: DataTransferEncryptor.java
public SaslServerCallbackHandler(BlockPoolTokenSecretManager
    blockPoolTokenSecretManager) {
  this.blockPoolTokenSecretManager = blockPoolTokenSecretManager;
}
Project: hadoop    File: SaslDataTransferServer.java
/**
 * Creates a new SaslDataTransferServer.
 *
 * @param dnConf configuration of DataNode
 * @param blockPoolTokenSecretManager used for checking block access tokens
 *   and encryption keys
 */
public SaslDataTransferServer(DNConf dnConf,
    BlockPoolTokenSecretManager blockPoolTokenSecretManager) {
  this.blockPoolTokenSecretManager = blockPoolTokenSecretManager;
  this.dnConf = dnConf;
}
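For reference, a minimal construction sketch showing how the DataNode side might wire this up; the field names are illustrative, and as we understand the flow the resulting object is handed to the data-transfer (DataXceiver) path to run the server side of the SASL negotiation.

// Sketch: build the server-side SASL handler from the DataNode's DNConf and
// its shared BlockPoolTokenSecretManager (field names are illustrative).
SaslDataTransferServer saslServer =
    new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);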
Project: aliyun-oss-hadoop-fs    File: SaslDataTransferServer.java
/**
 * Creates a new SaslDataTransferServer.
 *
 * @param dnConf configuration of DataNode
 * @param blockPoolTokenSecretManager used for checking block access tokens
 *   and encryption keys
 */
public SaslDataTransferServer(DNConf dnConf,
    BlockPoolTokenSecretManager blockPoolTokenSecretManager) {
  this.blockPoolTokenSecretManager = blockPoolTokenSecretManager;
  this.dnConf = dnConf;
}
Project: big-c    File: SaslDataTransferServer.java
/**
 * Creates a new SaslDataTransferServer.
 *
 * @param dnConf configuration of DataNode
 * @param blockPoolTokenSecretManager used for checking block access tokens
 *   and encryption keys
 */
public SaslDataTransferServer(DNConf dnConf,
    BlockPoolTokenSecretManager blockPoolTokenSecretManager) {
  this.blockPoolTokenSecretManager = blockPoolTokenSecretManager;
  this.dnConf = dnConf;
}
Project: hadoop-2.6.0-cdh5.4.3    File: SaslDataTransferServer.java
/**
 * Creates a new SaslDataTransferServer.
 *
 * @param dnConf configuration of DataNode
 * @param blockPoolTokenSecretManager used for checking block access tokens
 *   and encryption keys
 */
public SaslDataTransferServer(DNConf dnConf,
    BlockPoolTokenSecretManager blockPoolTokenSecretManager) {
  this.blockPoolTokenSecretManager = blockPoolTokenSecretManager;
  this.dnConf = dnConf;
}
Project: FlexMap    File: SaslDataTransferServer.java
/**
 * Creates a new SaslDataTransferServer.
 *
 * @param dnConf configuration of DataNode
 * @param blockPoolTokenSecretManager used for checking block access tokens
 *   and encryption keys
 */
public SaslDataTransferServer(DNConf dnConf,
    BlockPoolTokenSecretManager blockPoolTokenSecretManager) {
  this.blockPoolTokenSecretManager = blockPoolTokenSecretManager;
  this.dnConf = dnConf;
}