Java class org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode example source code
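
The snippets below, collected from a number of Hadoop-derived projects, show the two common uses of the BlockTokenSecretManager.AccessMode enum: DataNode-side verification of client-supplied block tokens (AccessMode.READ) and NameNode-side issuing of fresh tokens (AccessMode.WRITE). As a quick orientation, here is a minimal sketch of a READ check in the same style. The enum values (READ, WRITE, COPY, REPLACE) come from the Hadoop sources, but the AccessModeDemo class and its checkRead method are hypothetical illustrations, not part of any project listed below.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.security.token.Token;

// Hypothetical illustration, not taken from any project in this listing.
public class AccessModeDemo {
  // Assumed to be created and keyed elsewhere (the DataNode snippets use a
  // per-block-pool wrapper, blockPoolTokenSecretManager).
  private final BlockTokenSecretManager secretManager;

  public AccessModeDemo(BlockTokenSecretManager secretManager) {
    this.secretManager = secretManager;
  }

  // Mirrors the checkBlockToken()/checkReadAccess() pattern below: decode the
  // client-supplied token and verify READ access to the given block.
  public void checkRead(ExtendedBlock block, Token<BlockTokenIdentifier> token)
      throws IOException {
    BlockTokenIdentifier id = token.decodeIdentifier();
    // checkAccess() throws InvalidToken (an IOException) when the token does
    // not grant the requested AccessMode on this block.
    secretManager.checkAccess(id, null, block,
        BlockTokenSecretManager.AccessMode.READ);
  }
}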

Project: hadoop    File: DataNode.java
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  Preconditions.checkNotNull(data, "Storage not yet initialized");
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block
            + " blockfile " + info.getBlockPath() + " metafile "
            + info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo for block=" + block
            + " returning null");
      }
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
Project: hadoop    File: DataNode.java
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(
    String bpId, long[] blockIds,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException, 
    UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata"
        + " is not enabled in datanode config");
  }
  if (blockIds.length != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Check access for each block
  for (int i = 0; i < blockIds.length; i++) {
    checkBlockToken(new ExtendedBlock(bpId, blockIds[i]),
        tokens.get(i), BlockTokenSecretManager.AccessMode.READ);
  }

  DataNodeFaultInjector.get().getHdfsBlocksMetadata();

  return data.getHdfsBlocksMetadata(bpId, blockIds);
}
Project: hadoop    File: DataNode.java
private void checkReadAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
Project: big-c    File: FSNamesystem.java
/**
 * Get a new generation stamp together with an access token for 
 * a block under construction
 * 
 * This method is called for recovering a failed pipeline or setting up
 * a pipeline to append to a block.
 * 
 * @param block a block
 * @param clientName the name of a client
 * @return a located block with a new generation stamp and an access token
 * @throws IOException if any error occurs
 */
LocatedBlock updateBlockForPipeline(ExtendedBlock block, 
    String clientName) throws IOException {
  LocatedBlock locatedBlock;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);

    // check validity of parameters
    checkUCBlock(block, clientName);

    // get a new generation stamp and an access token
    block.setGenerationStamp(nextGenerationStamp(blockIdManager.isLegacyBlock(block.getLocalBlock())));
    locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
    blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
  } finally {
    writeUnlock();
  }
  // Ensure we record the new generation stamp
  getEditLog().logSync();
  return locatedBlock;
}
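
The FSNamesystem snippets in this listing show the other side of the token exchange: while the DataNode methods verify client-supplied tokens with AccessMode.READ, updateBlockForPipeline mints a fresh AccessMode.WRITE token (via blockManager.setBlockToken) after assigning the block under recovery a new generation stamp, so the client can re-establish the write pipeline.
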
Project: big-c    File: DataNode.java
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  Preconditions.checkNotNull(data, "Storage not yet initialized");
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block
            + " blockfile " + info.getBlockPath() + " metafile "
            + info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo for block=" + block
            + " returning null");
      }
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
Project: big-c    File: DataNode.java
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(
    String bpId, long[] blockIds,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException, 
    UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata"
        + " is not enabled in datanode config");
  }
  if (blockIds.length != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Check access for each block
  for (int i = 0; i < blockIds.length; i++) {
    checkBlockToken(new ExtendedBlock(bpId, blockIds[i]),
        tokens.get(i), BlockTokenSecretManager.AccessMode.READ);
  }

  DataNodeFaultInjector.get().getHdfsBlocksMetadata();

  return data.getHdfsBlocksMetadata(bpId, blockIds);
}
Project: big-c    File: DataNode.java
private void checkReadAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSNamesystem.java
/**
 * Get a new generation stamp together with an access token for 
 * a block under construction
 * 
 * This method is called for recovering a failed pipeline or setting up
 * a pipeline to append to a block.
 * 
 * @param block a block
 * @param clientName the name of a client
 * @return a located block with a new generation stamp and an access token
 * @throws IOException if any error occurs
 */
LocatedBlock updateBlockForPipeline(ExtendedBlock block, 
    String clientName) throws IOException {
  LocatedBlock locatedBlock;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);

    // check validity of parameters
    checkUCBlock(block, clientName);

    // get a new generation stamp and an access token
    block.setGenerationStamp(
        nextGenerationStamp(isLegacyBlock(block.getLocalBlock())));
    locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
    blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
  } finally {
    writeUnlock();
  }
  // Ensure we record the new generation stamp
  getEditLog().logSync();
  return locatedBlock;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataNode.java
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  Preconditions.checkNotNull(data, "Storage not yet initialized");
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block
            + " blockfile " + info.getBlockPath() + " metafile "
            + info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo for block=" + block
            + " returning null");
      }
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataNode.java
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(
    String bpId, long[] blockIds,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException, 
    UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata"
        + " is not enabled in datanode config");
  }
  if (blockIds.length != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Check access for each block
  for (int i = 0; i < blockIds.length; i++) {
    checkBlockToken(new ExtendedBlock(bpId, blockIds[i]),
        tokens.get(i), BlockTokenSecretManager.AccessMode.READ);
  }

  DataNodeFaultInjector.get().getHdfsBlocksMetadata();

  return data.getHdfsBlocksMetadata(bpId, blockIds);
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataNode.java
private void checkReadAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
Project: hadoop-plus    File: FSNamesystem.java
/**
 * Get a new generation stamp together with an access token for 
 * a block under construction
 * 
 * This method is called for recovering a failed pipeline or setting up
 * a pipeline to append to a block.
 * 
 * @param block a block
 * @param clientName the name of a client
 * @return a located block with a new generation stamp and an access token
 * @throws IOException if any error occurs
 */
LocatedBlock updateBlockForPipeline(ExtendedBlock block, 
    String clientName) throws IOException {
  LocatedBlock locatedBlock;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);

    // check validity of parameters
    checkUCBlock(block, clientName);

    // get a new generation stamp and an access token
    block.setGenerationStamp(
        nextGenerationStamp(isLegacyBlock(block.getLocalBlock())));
    locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
    blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
  } finally {
    writeUnlock();
  }
  // Ensure we record the new generation stamp
  getEditLog().logSync();
  return locatedBlock;
}
Project: hadoop-plus    File: DataNode.java
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block
            + " blockfile " + info.getBlockPath() + " metafile "
            + info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo for block=" + block
            + " returning null");
      }
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
Project: hadoop-plus    File: DataNode.java
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException, 
    UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata"
        + " is not enabled in datanode config");
  }
  if (blocks.size() != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Check access for each block
  for (int i = 0; i < blocks.size(); i++) {
    checkBlockToken(blocks.get(i), tokens.get(i), 
        BlockTokenSecretManager.AccessMode.READ);
  }
  return data.getHdfsBlocksMetadata(blocks);
}
Project: hadoop-plus    File: DataNode.java
private void checkWriteAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
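      // Note: despite this method's name (checkWriteAccess), these releases
      // verify READ access here.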
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
Project: PDHC    File: CheckerNode.java
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block
            + " blockfile " + info.getBlockPath() + " metafile "
            + info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo for block=" + block
            + " returning null");
      }
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
Project: PDHC    File: CheckerNode.java
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException, 
    UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata"
        + " is not enabled in datanode config");
  }
  if (blocks.size() != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Check access for each block
  for (int i = 0; i < blocks.size(); i++) {
    checkBlockToken(blocks.get(i), tokens.get(i), 
        BlockTokenSecretManager.AccessMode.READ);
  }
  return data.getHdfsBlocksMetadata(blocks);
}
Project: PDHC    File: CheckerNode.java
private void checkWriteAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
Project: FlexMap    File: FSNamesystem.java
/**
 * Get a new generation stamp together with an access token for 
 * a block under construction
 * 
 * This method is called for recovering a failed pipeline or setting up
 * a pipeline to append to a block.
 * 
 * @param block a block
 * @param clientName the name of a client
 * @return a located block with a new generation stamp and an access token
 * @throws IOException if any error occurs
 */
LocatedBlock updateBlockForPipeline(ExtendedBlock block, 
    String clientName) throws IOException {
  LocatedBlock locatedBlock;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);

    // check validity of parameters
    checkUCBlock(block, clientName);

    // get a new generation stamp and an access token
    block.setGenerationStamp(
        nextGenerationStamp(isLegacyBlock(block.getLocalBlock())));
    locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
    blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
  } finally {
    writeUnlock();
  }
  // Ensure we record the new generation stamp
  getEditLog().logSync();
  return locatedBlock;
}
Project: FlexMap    File: DataNode.java
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  Preconditions.checkNotNull(data, "Storage not yet initialized");
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block
            + " blockfile " + info.getBlockPath() + " metafile "
            + info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo for block=" + block
            + " returning null");
      }
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
Project: FlexMap    File: DataNode.java
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(
    String bpId, long[] blockIds,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException, 
    UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata"
        + " is not enabled in datanode config");
  }
  if (blockIds.length != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Check access for each block
  for (int i = 0; i < blockIds.length; i++) {
    checkBlockToken(new ExtendedBlock(bpId, blockIds[i]),
        tokens.get(i), BlockTokenSecretManager.AccessMode.READ);
  }

  DataNodeFaultInjector.get().getHdfsBlocksMetadata();

  return data.getHdfsBlocksMetadata(bpId, blockIds);
}
Project: FlexMap    File: DataNode.java
private void checkReadAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
Project: hops    File: DataNode.java
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block +
            " blockfile " + info.getBlockPath() + " metafile " +
            info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace(
            "getBlockLocalPathInfo for block=" + block + " returning null");
      }
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
Project: hops    File: DataNode.java
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
    List<Token<BlockTokenIdentifier>> tokens)
    throws IOException, UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException(
        "Datanode#getHdfsBlocksMetadata " +
            " is not enabled in datanode config");
  }
  if (blocks.size() != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Check access for each block
  for (int i = 0; i < blocks.size(); i++) {
    checkBlockToken(blocks.get(i), tokens.get(i),
        BlockTokenSecretManager.AccessMode.READ);
  }
  return data.getHdfsBlocksMetadata(blocks);
}
Project: hops    File: DataNode.java
private void checkWriteAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds =
        UserGroupInformation.getCurrentUser().getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one " +
          "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
Project: hadoop-TCP    File: FSNamesystem.java
/**
 * Get a new generation stamp together with an access token for 
 * a block under construction
 * 
 * This method is called for recovering a failed pipeline or setting up
 * a pipeline to append to a block.
 * 
 * @param block a block
 * @param clientName the name of a client
 * @return a located block with a new generation stamp and an access token
 * @throws IOException if any error occurs
 */
LocatedBlock updateBlockForPipeline(ExtendedBlock block, 
    String clientName) throws IOException {
  LocatedBlock locatedBlock;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);

    // check validity of parameters
    checkUCBlock(block, clientName);

    // get a new generation stamp and an access token
    block.setGenerationStamp(
        nextGenerationStamp(isLegacyBlock(block.getLocalBlock())));
    locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
    blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
  } finally {
    writeUnlock();
  }
  // Ensure we record the new generation stamp
  getEditLog().logSync();
  return locatedBlock;
}
Project: hadoop-TCP    File: DataNode.java
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block
            + " blockfile " + info.getBlockPath() + " metafile "
            + info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo for block=" + block
            + " returning null");
      }
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
Project: hadoop-TCP    File: DataNode.java
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException, 
    UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata"
        + " is not enabled in datanode config");
  }
  if (blocks.size() != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Check access for each block
  for (int i = 0; i < blocks.size(); i++) {
    checkBlockToken(blocks.get(i), tokens.get(i), 
        BlockTokenSecretManager.AccessMode.READ);
  }
  return data.getHdfsBlocksMetadata(blocks);
}
Project: hadoop-TCP    File: DataNode.java
private void checkWriteAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
Project: hadoop-on-lustre    File: DataNode.java
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(Block block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block
            + " blockfile " + info.getBlockPath() + " metafile "
            + info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo for block=" + block
            + " returning null");
      }
    }
  }
  myMetrics.incrBlocksGetLocalPathInfo();
  return info;
}
Project: hadoop-on-lustre    File: DataNode.java
/** Check block access token for the given access mode */
private void checkBlockToken(Block block,
    BlockTokenSecretManager.AccessMode accessMode) throws IOException {
  if (isBlockTokenEnabled && UserGroupInformation.isSecurityEnabled()) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue with "
          + "authorization since " + tokenIds.size()
          + " BlockTokenIdentifier " + "is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockTokenSecretManager.checkAccess(id, null, block, accessMode);
    }
  }
}
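
The hadoop-on-lustre variant above (like the hortonworks-extension snippet at the end of this listing) predates HDFS federation: it operates on Block rather than ExtendedBlock, gates the token check on UserGroupInformation.isSecurityEnabled(), and consults a single blockTokenSecretManager instead of the per-block-pool blockPoolTokenSecretManager used in the other projects.
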
Project: hardfs    File: FSNamesystem.java
/**
 * Get a new generation stamp together with an access token for 
 * a block under construction
 * 
 * This method is called for recovering a failed pipeline or setting up
 * a pipeline to append to a block.
 * 
 * @param block a block
 * @param clientName the name of a client
 * @return a located block with a new generation stamp and an access token
 * @throws IOException if any error occurs
 */
LocatedBlock updateBlockForPipeline(ExtendedBlock block, 
    String clientName) throws IOException {
  LocatedBlock locatedBlock;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);

    // check validity of parameters
    checkUCBlock(block, clientName);

    // get a new generation stamp and an access token
    block.setGenerationStamp(
        nextGenerationStamp(isLegacyBlock(block.getLocalBlock())));
    locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
    blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
  } finally {
    writeUnlock();
  }
  // Ensure we record the new generation stamp
  getEditLog().logSync();
  return locatedBlock;
}
Project: hardfs    File: DataNode.java
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block
            + " blockfile " + info.getBlockPath() + " metafile "
            + info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo for block=" + block
            + " returning null");
      }
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
Project: hardfs    File: DataNode.java
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException, 
    UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata"
        + " is not enabled in datanode config");
  }
  if (blocks.size() != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Check access for each block
  for (int i = 0; i < blocks.size(); i++) {
    checkBlockToken(blocks.get(i), tokens.get(i), 
        BlockTokenSecretManager.AccessMode.READ);
  }
  return data.getHdfsBlocksMetadata(blocks);
}
Project: hardfs    File: DataNode.java
private void checkWriteAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
Project: hadoop-on-lustre2    File: FSNamesystem.java
/**
 * Get a new generation stamp together with an access token for 
 * a block under construction
 * 
 * This method is called for recovering a failed pipeline or setting up
 * a pipeline to append to a block.
 * 
 * @param block a block
 * @param clientName the name of a client
 * @return a located block with a new generation stamp and an access token
 * @throws IOException if any error occurs
 */
LocatedBlock updateBlockForPipeline(ExtendedBlock block, 
    String clientName) throws IOException {
  LocatedBlock locatedBlock;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);

    // check validity of parameters
    checkUCBlock(block, clientName);

    // get a new generation stamp and an access token
    block.setGenerationStamp(
        nextGenerationStamp(isLegacyBlock(block.getLocalBlock())));
    locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
    blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
  } finally {
    writeUnlock();
  }
  // Ensure we record the new generation stamp
  getEditLog().logSync();
  return locatedBlock;
}
Project: hadoop-on-lustre2    File: DataNode.java
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block
            + " blockfile " + info.getBlockPath() + " metafile "
            + info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo for block=" + block
            + " returning null");
      }
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
Project: hadoop-on-lustre2    File: DataNode.java
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(
    String bpId, long[] blockIds,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException, 
    UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata"
        + " is not enabled in datanode config");
  }
  if (blockIds.length != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Check access for each block
  for (int i = 0; i < blockIds.length; i++) {
    checkBlockToken(new ExtendedBlock(bpId, blockIds[i]),
        tokens.get(i), BlockTokenSecretManager.AccessMode.READ);
  }

  DataNodeFaultInjector.get().getHdfsBlocksMetadata();

  return data.getHdfsBlocksMetadata(bpId, blockIds);
}
Project: hadoop-on-lustre2    File: DataNode.java
private void checkWriteAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
Project: hortonworks-extension    File: DataNode.java
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(Block block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block
            + " blockfile " + info.getBlockPath() + " metafile "
            + info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo for block=" + block
            + " returning null");
      }
    }
  }
  myMetrics.incrBlocksGetLocalPathInfo();
  return info;
}