Example source code for the Java class org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream

Project: hadoop    File: DatanodeUtil.java
/**
 * @return the FileInputStream for the meta data of the given block.
 * @throws FileNotFoundException
 *           if the file is not found.
 * @throws ClassCastException
 *           if the underlying input stream is not a FileInputStream.
 */
public static FileInputStream getMetaDataInputStream(
    ExtendedBlock b, FsDatasetSpi<?> data) throws IOException {
  final LengthInputStream lin = data.getMetaDataInputStream(b);
  if (lin == null) {
    throw new FileNotFoundException("Meta file for " + b + " not found.");
  }
  return (FileInputStream)lin.getWrappedStream();
}
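A minimal caller sketch for context (hypothetical: MetaLengthExample and metaFileLength are illustrative names, not part of Hadoop), showing how the returned stream and its declared length are consumed together:

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;

public class MetaLengthExample {
  /** Hypothetical helper: report the meta file length without reading its bytes. */
  static long metaFileLength(FsDatasetSpi<?> dataset, ExtendedBlock block)
      throws IOException {
    // getMetaDataInputStream may return null when the meta file is missing.
    final LengthInputStream in = dataset.getMetaDataInputStream(block);
    if (in == null) {
      throw new IOException("No meta file for " + block);
    }
    try {
      // LengthInputStream pairs the stream with its known byte length, so
      // callers can size buffers or derive checksum counts up front.
      return in.getLength();
    } finally {
      in.close();
    }
  }
}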
Project: hadoop    File: FsDatasetImpl.java
@Override // FsDatasetSpi
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
    throws IOException {
  File meta = FsDatasetUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
  if (meta == null || !meta.exists()) {
    return null;
  }
  if (isNativeIOAvailable) {
    return new LengthInputStream(
        NativeIO.getShareDeleteFileInputStream(meta),
        meta.length());
  }
  return new LengthInputStream(new FileInputStream(meta), meta.length());
}
Project: hadoop    File: SimulatedFSDataset.java
@Override // FsDatasetSpi
public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b
    ) throws IOException {
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b );  
  }
  if (!binfo.finalized) {
    throw new IOException("Block " + b + 
        " is being written, its meta cannot be read");
  }
  final SimulatedInputStream sin = binfo.getMetaIStream();
  return new LengthInputStream(sin, sin.getLength());
}
Project: aliyun-oss-hadoop-fs    File: DatanodeUtil.java
/**
 * @return the FileInputStream for the meta data of the given block.
 * @throws FileNotFoundException
 *           if the file is not found.
 * @throws ClassCastException
 *           if the underlying input stream is not a FileInputStream.
 */
public static FileInputStream getMetaDataInputStream(
    ExtendedBlock b, FsDatasetSpi<?> data) throws IOException {
  final LengthInputStream lin = data.getMetaDataInputStream(b);
  if (lin == null) {
    throw new FileNotFoundException("Meta file for " + b + " not found.");
  }
  return (FileInputStream)lin.getWrappedStream();
}
Project: aliyun-oss-hadoop-fs    File: FsDatasetImpl.java
@Override // FsDatasetSpi
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
    throws IOException {
  File meta = FsDatasetUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
  if (meta == null || !meta.exists()) {
    return null;
  }
  if (isNativeIOAvailable) {
    return new LengthInputStream(
        NativeIO.getShareDeleteFileInputStream(meta),
        meta.length());
  }
  return new LengthInputStream(new FileInputStream(meta), meta.length());
}
Project: aliyun-oss-hadoop-fs    File: SimulatedFSDataset.java
@Override // FsDatasetSpi
public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b
    ) throws IOException {
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b );  
  }
  if (!binfo.finalized) {
    throw new IOException("Block " + b + 
        " is being written, its meta cannot be read");
  }
  final SimulatedInputStream sin = binfo.getMetaIStream();
  return new LengthInputStream(sin, sin.getLength());
}
Project: big-c    File: DatanodeUtil.java
/**
 * @return the FileInputStream for the meta data of the given block.
 * @throws FileNotFoundException
 *           if the file is not found.
 * @throws ClassCastException
 *           if the underlying input stream is not a FileInputStream.
 */
public static FileInputStream getMetaDataInputStream(
    ExtendedBlock b, FsDatasetSpi<?> data) throws IOException {
  final LengthInputStream lin = data.getMetaDataInputStream(b);
  if (lin == null) {
    throw new FileNotFoundException("Meta file for " + b + " not found.");
  }
  return (FileInputStream)lin.getWrappedStream();
}
Project: big-c    File: FsDatasetImpl.java
@Override // FsDatasetSpi
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
    throws IOException {
  File meta = FsDatasetUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
  if (meta == null || !meta.exists()) {
    return null;
  }
  if (isNativeIOAvailable) {
    return new LengthInputStream(
        NativeIO.getShareDeleteFileInputStream(meta),
        meta.length());
  }
  return new LengthInputStream(new FileInputStream(meta), meta.length());
}
Project: big-c    File: SimulatedFSDataset.java
@Override // FsDatasetSpi
public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b
    ) throws IOException {
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b );  
  }
  if (!binfo.finalized) {
    throw new IOException("Block " + b + 
        " is being written, its meta cannot be read");
  }
  final SimulatedInputStream sin = binfo.getMetaIStream();
  return new LengthInputStream(sin, sin.getLength());
}
Project: hadoop-2.6.0-cdh5.4.3    File: DatanodeUtil.java
/**
 * @return the FileInputStream for the meta data of the given block.
 * @throws FileNotFoundException
 *           if the file is not found.
 * @throws ClassCastException
 *           if the underlying input stream is not a FileInputStream.
 */
public static FileInputStream getMetaDataInputStream(
    ExtendedBlock b, FsDatasetSpi<?> data) throws IOException {
  final LengthInputStream lin = data.getMetaDataInputStream(b);
  if (lin == null) {
    throw new FileNotFoundException("Meta file for " + b + " not found.");
  }
  return (FileInputStream)lin.getWrappedStream();
}
Project: hadoop-2.6.0-cdh5.4.3    File: FsDatasetImpl.java
@Override // FsDatasetSpi
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
    throws IOException {
  File meta = FsDatasetUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
  if (meta == null || !meta.exists()) {
    return null;
  }
  if (isNativeIOAvailable) {
    return new LengthInputStream(
        NativeIO.getShareDeleteFileInputStream(meta),
        meta.length());
  }
  return new LengthInputStream(new FileInputStream(meta), meta.length());
}
Project: hadoop-2.6.0-cdh5.4.3    File: SimulatedFSDataset.java
@Override // FsDatasetSpi
public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b
    ) throws IOException {
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b );  
  }
  if (!binfo.finalized) {
    throw new IOException("Block " + b + 
        " is being written, its meta cannot be read");
  }
  final SimulatedInputStream sin = binfo.getMetaIStream();
  return new LengthInputStream(sin, sin.getLength());
}
Project: hadoop-plus    File: FsDatasetImpl.java
@Override // FsDatasetSpi
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
    throws IOException {
  File meta = FsDatasetUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
  if (meta == null || !meta.exists()) {
    return null;
  }
  if (isNativeIOAvailable) {
    return new LengthInputStream(
        NativeIO.getShareDeleteFileInputStream(meta),
        meta.length());
  }
  return new LengthInputStream(new FileInputStream(meta), meta.length());
}
Project: hadoop-plus    File: SimulatedFSDataset.java
@Override // FsDatasetSpi
public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b
    ) throws IOException {
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b );  
  }
  if (!binfo.finalized) {
    throw new IOException("Block " + b + 
        " is being written, its meta cannot be read");
  }
  final SimulatedInputStream sin = binfo.getMetaIStream();
  return new LengthInputStream(sin, sin.getLength());
}
Project: FlexMap    File: DatanodeUtil.java
/**
 * @return the FileInputStream for the meta data of the given block.
 * @throws FileNotFoundException
 *           if the file is not found.
 * @throws ClassCastException
 *           if the underlying input stream is not a FileInputStream.
 */
public static FileInputStream getMetaDataInputStream(
    ExtendedBlock b, FsDatasetSpi<?> data) throws IOException {
  final LengthInputStream lin = data.getMetaDataInputStream(b);
  if (lin == null) {
    throw new FileNotFoundException("Meta file for " + b + " not found.");
  }
  return (FileInputStream)lin.getWrappedStream();
}
Project: FlexMap    File: FsDatasetImpl.java
@Override // FsDatasetSpi
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
    throws IOException {
  File meta = FsDatasetUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
  if (meta == null || !meta.exists()) {
    return null;
  }
  if (isNativeIOAvailable) {
    return new LengthInputStream(
        NativeIO.getShareDeleteFileInputStream(meta),
        meta.length());
  }
  return new LengthInputStream(new FileInputStream(meta), meta.length());
}
Project: FlexMap    File: SimulatedFSDataset.java
@Override // FsDatasetSpi
public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b
    ) throws IOException {
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b );  
  }
  if (!binfo.finalized) {
    throw new IOException("Block " + b + 
        " is being written, its meta cannot be read");
  }
  final SimulatedInputStream sin = binfo.getMetaIStream();
  return new LengthInputStream(sin, sin.getLength());
}
Project: hops    File: FsDatasetImpl.java
@Override // FsDatasetSpi
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
    throws IOException {
  File meta =
      FsDatasetUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
  if (meta == null || !meta.exists()) {
    return null;
  }
  return new LengthInputStream(new FileInputStream(meta), meta.length());
}
Project: hops    File: SimulatedFSDataset.java
@Override // FsDatasetSpi
public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b)
    throws IOException {
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b);
  }
  if (!binfo.finalized) {
    throw new IOException("Block " + b +
        " is being written, its meta cannot be read");
  }
  final SimulatedInputStream sin = binfo.getMetaIStream();
  return new LengthInputStream(sin, sin.getLength());
}
Project: hadoop-TCP    File: FsDatasetImpl.java
@Override // FsDatasetSpi
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
    throws IOException {
  File meta = FsDatasetUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
  if (meta == null || !meta.exists()) {
    return null;
  }
  if (isNativeIOAvailable) {
    return new LengthInputStream(
        NativeIO.getShareDeleteFileInputStream(meta),
        meta.length());
  }
  return new LengthInputStream(new FileInputStream(meta), meta.length());
}
Project: hadoop-TCP    File: SimulatedFSDataset.java
@Override // FsDatasetSpi
public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b
    ) throws IOException {
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b );  
  }
  if (!binfo.finalized) {
    throw new IOException("Block " + b + 
        " is being written, its meta cannot be read");
  }
  final SimulatedInputStream sin = binfo.getMetaIStream();
  return new LengthInputStream(sin, sin.getLength());
}
Project: hardfs    File: FsDatasetImpl.java
@Override // FsDatasetSpi
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
    throws IOException {
  File meta = FsDatasetUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
  if (meta == null || !meta.exists()) {
    return null;
  }
  if (isNativeIOAvailable) {
    return new LengthInputStream(
        NativeIO.getShareDeleteFileInputStream(meta),
        meta.length());
  }
  return new LengthInputStream(new FileInputStream(meta), meta.length());
}
Project: hardfs    File: SimulatedFSDataset.java
@Override // FsDatasetSpi
public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b
    ) throws IOException {
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b );  
  }
  if (!binfo.finalized) {
    throw new IOException("Block " + b + 
        " is being written, its meta cannot be read");
  }
  final SimulatedInputStream sin = binfo.getMetaIStream();
  return new LengthInputStream(sin, sin.getLength());
}
Project: hadoop-on-lustre2    File: SimulatedFSDataset.java
@Override // FsDatasetSpi
public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b
    ) throws IOException {
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b );  
  }
  if (!binfo.finalized) {
    throw new IOException("Block " + b + 
        " is being written, its meta cannot be read");
  }
  final SimulatedInputStream sin = binfo.getMetaIStream();
  return new LengthInputStream(sin, sin.getLength());
}
Project: hadoop    File: DataXceiver.java
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken,
      Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
  // the client can now specify a range of the block to checksum
  long requestLength = block.getNumBytes();
  Preconditions.checkArgument(requestLength >= 0);
  long visibleLength = datanode.data.getReplicaVisibleLength(block);
  boolean partialBlk = requestLength < visibleLength;

  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn = datanode.data
      .getMetaDataInputStream(block);

  final DataInputStream checksumIn = new DataInputStream(
      new BufferedInputStream(metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader
        .readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int csize = checksum.getChecksumSize();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = csize <= 0 ? 0 : 
      (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize()) / csize;

    final MD5Hash md5 = partialBlk && crcPerBlock > 0 ? 
        calcPartialBlockChecksum(block, requestLength, checksum, checksumIn)
          : MD5Hash.digest(checksumIn);
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()             
        .setBytesPerCrc(bytesPerCRC)
        .setCrcPerBlock(crcPerBlock)
        .setMd5(ByteString.copyFrom(md5.getDigest()))
        .setCrcType(PBHelper.convert(checksum.getChecksumType())))
      .build()
      .writeDelimitedTo(out);
    out.flush();
  } catch (IOException ioe) {
    LOG.info("blockChecksum " + block + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
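As a worked sketch of the crcPerBlock arithmetic above (all numbers are illustrative; the 7-byte header size is an assumption standing in for BlockMetadataHeader.getHeaderSize()):

public class CrcPerBlockSketch {
  public static void main(String[] args) {
    // Illustrative: a 128 MB finalized block, CRC-32 (4 bytes) per 512-byte chunk.
    final long blockBytes  = 128L * 1024 * 1024;
    final int  bytesPerCRC = 512;  // checksum.getBytesPerChecksum()
    final int  csize       = 4;    // checksum.getChecksumSize()
    final long headerSize  = 7;    // assumed BlockMetadataHeader.getHeaderSize()

    // The meta file holds the header followed by one checksum per chunk.
    final long chunks     = (blockBytes + bytesPerCRC - 1) / bytesPerCRC;
    final long metaLength = headerSize + chunks * csize;

    // Same formula as blockChecksum() above, guarded against csize == 0.
    final long crcPerBlock = csize <= 0 ? 0 : (metaLength - headerSize) / csize;
    System.out.println(crcPerBlock);  // prints 262144
  }
}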
Project: hadoop    File: ExternalDatasetImpl.java
@Override
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
    throws IOException {
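  // Stub dataset used to exercise the FsDatasetSpi contract in tests:
  // no backing stream, and the length is reported as zero.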
  return new LengthInputStream(null, 0);
}
Project: aliyun-oss-hadoop-fs    File: DataXceiver.java
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken,
      Op.BLOCK_CHECKSUM, BlockTokenIdentifier.AccessMode.READ);
  // the client can now specify a range of the block to checksum
  long requestLength = block.getNumBytes();
  Preconditions.checkArgument(requestLength >= 0);
  long visibleLength = datanode.data.getReplicaVisibleLength(block);
  boolean partialBlk = requestLength < visibleLength;

  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn = datanode.data
      .getMetaDataInputStream(block);

  final DataInputStream checksumIn = new DataInputStream(
      new BufferedInputStream(metadataIn, ioFileBufferSize));
  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader
        .readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int csize = checksum.getChecksumSize();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = csize <= 0 ? 0 : 
      (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize()) / csize;

    final MD5Hash md5 = partialBlk && crcPerBlock > 0 ? 
        calcPartialBlockChecksum(block, requestLength, checksum, checksumIn)
          : MD5Hash.digest(checksumIn);
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()             
        .setBytesPerCrc(bytesPerCRC)
        .setCrcPerBlock(crcPerBlock)
        .setMd5(ByteString.copyFrom(md5.getDigest()))
        .setCrcType(PBHelperClient.convert(checksum.getChecksumType())))
      .build()
      .writeDelimitedTo(out);
    out.flush();
  } catch (IOException ioe) {
    LOG.info("blockChecksum " + block + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
Project: aliyun-oss-hadoop-fs    File: ExternalDatasetImpl.java
@Override
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
    throws IOException {
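  // Stub dataset used to exercise the FsDatasetSpi contract in tests:
  // no backing stream, and the length is reported as zero.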
  return new LengthInputStream(null, 0);
}
Project: big-c    File: DataXceiver.java
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken,
      Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
  // the client can now specify a range of the block to checksum
  long requestLength = block.getNumBytes();
  Preconditions.checkArgument(requestLength >= 0);
  long visibleLength = datanode.data.getReplicaVisibleLength(block);
  boolean partialBlk = requestLength < visibleLength;

  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn = datanode.data
      .getMetaDataInputStream(block);

  final DataInputStream checksumIn = new DataInputStream(
      new BufferedInputStream(metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader
        .readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int csize = checksum.getChecksumSize();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = csize <= 0 ? 0 : 
      (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize()) / csize;

    final MD5Hash md5 = partialBlk && crcPerBlock > 0 ? 
        calcPartialBlockChecksum(block, requestLength, checksum, checksumIn)
          : MD5Hash.digest(checksumIn);
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()             
        .setBytesPerCrc(bytesPerCRC)
        .setCrcPerBlock(crcPerBlock)
        .setMd5(ByteString.copyFrom(md5.getDigest()))
        .setCrcType(PBHelper.convert(checksum.getChecksumType())))
      .build()
      .writeDelimitedTo(out);
    out.flush();
  } catch (IOException ioe) {
    LOG.info("blockChecksum " + block + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
Project: big-c    File: ExternalDatasetImpl.java
@Override
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
    throws IOException {
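  // Stub dataset used to exercise the FsDatasetSpi contract in tests:
  // no backing stream, and the length is reported as zero.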
  return new LengthInputStream(null, 0);
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataXceiver.java
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken,
      Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
  // the client can now specify a range of the block to checksum
  long requestLength = block.getNumBytes();
  Preconditions.checkArgument(requestLength >= 0);
  long visibleLength = datanode.data.getReplicaVisibleLength(block);
  boolean partialBlk = requestLength < visibleLength;

  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn = datanode.data
      .getMetaDataInputStream(block);

  final DataInputStream checksumIn = new DataInputStream(
      new BufferedInputStream(metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader
        .readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int csize = checksum.getChecksumSize();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = csize <= 0 ? 0 : 
      (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize()) / csize;

    final MD5Hash md5 = partialBlk && crcPerBlock > 0 ? 
        calcPartialBlockChecksum(block, requestLength, checksum, checksumIn)
          : MD5Hash.digest(checksumIn);
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()             
        .setBytesPerCrc(bytesPerCRC)
        .setCrcPerBlock(crcPerBlock)
        .setMd5(ByteString.copyFrom(md5.getDigest()))
        .setCrcType(PBHelper.convert(checksum.getChecksumType())))
      .build()
      .writeDelimitedTo(out);
    out.flush();
  } catch (IOException ioe) {
    LOG.info("blockChecksum " + block + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
Project: hadoop-2.6.0-cdh5.4.3    File: ExternalDatasetImpl.java
@Override
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
    throws IOException {
  return new LengthInputStream(null, 0);
}
Project: hadoop-plus    File: DataXceiver.java
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken,
      Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn = 
    datanode.data.getMetaDataInputStream(block);
  final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
      metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));

  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum(); 
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = (metadataIn.getLength()
        - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize();

    //compute block checksum
    final MD5Hash md5 = MD5Hash.digest(checksumIn);

    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()             
        .setBytesPerCrc(bytesPerCRC)
        .setCrcPerBlock(crcPerBlock)
        .setMd5(ByteString.copyFrom(md5.getDigest()))
        .setCrcType(PBHelper.convert(checksum.getChecksumType()))
        )
      .build()
      .writeDelimitedTo(out);
    out.flush();
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
Project: FlexMap    File: DataXceiver.java
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken,
      Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
  // the client can now specify a range of the block to checksum
  long requestLength = block.getNumBytes();
  Preconditions.checkArgument(requestLength >= 0);
  long visibleLength = datanode.data.getReplicaVisibleLength(block);
  boolean partialBlk = requestLength < visibleLength;

  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn = datanode.data
      .getMetaDataInputStream(block);

  final DataInputStream checksumIn = new DataInputStream(
      new BufferedInputStream(metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader
        .readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int csize = checksum.getChecksumSize();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = csize <= 0 ? 0 : 
      (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize()) / csize;

    final MD5Hash md5 = partialBlk && crcPerBlock > 0 ? 
        calcPartialBlockChecksum(block, requestLength, checksum, checksumIn)
          : MD5Hash.digest(checksumIn);
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()             
        .setBytesPerCrc(bytesPerCRC)
        .setCrcPerBlock(crcPerBlock)
        .setMd5(ByteString.copyFrom(md5.getDigest()))
        .setCrcType(PBHelper.convert(checksum.getChecksumType())))
      .build()
      .writeDelimitedTo(out);
    out.flush();
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
Project: hops    File: DataXceiver.java
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(getOutputStream());
  checkAccess(out, true, block, blockToken, Op.BLOCK_CHECKSUM,
      BlockTokenSecretManager.AccessMode.READ);
  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn =
      datanode.data.getMetaDataInputStream(block);
  final DataInputStream checksumIn = new DataInputStream(
      new BufferedInputStream(metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));

  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header =
        BlockMetadataHeader.readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock =
        (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize()) /
            checksum.getChecksumSize();

    //compute block checksum
    final MD5Hash md5 = MD5Hash.digest(checksumIn);

    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC +
          ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder().setStatus(SUCCESS).setChecksumResponse(
        OpBlockChecksumResponseProto.newBuilder().setBytesPerCrc(bytesPerCRC)
            .setCrcPerBlock(crcPerBlock)
            .setMd5(ByteString.copyFrom(md5.getDigest()))
            .setCrcType(PBHelper.convert(checksum.getChecksumType()))).build()
        .writeDelimitedTo(out);
    out.flush();
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
Project: hadoop-TCP    File: DataXceiver.java
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken,
      Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn = 
    datanode.data.getMetaDataInputStream(block);
  final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
      metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));

  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum(); 
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = (metadataIn.getLength()
        - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize();

    //compute block checksum
    final MD5Hash md5 = MD5Hash.digest(checksumIn);

    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()             
        .setBytesPerCrc(bytesPerCRC)
        .setCrcPerBlock(crcPerBlock)
        .setMd5(ByteString.copyFrom(md5.getDigest()))
        .setCrcType(PBHelper.convert(checksum.getChecksumType()))
        )
      .build()
      .writeDelimitedTo(out);
    out.flush();
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
Project: hardfs    File: DataXceiver.java
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken,
      Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn = 
    datanode.data.getMetaDataInputStream(block);
  final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
      metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));

  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum(); 
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = (metadataIn.getLength()
        - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize();

    //compute block checksum
    final MD5Hash md5 = MD5Hash.digest(checksumIn);

    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()             
        .setBytesPerCrc(bytesPerCRC)
        .setCrcPerBlock(crcPerBlock)
        .setMd5(ByteString.copyFrom(md5.getDigest()))
        .setCrcType(PBHelper.convert(checksum.getChecksumType()))
        )
      .build()
      .writeDelimitedTo(out);
    out.flush();
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
Project: hadoop-on-lustre2    File: DataXceiver.java
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken,
      Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn = 
    datanode.data.getMetaDataInputStream(block);
  final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
      metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));

  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum(); 
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = checksum.getChecksumSize() > 0 
            ? (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize()
            : 0;

    //compute block checksum
    final MD5Hash md5 = MD5Hash.digest(checksumIn);

    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()             
        .setBytesPerCrc(bytesPerCRC)
        .setCrcPerBlock(crcPerBlock)
        .setMd5(ByteString.copyFrom(md5.getDigest()))
        .setCrcType(PBHelper.convert(checksum.getChecksumType()))
        )
      .build()
      .writeDelimitedTo(out);
    out.flush();
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
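Every variant above leans on the same small contract: LengthInputStream is a FilterInputStream that carries the payload length alongside the bytes. A minimal sketch of that contract, inferred from the call sites on this page (not the Hadoop source itself):

import java.io.FilterInputStream;
import java.io.InputStream;

class LengthInputStreamSketch extends FilterInputStream {
  private final long length;

  LengthInputStreamSketch(InputStream in, long length) {
    super(in);
    this.length = length;
  }

  /** Byte length of the stream, as reported by the dataset. */
  long getLength() {
    return length;
  }

  /** The underlying stream, e.g. a FileInputStream for an on-disk meta file. */
  InputStream getWrappedStream() {
    return in;  // the protected field inherited from FilterInputStream
  }
}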