Example source code for the Java class org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams

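For context: ReplicaInputStreams pairs an input stream over a replica's block file with one over its checksum (meta) file; in later branches it also holds an FsVolumeReference so the hosting volume cannot be removed while the streams are open. Below is a simplified sketch of the class, reconstructed from the call sites collected on this page rather than copied from the Hadoop source; the field names and stream-construction details are assumptions.

import java.io.Closeable;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.InputStream;
import org.apache.hadoop.io.IOUtils;

// Simplified sketch, reconstructed from the call sites below; not verbatim Hadoop source.
public class ReplicaInputStreams implements Closeable {
  private final InputStream dataIn;          // stream over the block file
  private final InputStream checksumIn;      // stream over the meta (checksum) file
  private final FsVolumeReference volumeRef; // pins the volume; null in older branches

  public ReplicaInputStreams(FileDescriptor dataFd, FileDescriptor checksumFd,
      FsVolumeReference volumeRef) {
    this.dataIn = dataFd == null ? null : new FileInputStream(dataFd);
    this.checksumIn = checksumFd == null ? null : new FileInputStream(checksumFd);
    this.volumeRef = volumeRef;
  }

  public InputStream getDataIn() { return dataIn; }
  public InputStream getChecksumIn() { return checksumIn; }

  @Override
  public void close() {
    // Close both streams, then release the volume reference (if any).
    IOUtils.closeStream(dataIn);
    IOUtils.closeStream(checksumIn);
    IOUtils.cleanup(null, volumeRef);
  }
}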
Project: hadoop-2.6.0-cdh5.4.3    File: FsDatasetImpl.java
/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, 
                        long blkOffset, long ckoff) throws IOException {
  ReplicaInfo info = getReplicaInfo(b);
  FsVolumeReference ref = info.getVolume().obtainReference();
  try {
    File blockFile = info.getBlockFile();
    RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r");
    if (blkOffset > 0) {
      blockInFile.seek(blkOffset);
    }
    File metaFile = info.getMetaFile();
    RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r");
    if (ckoff > 0) {
      metaInFile.seek(ckoff);
    }
    return new ReplicaInputStreams(
        blockInFile.getFD(), metaInFile.getFD(), ref);
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
}
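Callers are expected to close the returned ReplicaInputStreams, which closes both file descriptors and, in this branch, releases the FsVolumeReference. A minimal caller sketch using try-with-resources; the dataset and block variables are hypothetical stand-ins for illustration:

// Hypothetical caller: 'dataset' is an FsDatasetSpi implementation and
// 'block' an ExtendedBlock; read the first 512 bytes of replica data.
byte[] data = new byte[512];
try (ReplicaInputStreams in = dataset.getTmpInputStreams(block, 0, 0)) {
  IOUtils.readFully(in.getDataIn(), data, 0, data.length);
} // close() releases the streams and the volume reference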
Project: hadoop-plus    File: FsDatasetImpl.java
/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, 
                        long blkOffset, long ckoff) throws IOException {
  ReplicaInfo info = getReplicaInfo(b);
  File blockFile = info.getBlockFile();
  RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r");
  if (blkOffset > 0) {
    blockInFile.seek(blkOffset);
  }
  File metaFile = info.getMetaFile();
  RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r");
  if (ckoff > 0) {
    metaInFile.seek(ckoff);
  }
  return new ReplicaInputStreams(blockInFile.getFD(), metaInFile.getFD());
}
Project: FlexMap    File: FsDatasetImpl.java
/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, 
                        long blkOffset, long ckoff) throws IOException {
  ReplicaInfo info = getReplicaInfo(b);
  File blockFile = info.getBlockFile();
  RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r");
  if (blkOffset > 0) {
    blockInFile.seek(blkOffset);
  }
  File metaFile = info.getMetaFile();
  RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r");
  if (ckoff > 0) {
    metaInFile.seek(ckoff);
  }
  return new ReplicaInputStreams(blockInFile.getFD(), metaInFile.getFD());
}
Project: hops    File: FsDatasetImpl.java
/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
    long blkOffset, long ckoff) throws IOException {
  ReplicaInfo info = getReplicaInfo(b);
  File blockFile = info.getBlockFile();
  RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r");
  if (blkOffset > 0) {
    blockInFile.seek(blkOffset);
  }
  File metaFile = info.getMetaFile();
  RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r");
  if (ckoff > 0) {
    metaInFile.seek(ckoff);
  }
  return new ReplicaInputStreams(blockInFile.getFD(), metaInFile.getFD());
}
Project: hadoop-TCP    File: FsDatasetImpl.java
/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, 
                        long blkOffset, long ckoff) throws IOException {
  ReplicaInfo info = getReplicaInfo(b);
  File blockFile = info.getBlockFile();
  RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r");
  if (blkOffset > 0) {
    blockInFile.seek(blkOffset);
  }
  File metaFile = info.getMetaFile();
  RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r");
  if (ckoff > 0) {
    metaInFile.seek(ckoff);
  }
  return new ReplicaInputStreams(blockInFile.getFD(), metaInFile.getFD());
}
Project: hardfs    File: FsDatasetImpl.java
/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, 
                        long blkOffset, long ckoff) throws IOException {
  ReplicaInfo info = getReplicaInfo(b);
  File blockFile = info.getBlockFile();
  RandomAccessFile blockInFile = new RandomAccessFile(blockFile, "r");
  if (blkOffset > 0) {
    blockInFile.seek(blkOffset);
  }
  File metaFile = info.getMetaFile();
  RandomAccessFile metaInFile = new RandomAccessFile(metaFile, "r");
  if (ckoff > 0) {
    metaInFile.seek(ckoff);
  }
  return new ReplicaInputStreams(blockInFile.getFD(), metaInFile.getFD());
}
Project: hadoop    File: BlockReceiver.java
/**
 * Reads in the partial CRC chunk and computes the checksum
 * of the pre-existing data in the partial chunk.
 */
private Checksum computePartialChunkCrc(long blkoff, long ckoff)
    throws IOException {

  // find offset of the beginning of partial chunk.
  //
  int sizePartialChunk = (int) (blkoff % bytesPerChecksum);
  blkoff = blkoff - sizePartialChunk;
  if (LOG.isDebugEnabled()) {
    LOG.debug("computePartialChunkCrc for " + block
        + ": sizePartialChunk=" + sizePartialChunk
        + ", block offset=" + blkoff
        + ", metafile offset=" + ckoff);
  }

  // create an input stream from the block file
  // and read in partial crc chunk into temporary buffer
  //
  byte[] buf = new byte[sizePartialChunk];
  byte[] crcbuf = new byte[checksumSize];
  try (ReplicaInputStreams instr =
      datanode.data.getTmpInputStreams(block, blkoff, ckoff)) {
    IOUtils.readFully(instr.getDataIn(), buf, 0, sizePartialChunk);

    // open meta file and read in crc value computed earlier
    IOUtils.readFully(instr.getChecksumIn(), crcbuf, 0, crcbuf.length);
  }

  // compute crc of partial chunk from data read in the block file.
  final Checksum partialCrc = DataChecksum.newDataChecksum(
      diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
  partialCrc.update(buf, 0, sizePartialChunk);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Read in partial CRC chunk from disk for " + block);
  }

  // paranoia! verify that the pre-computed crc matches what we
  // recalculated just now
  if (partialCrc.getValue() != checksum2long(crcbuf)) {
    String msg = "Partial CRC " + partialCrc.getValue() +
                 " does not match value computed the " +
                 " last time file was closed " +
                 checksum2long(crcbuf);
    throw new IOException(msg);
  }
  return partialCrc;
}
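The checksum2long helper used in the comparison above folds the raw CRC bytes read from the meta file into a long, so they can be checked against Checksum.getValue(). A plausible reconstruction, treating the bytes as a big-endian unsigned value; the actual helper lives in BlockReceiver, and this sketch is an assumption:

// Plausible reconstruction of checksum2long: interpret the checksum
// bytes as a big-endian unsigned value comparable to Checksum.getValue().
private static long checksum2long(byte[] checksum) {
  long crc = 0L;
  for (int i = 0; i < checksum.length; i++) {
    crc |= (0xffL & (long) checksum[i]) << ((checksum.length - i - 1) * 8);
  }
  return crc;
}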
Project: hadoop    File: SimulatedFSDataset.java
/** Not supported */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
    long ckoff) throws IOException {
  throw new IOException("Not supported");
}
Project: hadoop    File: ExternalDatasetImpl.java
@Override
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
    long ckoff) throws IOException {
  return new ReplicaInputStreams(null, null, null);
}
Project: aliyun-oss-hadoop-fs    File: BlockReceiver.java
/**
 * Reads in the partial CRC chunk and computes the checksum
 * of the pre-existing data in the partial chunk.
 */
private Checksum computePartialChunkCrc(long blkoff, long ckoff)
    throws IOException {

  // find offset of the beginning of partial chunk.
  //
  int sizePartialChunk = (int) (blkoff % bytesPerChecksum);
  blkoff = blkoff - sizePartialChunk;
  if (LOG.isDebugEnabled()) {
    LOG.debug("computePartialChunkCrc for " + block
        + ": sizePartialChunk=" + sizePartialChunk
        + ", block offset=" + blkoff
        + ", metafile offset=" + ckoff);
  }

  // create an input stream from the block file
  // and read in partial crc chunk into temporary buffer
  //
  byte[] buf = new byte[sizePartialChunk];
  byte[] crcbuf = new byte[checksumSize];
  try (ReplicaInputStreams instr =
      datanode.data.getTmpInputStreams(block, blkoff, ckoff)) {
    IOUtils.readFully(instr.getDataIn(), buf, 0, sizePartialChunk);

    // open meta file and read in crc value computed earlier
    IOUtils.readFully(instr.getChecksumIn(), crcbuf, 0, crcbuf.length);
  }

  // compute crc of partial chunk from data read in the block file.
  final Checksum partialCrc = DataChecksum.newDataChecksum(
      diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
  partialCrc.update(buf, 0, sizePartialChunk);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Read in partial CRC chunk from disk for " + block);
  }

  // paranoia! verify that the pre-computed crc matches what we
  // recalculated just now
  if (partialCrc.getValue() != checksum2long(crcbuf)) {
    String msg = "Partial CRC " + partialCrc.getValue() +
                 " does not match value computed the " +
                 " last time file was closed " +
                 checksum2long(crcbuf);
    throw new IOException(msg);
  }
  return partialCrc;
}
Project: aliyun-oss-hadoop-fs    File: SimulatedFSDataset.java
/** Not supported */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
    long ckoff) throws IOException {
  throw new IOException("Not supported");
}
Project: aliyun-oss-hadoop-fs    File: ExternalDatasetImpl.java
@Override
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
    long ckoff) throws IOException {
  return new ReplicaInputStreams(null, null, null);
}
Project: big-c    File: BlockReceiver.java
/**
 * Reads in the partial CRC chunk and computes the checksum
 * of the pre-existing data in the partial chunk.
 */
private Checksum computePartialChunkCrc(long blkoff, long ckoff)
    throws IOException {

  // find offset of the beginning of partial chunk.
  //
  int sizePartialChunk = (int) (blkoff % bytesPerChecksum);
  blkoff = blkoff - sizePartialChunk;
  if (LOG.isDebugEnabled()) {
    LOG.debug("computePartialChunkCrc for " + block
        + ": sizePartialChunk=" + sizePartialChunk
        + ", block offset=" + blkoff
        + ", metafile offset=" + ckoff);
  }

  // create an input stream from the block file
  // and read in partial crc chunk into temporary buffer
  //
  byte[] buf = new byte[sizePartialChunk];
  byte[] crcbuf = new byte[checksumSize];
  try (ReplicaInputStreams instr =
      datanode.data.getTmpInputStreams(block, blkoff, ckoff)) {
    IOUtils.readFully(instr.getDataIn(), buf, 0, sizePartialChunk);

    // open meta file and read in crc value computed earlier
    IOUtils.readFully(instr.getChecksumIn(), crcbuf, 0, crcbuf.length);
  }

  // compute crc of partial chunk from data read in the block file.
  final Checksum partialCrc = DataChecksum.newDataChecksum(
      diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
  partialCrc.update(buf, 0, sizePartialChunk);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Read in partial CRC chunk from disk for " + block);
  }

  // paranoia! verify that the pre-computed crc matches what we
  // recalculated just now
  if (partialCrc.getValue() != checksum2long(crcbuf)) {
    String msg = "Partial CRC " + partialCrc.getValue() +
                 " does not match value computed the " +
                 " last time file was closed " +
                 checksum2long(crcbuf);
    throw new IOException(msg);
  }
  return partialCrc;
}
Project: big-c    File: SimulatedFSDataset.java
/** Not supported */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
    long ckoff) throws IOException {
  throw new IOException("Not supported");
}
Project: big-c    File: ExternalDatasetImpl.java
@Override
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
    long ckoff) throws IOException {
  return new ReplicaInputStreams(null, null, null);
}
Project: hadoop-2.6.0-cdh5.4.3    File: BlockReceiver.java
/**
 * Reads in the partial CRC chunk and computes the checksum
 * of the pre-existing data in the partial chunk.
 */
private Checksum computePartialChunkCrc(long blkoff, long ckoff)
    throws IOException {

  // find offset of the beginning of partial chunk.
  //
  int sizePartialChunk = (int) (blkoff % bytesPerChecksum);
  blkoff = blkoff - sizePartialChunk;
  if (LOG.isDebugEnabled()) {
    LOG.debug("computePartialChunkCrc for " + block
        + ": sizePartialChunk=" + sizePartialChunk
        + ", block offset=" + blkoff
        + ", metafile offset=" + ckoff);
  }

  // create an input stream from the block file
  // and read in partial crc chunk into temporary buffer
  //
  byte[] buf = new byte[sizePartialChunk];
  byte[] crcbuf = new byte[checksumSize];
  try (ReplicaInputStreams instr =
      datanode.data.getTmpInputStreams(block, blkoff, ckoff)) {
    IOUtils.readFully(instr.getDataIn(), buf, 0, sizePartialChunk);

    // open meta file and read in crc value computed earlier
    IOUtils.readFully(instr.getChecksumIn(), crcbuf, 0, crcbuf.length);
  }

  // compute crc of partial chunk from data read in the block file.
  final Checksum partialCrc = DataChecksum.newDataChecksum(
      diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
  partialCrc.update(buf, 0, sizePartialChunk);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Read in partial CRC chunk from disk for " + block);
  }

  // paranoia! verify that the pre-computed crc matches what we
  // recalculated just now
  if (partialCrc.getValue() != checksum2long(crcbuf)) {
    String msg = "Partial CRC " + partialCrc.getValue() +
                 " does not match value computed the " +
                 " last time file was closed " +
                 checksum2long(crcbuf);
    throw new IOException(msg);
  }
  return partialCrc;
}
Project: hadoop-2.6.0-cdh5.4.3    File: SimulatedFSDataset.java
/** Not supported */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
    long ckoff) throws IOException {
  throw new IOException("Not supported");
}
Project: hadoop-2.6.0-cdh5.4.3    File: ExternalDatasetImpl.java
@Override
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
    long ckoff) throws IOException {
  return new ReplicaInputStreams(null, null, null);
}
Project: hadoop-plus    File: BlockReceiver.java
/**
 * Reads in the partial CRC chunk and computes the checksum
 * of the pre-existing data in the partial chunk.
 */
private void computePartialChunkCrc(long blkoff, long ckoff, 
                                    int bytesPerChecksum) throws IOException {

  // find offset of the beginning of partial chunk.
  //
  int sizePartialChunk = (int) (blkoff % bytesPerChecksum);
  int checksumSize = diskChecksum.getChecksumSize();
  blkoff = blkoff - sizePartialChunk;
  LOG.info("computePartialChunkCrc sizePartialChunk " + 
            sizePartialChunk + " " + block +
            " block offset " + blkoff +
            " metafile offset " + ckoff);

  // create an input stream from the block file
  // and read in partial crc chunk into temporary buffer
  //
  byte[] buf = new byte[sizePartialChunk];
  byte[] crcbuf = new byte[checksumSize];
  ReplicaInputStreams instr = null;
  try { 
    instr = datanode.data.getTmpInputStreams(block, blkoff, ckoff);
    IOUtils.readFully(instr.getDataIn(), buf, 0, sizePartialChunk);

    // open meta file and read in crc value computed earlier
    IOUtils.readFully(instr.getChecksumIn(), crcbuf, 0, crcbuf.length);
  } finally {
    IOUtils.closeStream(instr);
  }

  // compute crc of partial chunk from data read in the block file.
  partialCrc = DataChecksum.newDataChecksum(
      diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
  partialCrc.update(buf, 0, sizePartialChunk);
  LOG.info("Read in partial CRC chunk from disk for " + block);

  // paranoia! verify that the pre-computed crc matches what we
  // recalculated just now
  if (partialCrc.getValue() != checksum2long(crcbuf)) {
    String msg = "Partial CRC " + partialCrc.getValue() +
                 " does not match value computed the " +
                 " last time file was closed " +
                 checksum2long(crcbuf);
    throw new IOException(msg);
  }
}
Project: hadoop-plus    File: SimulatedFSDataset.java
/** Not supported */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
    long ckoff) throws IOException {
  throw new IOException("Not supported");
}
Project: FlexMap    File: BlockReceiver.java
/**
 * Reads in the partial CRC chunk and computes the checksum
 * of the pre-existing data in the partial chunk.
 */
private Checksum computePartialChunkCrc(long blkoff, long ckoff)
    throws IOException {

  // find offset of the beginning of partial chunk.
  //
  int sizePartialChunk = (int) (blkoff % bytesPerChecksum);
  blkoff = blkoff - sizePartialChunk;
  if (LOG.isDebugEnabled()) {
    LOG.debug("computePartialChunkCrc for " + block
        + ": sizePartialChunk=" + sizePartialChunk
        + ", block offset=" + blkoff
        + ", metafile offset=" + ckoff);
  }

  // create an input stream from the block file
  // and read in partial crc chunk into temporary buffer
  //
  byte[] buf = new byte[sizePartialChunk];
  byte[] crcbuf = new byte[checksumSize];
  ReplicaInputStreams instr = null;
  try { 
    instr = datanode.data.getTmpInputStreams(block, blkoff, ckoff);
    IOUtils.readFully(instr.getDataIn(), buf, 0, sizePartialChunk);

    // open meta file and read in crc value computed earlier
    IOUtils.readFully(instr.getChecksumIn(), crcbuf, 0, crcbuf.length);
  } finally {
    IOUtils.closeStream(instr);
  }

  // compute crc of partial chunk from data read in the block file.
  final Checksum partialCrc = DataChecksum.newDataChecksum(
      diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
  partialCrc.update(buf, 0, sizePartialChunk);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Read in partial CRC chunk from disk for " + block);
  }

  // paranoia! verify that the pre-computed crc matches what we
  // recalculated just now
  if (partialCrc.getValue() != checksum2long(crcbuf)) {
    String msg = "Partial CRC " + partialCrc.getValue() +
                 " does not match value computed the " +
                 " last time file was closed " +
                 checksum2long(crcbuf);
    throw new IOException(msg);
  }
  return partialCrc;
}
Project: FlexMap    File: SimulatedFSDataset.java
/** Not supported */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
    long ckoff) throws IOException {
  throw new IOException("Not supported");
}
Project: hops    File: BlockReceiver.java
/**
 * Reads in the partial CRC chunk and computes the checksum
 * of the pre-existing data in the partial chunk.
 */
private void computePartialChunkCrc(long blkoff, long ckoff,
    int bytesPerChecksum) throws IOException {

  // find offset of the beginning of partial chunk.
  //
  int sizePartialChunk = (int) (blkoff % bytesPerChecksum);
  int checksumSize = diskChecksum.getChecksumSize();
  blkoff = blkoff - sizePartialChunk;
  LOG.info("computePartialChunkCrc sizePartialChunk " +
      sizePartialChunk + " " + block +
      " block offset " + blkoff +
      " metafile offset " + ckoff);

  // create an input stream from the block file
  // and read in partial crc chunk into temporary buffer
  //
  byte[] buf = new byte[sizePartialChunk];
  byte[] crcbuf = new byte[checksumSize];
  ReplicaInputStreams instr = null;
  try {
    instr = datanode.data.getTmpInputStreams(block, blkoff, ckoff);
    IOUtils.readFully(instr.getDataIn(), buf, 0, sizePartialChunk);

    // open meta file and read in crc value computed earlier
    IOUtils.readFully(instr.getChecksumIn(), crcbuf, 0, crcbuf.length);
  } finally {
    IOUtils.closeStream(instr);
  }

  // compute crc of partial chunk from data read in the block file.
  partialCrc = DataChecksum.newDataChecksum(diskChecksum.getChecksumType(),
      diskChecksum.getBytesPerChecksum());
  partialCrc.update(buf, 0, sizePartialChunk);
  LOG.info("Read in partial CRC chunk from disk for " + block);

  // paranoia! verify that the pre-computed crc matches what we
  // recalculated just now
  if (partialCrc.getValue() != checksum2long(crcbuf)) {
    String msg = "Partial CRC " + partialCrc.getValue() +
        " does not match value computed the " +
        " last time file was closed " +
        checksum2long(crcbuf);
    throw new IOException(msg);
  }
}
Project: hops    File: SimulatedFSDataset.java
/**
 * Not supported
 */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
    long ckoff) throws IOException {
  throw new IOException("Not supported");
}
Project: hadoop-TCP    File: BlockReceiver.java
/**
 * Reads in the partial CRC chunk and computes the checksum
 * of the pre-existing data in the partial chunk.
 */
private void computePartialChunkCrc(long blkoff, long ckoff, 
                                    int bytesPerChecksum) throws IOException {

  // find offset of the beginning of partial chunk.
  //
  int sizePartialChunk = (int) (blkoff % bytesPerChecksum);
  int checksumSize = diskChecksum.getChecksumSize();
  blkoff = blkoff - sizePartialChunk;
  LOG.info("computePartialChunkCrc sizePartialChunk " + 
            sizePartialChunk + " " + block +
            " block offset " + blkoff +
            " metafile offset " + ckoff);

  // create an input stream from the block file
  // and read in partial crc chunk into temporary buffer
  //
  byte[] buf = new byte[sizePartialChunk];
  byte[] crcbuf = new byte[checksumSize];
  ReplicaInputStreams instr = null;
  try { 
    instr = datanode.data.getTmpInputStreams(block, blkoff, ckoff);
    IOUtils.readFully(instr.getDataIn(), buf, 0, sizePartialChunk);

    // open meta file and read in crc value computed earlier
    IOUtils.readFully(instr.getChecksumIn(), crcbuf, 0, crcbuf.length);
  } finally {
    IOUtils.closeStream(instr);
  }

  // compute crc of partial chunk from data read in the block file.
  partialCrc = DataChecksum.newDataChecksum(
      diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
  partialCrc.update(buf, 0, sizePartialChunk);
  LOG.info("Read in partial CRC chunk from disk for " + block);

  // paranoia! verify that the pre-computed crc matches what we
  // recalculated just now
  if (partialCrc.getValue() != checksum2long(crcbuf)) {
    String msg = "Partial CRC " + partialCrc.getValue() +
                 " does not match value computed the " +
                 " last time file was closed " +
                 checksum2long(crcbuf);
    throw new IOException(msg);
  }
}
Project: hadoop-TCP    File: SimulatedFSDataset.java
/** Not supported */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
    long ckoff) throws IOException {
  throw new IOException("Not supported");
}
Project: hardfs    File: BlockReceiver.java
/**
 * Reads in the partial CRC chunk and computes the checksum
 * of the pre-existing data in the partial chunk.
 */
private void computePartialChunkCrc(long blkoff, long ckoff, 
                                    int bytesPerChecksum) throws IOException {

  // find offset of the beginning of partial chunk.
  //
  int sizePartialChunk = (int) (blkoff % bytesPerChecksum);
  int checksumSize = diskChecksum.getChecksumSize();
  blkoff = blkoff - sizePartialChunk;
  LOG.info("computePartialChunkCrc sizePartialChunk " + 
            sizePartialChunk + " " + block +
            " block offset " + blkoff +
            " metafile offset " + ckoff);

  // create an input stream from the block file
  // and read in partial crc chunk into temporary buffer
  //
  byte[] buf = new byte[sizePartialChunk];
  byte[] crcbuf = new byte[checksumSize];
  ReplicaInputStreams instr = null;
  try { 
    instr = datanode.data.getTmpInputStreams(block, blkoff, ckoff);
    IOUtils.readFully(instr.getDataIn(), buf, 0, sizePartialChunk);

    // open meta file and read in crc value computed earlier
    IOUtils.readFully(instr.getChecksumIn(), crcbuf, 0, crcbuf.length);
  } finally {
    IOUtils.closeStream(instr);
  }

  // compute crc of partial chunk from data read in the block file.
  partialCrc = DataChecksum.newDataChecksum(
      diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
  partialCrc.update(buf, 0, sizePartialChunk);
  LOG.info("Read in partial CRC chunk from disk for " + block);

  // paranoia! verify that the pre-computed crc matches what we
  // recalculated just now
  if (partialCrc.getValue() != checksum2long(crcbuf)) {
    String msg = "Partial CRC " + partialCrc.getValue() +
                 " does not match value computed the " +
                 " last time file was closed " +
                 checksum2long(crcbuf);
    throw new IOException(msg);
  }
}
Project: hardfs    File: SimulatedFSDataset.java
/** Not supported */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
    long ckoff) throws IOException {
  throw new IOException("Not supported");
}
Project: hadoop-on-lustre2    File: BlockReceiver.java
/**
 * Reads in the partial CRC chunk and computes the checksum
 * of the pre-existing data in the partial chunk.
 */
private void computePartialChunkCrc(long blkoff, long ckoff, 
                                    int bytesPerChecksum) throws IOException {

  // find offset of the beginning of partial chunk.
  //
  int sizePartialChunk = (int) (blkoff % bytesPerChecksum);
  int checksumSize = diskChecksum.getChecksumSize();
  blkoff = blkoff - sizePartialChunk;
  LOG.info("computePartialChunkCrc sizePartialChunk " + 
            sizePartialChunk + " " + block +
            " block offset " + blkoff +
            " metafile offset " + ckoff);

  // create an input stream from the block file
  // and read in partial crc chunk into temporary buffer
  //
  byte[] buf = new byte[sizePartialChunk];
  byte[] crcbuf = new byte[checksumSize];
  ReplicaInputStreams instr = null;
  try { 
    instr = datanode.data.getTmpInputStreams(block, blkoff, ckoff);
    IOUtils.readFully(instr.getDataIn(), buf, 0, sizePartialChunk);

    // open meta file and read in crc value computed earlier
    IOUtils.readFully(instr.getChecksumIn(), crcbuf, 0, crcbuf.length);
  } finally {
    IOUtils.closeStream(instr);
  }

  // compute crc of partial chunk from data read in the block file.
  partialCrc = DataChecksum.newDataChecksum(
      diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
  partialCrc.update(buf, 0, sizePartialChunk);
  LOG.info("Read in partial CRC chunk from disk for " + block);

  // paranoia! verify that the pre-computed crc matches what we
  // recalculated just now
  if (partialCrc.getValue() != checksum2long(crcbuf)) {
    String msg = "Partial CRC " + partialCrc.getValue() +
                 " does not match value computed the " +
                 " last time file was closed " +
                 checksum2long(crcbuf);
    throw new IOException(msg);
  }
}
Project: hadoop-on-lustre2    File: SimulatedFSDataset.java
/** Not supported */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
    long ckoff) throws IOException {
  throw new IOException("Not supported");
}