Java class org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams: example source code
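All of the usages below follow the same pattern: obtain a ReplicaInPipelineInterface from the dataset, ask it for a ReplicaOutputStreams with a DataChecksum, write through getDataOut()/getChecksumOut(), and close. A minimal sketch of that pattern, assuming a replica already created via createRbw(); the writeOneByte helper and the surrounding class are hypothetical, not from the listing:

import java.io.IOException;
import java.io.OutputStream;

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
import org.apache.hadoop.util.DataChecksum;

public class ReplicaOutputStreamsSketch {
  // Hypothetical helper: write one data byte and one checksum byte, then close.
  static void writeOneByte(ReplicaInPipelineInterface replica) throws IOException {
    // CRC32 over 512-byte chunks, the checksum used throughout the examples below.
    DataChecksum checksum =
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512);
    ReplicaOutputStreams streams = replica.createStreams(true, checksum);
    try {
      OutputStream dataOut = streams.getDataOut();      // block data stream
      OutputStream crcOut  = streams.getChecksumOut();  // meta/checksum stream
      dataOut.write(1);
      crcOut.write('a');
    } finally {
      streams.close(); // closes both underlying streams
    }
  }
}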

Project: hadoop    File: FsDatasetImpl.java
/**
 * Sets the offset in the meta file so that the
 * last checksum will be overwritten.
 */
@Override // FsDatasetSpi
public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams streams, 
    int checksumSize) throws IOException {
  FileOutputStream file = (FileOutputStream)streams.getChecksumOut();
  FileChannel channel = file.getChannel();
  long oldPos = channel.position();
  long newPos = oldPos - checksumSize;
  if (LOG.isDebugEnabled()) {
    LOG.debug("Changing meta file offset of block " + b + " from " +
        oldPos + " to " + newPos);
  }
  channel.position(newPos);
}
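adjustCrcChannelPosition works by stepping the meta file's FileChannel back exactly one checksum (4 bytes for CRC32), so the next checksum written overwrites the last one. A self-contained sketch of the same java.nio mechanism outside HDFS; the file name and helper are illustrative only:

import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;

public class RewindSketch {
  // Step the stream's channel back so the next write overwrites the trailing
  // 'checksumSize' bytes (a FileOutputStream and its channel share one position).
  static void rewind(FileOutputStream out, int checksumSize) throws IOException {
    FileChannel channel = out.getChannel();
    channel.position(channel.position() - checksumSize);
  }

  public static void main(String[] args) throws IOException {
    try (FileOutputStream out = new FileOutputStream("demo.meta")) {
      out.write(new byte[] {1, 2, 3, 4});  // pretend these 4 bytes are a CRC32
      rewind(out, 4);                      // step back over them
      out.write(new byte[] {9, 9, 9, 9});  // overwrites the previous 4 bytes
    }
  }
}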
Project: hadoop    File: SimulatedFSDataset.java
@Override
synchronized public ReplicaOutputStreams createStreams(boolean isCreate, 
    DataChecksum requestedChecksum) throws IOException {
  if (finalized) {
    throw new IOException("Trying to write to a finalized replica "
        + theBlock);
  } else {
    SimulatedOutputStream crcStream = new SimulatedOutputStream();
    return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum,
        volume.isTransientStorage());
  }
}
Project: hadoop    File: TestSimulatedFSDataset.java
int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
    throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); 
    // We pass the expected length as zero; fsdataset should use the size of
    // the actual data written.
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
        StorageType.DEFAULT, b, false).getReplica();
    ReplicaOutputStreams out = bInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut  = out.getDataOut();
      assertEquals(0, fsdataset.getLength(b));
      for (int j=1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
Project: hadoop    File: TestBlockRecovery.java
/**
 * BlockRecoveryFI_11. A replica's recovery ID does not match the new GS.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNotMatchedReplicaID() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(
      StorageType.DEFAULT, block, false).getReplica();
  ReplicaOutputStreams streams = null;
  try {
    streams = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    streams.getChecksumOut().write('a');
    dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
    try {
      dn.syncBlock(rBlock, initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      assertTrue(e.getMessage().startsWith("Cannot recover "));
    }
    DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(
        any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
        anyBoolean(), any(DatanodeID[].class), any(String[].class));
  } finally {
    if (streams != null) {
      streams.close();
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: FsDatasetImpl.java
/**
 * Sets the offset in the meta file so that the
 * last checksum will be overwritten.
 */
@Override // FsDatasetSpi
public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams streams, 
    int checksumSize) throws IOException {
  FileOutputStream file = (FileOutputStream)streams.getChecksumOut();
  FileChannel channel = file.getChannel();
  long oldPos = channel.position();
  long newPos = oldPos - checksumSize;
  if (LOG.isDebugEnabled()) {
    LOG.debug("Changing meta file offset of block " + b + " from " +
        oldPos + " to " + newPos);
  }
  channel.position(newPos);
}
Project: aliyun-oss-hadoop-fs    File: SimulatedFSDataset.java
@Override
synchronized public ReplicaOutputStreams createStreams(boolean isCreate, 
    DataChecksum requestedChecksum) throws IOException {
  if (finalized) {
    throw new IOException("Trying to write to a finalized replica "
        + theBlock);
  } else {
    SimulatedOutputStream crcStream = new SimulatedOutputStream();
    return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum,
        volume.isTransientStorage());
  }
}
Project: aliyun-oss-hadoop-fs    File: TestSimulatedFSDataset.java
static int addSomeBlocks(SimulatedFSDataset fsdataset, long startingBlockId,
    boolean negativeBlkID) throws IOException {
  int bytesAdded = 0;
  for (long i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    long blkID = negativeBlkID ? i * -1 : i;
    ExtendedBlock b = new ExtendedBlock(bpid, blkID, 0, 0);
    // We pass the expected length as zero; fsdataset should use the size of
    // the actual data written.
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
        StorageType.DEFAULT, b, false).getReplica();
    ReplicaOutputStreams out = bInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut  = out.getDataOut();
      assertEquals(0, fsdataset.getLength(b));
      for (int j=1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;
}
Project: aliyun-oss-hadoop-fs    File: TestBlockRecovery.java
/**
 * BlockRecoveryFI_11. A replica's recovery ID does not match the new GS.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNotMatchedReplicaID() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(
      StorageType.DEFAULT, block, false).getReplica();
  ReplicaOutputStreams streams = null;
  try {
    streams = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    streams.getChecksumOut().write('a');
    dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
    BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
        recoveryWorker.new RecoveryTaskContiguous(rBlock);
    try {
      RecoveryTaskContiguous.syncBlock(initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      assertTrue(e.getMessage().startsWith("Cannot recover "));
    }
    DatanodeProtocol namenode = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(
        any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
        anyBoolean(), any(DatanodeID[].class), any(String[].class));
  } finally {
    if (streams != null) {
      streams.close();
    }
  }
}
Project: big-c    File: FsDatasetImpl.java
/**
 * Sets the offset in the meta file so that the
 * last checksum will be overwritten.
 */
@Override // FsDatasetSpi
public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams streams, 
    int checksumSize) throws IOException {
  FileOutputStream file = (FileOutputStream)streams.getChecksumOut();
  FileChannel channel = file.getChannel();
  long oldPos = channel.position();
  long newPos = oldPos - checksumSize;
  if (LOG.isDebugEnabled()) {
    LOG.debug("Changing meta file offset of block " + b + " from " +
        oldPos + " to " + newPos);
  }
  channel.position(newPos);
}
Project: big-c    File: SimulatedFSDataset.java
@Override
synchronized public ReplicaOutputStreams createStreams(boolean isCreate, 
    DataChecksum requestedChecksum) throws IOException {
  if (finalized) {
    throw new IOException("Trying to write to a finalized replica "
        + theBlock);
  } else {
    SimulatedOutputStream crcStream = new SimulatedOutputStream();
    return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum,
        volume.isTransientStorage());
  }
}
Project: big-c    File: TestSimulatedFSDataset.java
int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
    throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); 
    // We pass the expected length as zero; fsdataset should use the size of
    // the actual data written.
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
        StorageType.DEFAULT, b, false).getReplica();
    ReplicaOutputStreams out = bInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut  = out.getDataOut();
      assertEquals(0, fsdataset.getLength(b));
      for (int j=1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
Project: big-c    File: TestBlockRecovery.java
/**
 * BlockRecoveryFI_11. A replica's recovery ID does not match the new GS.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNotMatchedReplicaID() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(
      StorageType.DEFAULT, block, false).getReplica();
  ReplicaOutputStreams streams = null;
  try {
    streams = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    streams.getChecksumOut().write('a');
    dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
    try {
      dn.syncBlock(rBlock, initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      assertTrue(e.getMessage().startsWith("Cannot recover "));
    }
    DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(
        any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
        anyBoolean(), any(DatanodeID[].class), any(String[].class));
  } finally {
    if (streams != null) {
      streams.close();
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FsDatasetImpl.java
/**
 * Sets the offset in the meta file so that the
 * last checksum will be overwritten.
 */
@Override // FsDatasetSpi
public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams streams, 
    int checksumSize) throws IOException {
  FileOutputStream file = (FileOutputStream)streams.getChecksumOut();
  FileChannel channel = file.getChannel();
  long oldPos = channel.position();
  long newPos = oldPos - checksumSize;
  if (LOG.isDebugEnabled()) {
    LOG.debug("Changing meta file offset of block " + b + " from " +
        oldPos + " to " + newPos);
  }
  channel.position(newPos);
}
Project: hadoop-2.6.0-cdh5.4.3    File: SimulatedFSDataset.java
@Override
synchronized public ReplicaOutputStreams createStreams(boolean isCreate, 
    DataChecksum requestedChecksum) throws IOException {
  if (finalized) {
    throw new IOException("Trying to write to a finalized replica "
        + theBlock);
  } else {
    SimulatedOutputStream crcStream = new SimulatedOutputStream();
    return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum,
        volume.isTransientStorage());
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestSimulatedFSDataset.java
int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
    throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); 
    // We pass the expected length as zero; fsdataset should use the size of
    // the actual data written.
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
        StorageType.DEFAULT, b, false).getReplica();
    ReplicaOutputStreams out = bInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut  = out.getDataOut();
      assertEquals(0, fsdataset.getLength(b));
      for (int j=1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestBlockRecovery.java
/**
 * BlockRecoveryFI_11. A replica's recovery ID does not match the new GS.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNotMatchedReplicaID() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(
      StorageType.DEFAULT, block, false).getReplica();
  ReplicaOutputStreams streams = null;
  try {
    streams = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    streams.getChecksumOut().write('a');
    dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
    try {
      dn.syncBlock(rBlock, initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      assertTrue(e.getMessage().startsWith("Cannot recover "));
    }
    DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(
        any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
        anyBoolean(), any(DatanodeID[].class), any(String[].class));
  } finally {
    if (streams != null) {
      streams.close();
    }
  }
}
Project: hadoop-plus    File: FsDatasetImpl.java
/**
 * Sets the offset in the meta file so that the
 * last checksum will be overwritten.
 */
@Override // FsDatasetSpi
public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams streams, 
    int checksumSize) throws IOException {
  FileOutputStream file = (FileOutputStream)streams.getChecksumOut();
  FileChannel channel = file.getChannel();
  long oldPos = channel.position();
  long newPos = oldPos - checksumSize;
  if (LOG.isDebugEnabled()) {
    LOG.debug("Changing meta file offset of block " + b + " from " +
        oldPos + " to " + newPos);
  }
  channel.position(newPos);
}
Project: hadoop-plus    File: SimulatedFSDataset.java
@Override
synchronized public ReplicaOutputStreams createStreams(boolean isCreate, 
    DataChecksum requestedChecksum) throws IOException {
  if (finalized) {
    throw new IOException("Trying to write to a finalized replica "
        + theBlock);
  } else {
    SimulatedOutputStream crcStream = new SimulatedOutputStream();
    return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum);
  }
}
Project: hadoop-plus    File: TestSimulatedFSDataset.java
int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
    throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); 
    // We pass the expected length as zero; fsdataset should use the size of
    // the actual data written.
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
    ReplicaOutputStreams out = bInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut  = out.getDataOut();
      assertEquals(0, fsdataset.getLength(b));
      for (int j=1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
Project: hadoop-plus    File: TestBlockRecovery.java
/**
 * BlockRecoveryFI_11. A replica's recovery ID does not match the new GS.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNotMatchedReplicaID() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
  ReplicaOutputStreams streams = null;
  try {
    streams = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    streams.getChecksumOut().write('a');
    dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
    try {
      dn.syncBlock(rBlock, initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      assertTrue(e.getMessage().startsWith("Cannot recover "));
    }
    DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(
        any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
        anyBoolean(), any(DatanodeID[].class), any(String[].class));
  } finally {
    if (streams != null) {
      streams.close();
    }
  }
}
Project: FlexMap    File: FsDatasetImpl.java
/**
 * Sets the offset in the meta file so that the
 * last checksum will be overwritten.
 */
@Override // FsDatasetSpi
public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams streams, 
    int checksumSize) throws IOException {
  FileOutputStream file = (FileOutputStream)streams.getChecksumOut();
  FileChannel channel = file.getChannel();
  long oldPos = channel.position();
  long newPos = oldPos - checksumSize;
  if (LOG.isDebugEnabled()) {
    LOG.debug("Changing meta file offset of block " + b + " from " +
        oldPos + " to " + newPos);
  }
  channel.position(newPos);
}
Project: FlexMap    File: SimulatedFSDataset.java
@Override
synchronized public ReplicaOutputStreams createStreams(boolean isCreate, 
    DataChecksum requestedChecksum) throws IOException {
  if (finalized) {
    throw new IOException("Trying to write to a finalized replica "
        + theBlock);
  } else {
    SimulatedOutputStream crcStream = new SimulatedOutputStream();
    return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum,
        volume.isTransientStorage());
  }
}
Project: FlexMap    File: TestSimulatedFSDataset.java
int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
    throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); 
    // We pass the expected length as zero; fsdataset should use the size of
    // the actual data written.
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
        StorageType.DEFAULT, b, false);
    ReplicaOutputStreams out = bInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut  = out.getDataOut();
      assertEquals(0, fsdataset.getLength(b));
      for (int j=1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
Project: FlexMap    File: TestBlockRecovery.java
/**
 * BlockRecoveryFI_11. A replica's recovery ID does not match the new GS.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNotMatchedReplicaID() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(
      StorageType.DEFAULT, block, false);
  ReplicaOutputStreams streams = null;
  try {
    streams = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    streams.getChecksumOut().write('a');
    dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
    try {
      dn.syncBlock(rBlock, initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      assertTrue(e.getMessage().startsWith("Cannot recover "));
    }
    DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(
        any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
        anyBoolean(), any(DatanodeID[].class), any(String[].class));
  } finally {
    if (streams != null) {
      streams.close();
    }
  }
}
Project: hops    File: FsDatasetImpl.java
/**
 * Sets the offset in the meta file so that the
 * last checksum will be overwritten.
 */
@Override // FsDatasetSpi
public void adjustCrcChannelPosition(ExtendedBlock b,
    ReplicaOutputStreams streams, int checksumSize) throws IOException {
  FileOutputStream file = (FileOutputStream) streams.getChecksumOut();
  FileChannel channel = file.getChannel();
  long oldPos = channel.position();
  long newPos = oldPos - checksumSize;
  if (LOG.isDebugEnabled()) {
    LOG.debug("Changing meta file offset of block " + b + " from " +
        oldPos + " to " + newPos);
  }
  channel.position(newPos);
}
Project: hops    File: SimulatedFSDataset.java
@Override
synchronized public ReplicaOutputStreams createStreams(boolean isCreate,
    DataChecksum requestedChecksum) throws IOException {
  if (finalized) {
    throw new IOException(
        "Trying to write to a finalized replica " + theBlock);
  } else {
    SimulatedOutputStream crcStream = new SimulatedOutputStream();
    return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum);
  }
}
Project: hops    File: TestSimulatedFSDataset.java
int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
    throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
    ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0);
    // We pass the expected length as zero; fsdataset should use the size of
    // the actual data written.
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
    ReplicaOutputStreams out = bInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut = out.getDataOut();
      assertEquals(0, fsdataset.getLength(b));
      for (int j = 1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j,
            bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;
}
Project: hops    File: TestBlockRecovery.java
/**
 * BlockRecoveryFI_11. A replica's recovery ID does not match the new GS.
 *
 * @throws IOException
 *     in case of an error
 */
@Test
public void testNotMatchedReplicaID() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
  ReplicaOutputStreams streams = null;
  try {
    streams = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    streams.getChecksumOut().write('a');
    dn.data.initReplicaRecovery(
        new RecoveringBlock(block, null, RECOVERY_ID + 1));
    try {
      dn.syncBlock(rBlock, initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      assertTrue(e.getMessage().startsWith("Cannot recover "));
    }
    DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never())
        .commitBlockSynchronization(any(ExtendedBlock.class), anyLong(),
            anyLong(), anyBoolean(), anyBoolean(), any(DatanodeID[].class),
            any(String[].class));
  } finally {
    if (streams != null) {
      streams.close();
    }
  }
}
Project: hadoop-TCP    File: FsDatasetImpl.java
/**
 * Sets the offset in the meta file so that the
 * last checksum will be overwritten.
 */
@Override // FsDatasetSpi
public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams streams, 
    int checksumSize) throws IOException {
  FileOutputStream file = (FileOutputStream)streams.getChecksumOut();
  FileChannel channel = file.getChannel();
  long oldPos = channel.position();
  long newPos = oldPos - checksumSize;
  if (LOG.isDebugEnabled()) {
    LOG.debug("Changing meta file offset of block " + b + " from " +
        oldPos + " to " + newPos);
  }
  channel.position(newPos);
}
Project: hadoop-TCP    File: SimulatedFSDataset.java
@Override
synchronized public ReplicaOutputStreams createStreams(boolean isCreate, 
    DataChecksum requestedChecksum) throws IOException {
  if (finalized) {
    throw new IOException("Trying to write to a finalized replica "
        + theBlock);
  } else {
    SimulatedOutputStream crcStream = new SimulatedOutputStream();
    return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum);
  }
}
Project: hadoop-TCP    File: TestSimulatedFSDataset.java
int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
    throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); 
    // We pass the expected length as zero; fsdataset should use the size of
    // the actual data written.
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
    ReplicaOutputStreams out = bInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut  = out.getDataOut();
      assertEquals(0, fsdataset.getLength(b));
      for (int j=1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
Project: hadoop-TCP    File: TestBlockRecovery.java
/**
 * BlockRecoveryFI_11. A replica's recovery ID does not match the new GS.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNotMatchedReplicaID() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
  ReplicaOutputStreams streams = null;
  try {
    streams = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    streams.getChecksumOut().write('a');
    dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
    try {
      dn.syncBlock(rBlock, initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      assertTrue(e.getMessage().startsWith("Cannot recover "));
    }
    DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(
        any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
        anyBoolean(), any(DatanodeID[].class), any(String[].class));
  } finally {
    if (streams != null) {
      streams.close();
    }
  }
}
Project: hardfs    File: FsDatasetImpl.java
/**
 * Sets the offset in the meta file so that the
 * last checksum will be overwritten.
 */
@Override // FsDatasetSpi
public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams streams, 
    int checksumSize) throws IOException {
  FileOutputStream file = (FileOutputStream)streams.getChecksumOut();
  FileChannel channel = file.getChannel();
  long oldPos = channel.position();
  long newPos = oldPos - checksumSize;
  if (LOG.isDebugEnabled()) {
    LOG.debug("Changing meta file offset of block " + b + " from " +
        oldPos + " to " + newPos);
  }
  channel.position(newPos);
}
Project: hardfs    File: SimulatedFSDataset.java
@Override
synchronized public ReplicaOutputStreams createStreams(boolean isCreate, 
    DataChecksum requestedChecksum) throws IOException {
  if (finalized) {
    throw new IOException("Trying to write to a finalized replica "
        + theBlock);
  } else {
    SimulatedOutputStream crcStream = new SimulatedOutputStream();
    return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum);
  }
}
Project: hardfs    File: TestSimulatedFSDataset.java
int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
    throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); 
    // We pass the expected length as zero; fsdataset should use the size of
    // the actual data written.
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
    ReplicaOutputStreams out = bInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut  = out.getDataOut();
      assertEquals(0, fsdataset.getLength(b));
      for (int j=1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
Project: hardfs    File: TestBlockRecovery.java
/**
 * BlockRecoveryFI_11. A replica's recovery ID does not match the new GS.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNotMatchedReplicaID() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
  ReplicaOutputStreams streams = null;
  try {
    streams = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    streams.getChecksumOut().write('a');
    dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
    try {
      dn.syncBlock(rBlock, initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      assertTrue(e.getMessage().startsWith("Cannot recover "));
    }
    DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(
        any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
        anyBoolean(), any(DatanodeID[].class), any(String[].class));
  } finally {
    if (streams != null) {
      streams.close();
    }
  }
}
Project: hadoop-on-lustre2    File: SimulatedFSDataset.java
@Override
synchronized public ReplicaOutputStreams createStreams(boolean isCreate, 
    DataChecksum requestedChecksum) throws IOException {
  if (finalized) {
    throw new IOException("Trying to write to a finalized replica "
        + theBlock);
  } else {
    SimulatedOutputStream crcStream = new SimulatedOutputStream();
    return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum);
  }
}
Project: hadoop-on-lustre2    File: TestSimulatedFSDataset.java
int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
    throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); 
    // We pass the expected length as zero; fsdataset should use the size of
    // the actual data written.
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
    ReplicaOutputStreams out = bInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut  = out.getDataOut();
      assertEquals(0, fsdataset.getLength(b));
      for (int j=1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
Project: hadoop-on-lustre2    File: TestBlockRecovery.java
/**
 * BlockRecoveryFI_11. A replica's recovery ID does not match the new GS.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNotMatchedReplicaID() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
  ReplicaOutputStreams streams = null;
  try {
    streams = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    streams.getChecksumOut().write('a');
    dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
    try {
      dn.syncBlock(rBlock, initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      assertTrue(e.getMessage().startsWith("Cannot recover "));
    }
    DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(
        any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
        anyBoolean(), any(DatanodeID[].class), any(String[].class));
  } finally {
    if (streams != null) {
      streams.close();
    }
  }
}
Project: hadoop    File: SimulatedFSDataset.java
@Override // FsDatasetSpi
public synchronized void adjustCrcChannelPosition(ExtendedBlock b,
                                            ReplicaOutputStreams stream, 
                                            int checksumSize)
                                            throws IOException {
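  // Intentionally a no-op: the simulated dataset keeps replica data in memory
  // and has no real meta file, so there is no checksum channel to reposition.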
}