Java class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface example source code

Project: hadoop-EAR    File: TestSimulatedFSDataset.java
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the data actually written
    BlockDataFile.Writer dataOut = ((SimulatedFSDataset.SimulatedBlockInlineChecksumFileWriter) fsdataset
        .writeToBlock(0, b, b, false, false, -1, -1)).getBlockDataFile()
        .getWriter(0);
    assertEquals(0, fsdataset.getFinalizedBlockLength(0,b));
    for (int j=1; j <= blockIdToLen(i); ++j) {
      dataOut.write(new byte[] {(byte)j});
      assertEquals(j, fsdataset.getFinalizedBlockLength(0,b)); // correct length even as we write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(0,b);
    assertEquals(blockIdToLen(i), fsdataset.getFinalizedBlockLength(0,b));
  }
  return bytesAdded;  
}
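These test snippets call a few fixtures that the listing itself never shows: the NUMBLOCKS constant, the blockIdToLen(i) mapping from block id to block length, and a one-argument addSomeBlocks overload. Below is a minimal sketch of what they presumably look like; the names are taken from the calls above, but the constant values and exact bodies are assumptions, not code from the listed projects.

// Hypothetical fixtures assumed by the snippets above and below (values are guesses).
static final int NUMBLOCKS = 10;                  // how many blocks each helper run adds
static final int BLOCK_LENGTH_MULTIPLIER = 79;    // assumed factor relating block id to length

long blockIdToLen(long blkid) {
  // derive a block's expected length from its id so tests can recompute it later
  return blkid * BLOCK_LENGTH_MULTIPLIER;
}

int addSomeBlocks(FSDatasetInterface fsdataset) throws IOException {
  // the one-argument overload used by the tests presumably starts at block id 1
  return addSomeBlocks(fsdataset, 1);
}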
Project: hadoop-EAR    File: TestSimulatedFSDataset.java
void checkBlockDataAndSize(FSDatasetInterface fsdataset, Block b, long expectedLen) throws IOException {
  ReplicaToRead replica = fsdataset.getReplicaToRead(0, b);
  InputStream input = replica.getBlockInputStream(null, 0);
  long lengthRead = 0;
  int data;
  int count = 0;
  while ((data = input.read()) != -1) {
    if (count++ < BlockInlineChecksumReader.getHeaderSize()) {
      continue;
    }
    assertEquals(SimulatedFSDataset.DEFAULT_DATABYTE, data);
    lengthRead++;
  }
  assertEquals(expectedLen, lengthRead);
}
Project: hadoop-EAR    File: TestSimulatedFSDataset.java
public void testInvalidate() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(0,deleteBlocks);
  checkInvalidBlock(deleteBlocks[0]);
  checkInvalidBlock(deleteBlocks[1]);
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining());

  // Now make sure the rest of the blocks are valid
  for (int i=3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(0, b, false));
  }
}
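testInvalidate also relies on a checkInvalidBlock helper that the listing omits. A plausible sketch for this namespace-aware (hadoop-EAR) flavor of the interface is shown below; the exact assertions, and the detail that the helper builds its own SimulatedFSDataset from the shared conf field, are assumptions.

// Hypothetical helper (not in the listing): verify that a block cannot be read.
// Assumes a 'conf' field on the test class, as used by the other snippets.
void checkInvalidBlock(Block b) throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
  assertFalse(fsdataset.isValidBlock(0, b, false));
  try {
    fsdataset.getFinalizedBlockLength(0, b);
    assertTrue("Expected an IO exception", false);
  } catch (IOException e) {
    // ok - as expected: querying the length of an invalid block should fail
  }
}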
Project: hadoop-on-lustre    File: TestSimulatedFSDataset.java
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the data actually written
    OutputStream dataOut  = fsdataset.writeToBlock(b, false, false).dataOut;
    assertEquals(0, fsdataset.getLength(b));
    for (int j=1; j <= blockIdToLen(i); ++j) {
      dataOut.write(j);
      assertEquals(j, fsdataset.getLength(b)); // correct length even as we write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
Project: hadoop-on-lustre    File: TestSimulatedFSDataset.java
public void testGetMetaData() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  Block b = new Block(1, 5, 0);
  try {
    assertFalse(fsdataset.metaFileExists(b));
    assertTrue("Expected an IO exception", false);
  } catch (IOException e) {
    // ok - as expected
  }
  addSomeBlocks(fsdataset); // Only need to add one but ....
  b = new Block(1, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  short version = metaDataInput.readShort();
  assertEquals(FSDataset.METADATA_VERSION, version);
  DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
  assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
  assertEquals(0, checksum.getChecksumSize());  
}
Project: hadoop-on-lustre    File: TestSimulatedFSDataset.java
public void testInvalidate() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(deleteBlocks);
  checkInvalidBlock(deleteBlocks[0]);
  checkInvalidBlock(deleteBlocks[1]);
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining());

  // Now make sure the rest of the blocks are valid
  for (int i=3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(b));
  }
}
Project: cumulus    File: TestSimulatedFSDataset.java
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the data actually written
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
    BlockWriteStreams out = bInfo.createStreams(true, 512, 4);
    try {
      OutputStream dataOut  = out.dataOut;
      assertEquals(0, fsdataset.getLength(b));
      for (int j=1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
Project: cumulus    File: TestSimulatedFSDataset.java
public void testGetMetaData() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  Block b = new Block(1, 5, 0);
  try {
    assertFalse(fsdataset.metaFileExists(b));
    assertTrue("Expected an IO exception", false);
  } catch (IOException e) {
    // ok - as expected
  }
  addSomeBlocks(fsdataset); // Only need to add one but ....
  b = new Block(1, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  short version = metaDataInput.readShort();
  assertEquals(FSDataset.METADATA_VERSION, version);
  DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
  assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
  assertEquals(0, checksum.getChecksumSize());  
}
Project: cumulus    File: TestSimulatedFSDataset.java
public void testInvalidate() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(deleteBlocks);
  checkInvalidBlock(deleteBlocks[0]);
  checkInvalidBlock(deleteBlocks[1]);
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining());

  // Now make sure the rest of the blocks are valid
  for (int i=3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(b));
  }
}
Project: RDFS    File: TestSimulatedFSDataset.java
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the data actually written
    OutputStream dataOut  = fsdataset.writeToBlock(0, b, false, false).dataOut;
    assertEquals(0, fsdataset.getFinalizedBlockLength(0,b));
    for (int j=1; j <= blockIdToLen(i); ++j) {
      dataOut.write(j);
      assertEquals(j, fsdataset.getFinalizedBlockLength(0,b)); // correct length even as we write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(0,b);
    assertEquals(blockIdToLen(i), fsdataset.getFinalizedBlockLength(0,b));
  }
  return bytesAdded;  
}
Project: RDFS    File: TestSimulatedFSDataset.java
public void testGetMetaData() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  Block b = new Block(1, 5, 0);
  try {
    assertFalse(fsdataset.metaFileExists(0,b));
    assertTrue("Expected an IO exception", false);
  } catch (IOException e) {
    // ok - as expected
  }
  addSomeBlocks(fsdataset); // Only need to add one but ....
  b = new Block(1, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(0,b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  short version = metaDataInput.readShort();
  assertEquals(FSDataset.METADATA_VERSION, version);
  DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
  assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
  assertEquals(0, checksum.getChecksumSize());  
}
Project: RDFS    File: TestSimulatedFSDataset.java
public void testInvalidate() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(0,deleteBlocks);
  checkInvalidBlock(deleteBlocks[0]);
  checkInvalidBlock(deleteBlocks[1]);
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining());

  // Now make sure the rest of the blocks are valid
  for (int i=3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(0, b, false));
  }
}
Project: hadoop-0.20    File: TestSimulatedFSDataset.java
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the data actually written
    OutputStream dataOut  = fsdataset.writeToBlock(b, false).dataOut;
    assertEquals(0, fsdataset.getLength(b));
    for (int j=1; j <= blockIdToLen(i); ++j) {
      dataOut.write(j);
      assertEquals(j, fsdataset.getLength(b)); // correct length even as we write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
Project: hadoop-0.20    File: TestSimulatedFSDataset.java
public void testGetMetaData() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  Block b = new Block(1, 5, 0);
  try {
    assertFalse(fsdataset.metaFileExists(b));
    assertTrue("Expected an IO exception", false);
  } catch (IOException e) {
    // ok - as expected
  }
  addSomeBlocks(fsdataset); // Only need to add one but ....
  b = new Block(1, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  short version = metaDataInput.readShort();
  assertEquals(FSDataset.METADATA_VERSION, version);
  DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
  assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
  assertEquals(0, checksum.getChecksumSize());  
}
Project: hadoop-0.20    File: TestSimulatedFSDataset.java
public void testInvalidate() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(deleteBlocks);
  checkInvalidBlock(deleteBlocks[0]);
  checkInvalidBlock(deleteBlocks[1]);
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining());

  // Now make sure the rest of the blocks are valid
  for (int i=3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(b));
  }
}
Project: hortonworks-extension    File: TestSimulatedFSDataset.java
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the data actually written
    OutputStream dataOut  = fsdataset.writeToBlock(b, false, false).dataOut;
    assertEquals(0, fsdataset.getLength(b));
    for (int j=1; j <= blockIdToLen(i); ++j) {
      dataOut.write(j);
      assertEquals(j, fsdataset.getLength(b)); // correct length even as we write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
Project: hortonworks-extension    File: TestSimulatedFSDataset.java
public void testGetMetaData() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  Block b = new Block(1, 5, 0);
  try {
    assertFalse(fsdataset.metaFileExists(b));
    assertTrue("Expected an IO exception", false);
  } catch (IOException e) {
    // ok - as expected
  }
  addSomeBlocks(fsdataset); // Only need to add one but ....
  b = new Block(1, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  short version = metaDataInput.readShort();
  assertEquals(FSDataset.METADATA_VERSION, version);
  DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
  assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
  assertEquals(0, checksum.getChecksumSize());  
}
Project: hortonworks-extension    File: TestSimulatedFSDataset.java
public void testInvalidate() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(deleteBlocks);
  checkInvalidBlock(deleteBlocks[0]);
  checkInvalidBlock(deleteBlocks[1]);
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining());

  // Now make sure the rest of the blocks are valid
  for (int i=3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(b));
  }
}
Project: hortonworks-extension    File: TestSimulatedFSDataset.java
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the data actually written
    OutputStream dataOut  = fsdataset.writeToBlock(b, false, false).dataOut;
    assertEquals(0, fsdataset.getLength(b));
    for (int j=1; j <= blockIdToLen(i); ++j) {
      dataOut.write(j);
      assertEquals(j, fsdataset.getLength(b)); // correct length even as we write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
Project: hortonworks-extension    File: TestSimulatedFSDataset.java
public void testGetMetaData() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  Block b = new Block(1, 5, 0);
  try {
    assertFalse(fsdataset.metaFileExists(b));
    assertTrue("Expected an IO exception", false);
  } catch (IOException e) {
    // ok - as expected
  }
  addSomeBlocks(fsdataset); // Only need to add one but ....
  b = new Block(1, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  short version = metaDataInput.readShort();
  assertEquals(FSDataset.METADATA_VERSION, version);
  DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
  assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
  assertEquals(0, checksum.getChecksumSize());  
}
Project: hortonworks-extension    File: TestSimulatedFSDataset.java
public void testInvalidate() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(deleteBlocks);
  checkInvalidBlock(deleteBlocks[0]);
  checkInvalidBlock(deleteBlocks[1]);
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining());

  // Now make sure the rest of the blocks are valid
  for (int i=3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(b));
  }
}
Project: hadoop-gpu    File: TestSimulatedFSDataset.java
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the data actually written
    OutputStream dataOut  = fsdataset.writeToBlock(b, false).dataOut;
    assertEquals(0, fsdataset.getLength(b));
    for (int j=1; j <= blockIdToLen(i); ++j) {
      dataOut.write(j);
      assertEquals(j, fsdataset.getLength(b)); // correct length even as we write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
Project: hadoop-gpu    File: TestSimulatedFSDataset.java
public void testGetMetaData() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  Block b = new Block(1, 5, 0);
  try {
    assertFalse(fsdataset.metaFileExists(b));
    assertTrue("Expected an IO exception", false);
  } catch (IOException e) {
    // ok - as expected
  }
  addSomeBlocks(fsdataset); // Only need to add one but ....
  b = new Block(1, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  short version = metaDataInput.readShort();
  assertEquals(FSDataset.METADATA_VERSION, version);
  DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
  assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
  assertEquals(0, checksum.getChecksumSize());  
}
Project: hadoop-gpu    File: TestSimulatedFSDataset.java
public void testInvalidate() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(deleteBlocks);
  checkInvalidBlock(deleteBlocks[0]);
  checkInvalidBlock(deleteBlocks[1]);
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining());

  // Now make sure the rest of the blocks are valid
  for (int i=3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(b));
  }
}
Project: hadoop-EAR    File: MiniDFSCluster.java
/**
 * This method is valid only if the data nodes have simulated data.
 * @param dataNodeIndex - data node into which to inject; the index is the same as for getDataNodes()
 * @param blocksToInject - the blocks to inject
 * @throws IOException
 *             if the dataset is not a SimulatedFSDataset, or
 *             if any of the blocks already exist in the data node
 */
public void injectBlocks(int dataNodeIndex, Block[] blocksToInject) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  FSDatasetInterface dataSet = dataNodes.get(dataNodeIndex).datanode.getFSDataset();
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimilatedFSDataset");
  }
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(getNameNode().getNamespaceID(), blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleNSBlockReport(0);
}
Project: hadoop-EAR    File: TestSimulatedFSDataset.java
public void testStorageUsage() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  assertEquals(fsdataset.getDfsUsed(), 0);
  assertEquals(fsdataset.getRemaining(), fsdataset.getCapacity());
  int bytesAdded = addSomeBlocks(fsdataset);
  assertEquals(bytesAdded, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded,  fsdataset.getRemaining());

}
Project: hadoop-EAR    File: TestSimulatedFSDataset.java
public void testWriteRead() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  addSomeBlocks(fsdataset);
  for (int i=1; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(0, b, false));
    assertEquals(blockIdToLen(i), fsdataset.getFinalizedBlockLength(0,b));
    checkBlockDataAndSize(fsdataset, b, blockIdToLen(i));
  }
}
Project: hadoop-EAR    File: TestSimulatedFSDataset.java
public void testGetBlockReport() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  Block[] blockReport = fsdataset.getBlockReport(0);
  assertEquals(0, blockReport.length);
  int bytesAdded = addSomeBlocks(fsdataset);
  blockReport = fsdataset.getBlockReport(0);
  assertEquals(NUMBLOCKS, blockReport.length);
  for (Block b: blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
  }
}
Project: hadoop-EAR    File: TestSimulatedFSDataset.java
public void testInValidBlocks() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  Block b = new Block(1, 5, 0);
  checkInvalidBlock(b);

  // Now check an invalid block after adding some blocks
  addSomeBlocks(fsdataset);
  b = new Block(NUMBLOCKS + 99, 5, 0);
  checkInvalidBlock(b);

}
Project: hadoop-on-lustre    File: MiniDFSCluster.java
/**
 * This method is valid only if the data nodes have simulated data.
 * @param dataNodeIndex - data node into which to inject; the index is the same as for getDataNodes()
 * @param blocksToInject - the blocks to inject
 * @throws IOException
 *             if the dataset is not a SimulatedFSDataset, or
 *             if any of the blocks already exist in the data node
 */
public void injectBlocks(int dataNodeIndex, Block[] blocksToInject) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  FSDatasetInterface dataSet = dataNodes.get(dataNodeIndex).datanode.getFSDataset();
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimilatedFSDataset");
  }
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleBlockReport(0);
}
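For context, here is a hedged usage sketch of this injection path: a test enables simulated storage, brings up a one-DataNode cluster with the 1.x-style constructor, and injects blocks into data node 0. The test name, block sizes, and configuration flow are illustrative assumptions; only the injectBlocks call mirrors the method above.

// Hypothetical usage sketch (not from the listing): inject blocks into simulated storage.
public void testInjectBlocksIntoSimulatedStorage() throws IOException {
  Configuration conf = new Configuration();
  // ask DataNodes to back their dataset with SimulatedFSDataset instead of real disks
  conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  try {
    cluster.waitActive();
    Block[] blocksToInject = new Block[] { new Block(1, 4096, 0), new Block(2, 4096, 0) };
    // inject into data node 0; injectBlocks then schedules an immediate block report
    cluster.injectBlocks(0, blocksToInject);
  } finally {
    cluster.shutdown();
  }
}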
Project: hadoop-on-lustre    File: TestSimulatedFSDataset.java
public void testStorageUsage() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  assertEquals(fsdataset.getDfsUsed(), 0);
  assertEquals(fsdataset.getRemaining(), fsdataset.getCapacity());
  int bytesAdded = addSomeBlocks(fsdataset);
  assertEquals(bytesAdded, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded,  fsdataset.getRemaining());

}
Project: hadoop-on-lustre    File: TestSimulatedFSDataset.java
void checkBlockDataAndSize(FSDatasetInterface fsdataset, Block b, long expectedLen) throws IOException {
  InputStream input = fsdataset.getBlockInputStream(b);
  long lengthRead = 0;
  int data;
  while ((data = input.read()) != -1) {
    assertEquals(SimulatedFSDataset.DEFAULT_DATABYTE, data);
    lengthRead++;
  }
  assertEquals(expectedLen, lengthRead);
}
Project: hadoop-on-lustre    File: TestSimulatedFSDataset.java
public void testWriteRead() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  addSomeBlocks(fsdataset);
  for (int i=1; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(b));
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
    checkBlockDataAndSize(fsdataset, b, blockIdToLen(i));
  }
}
Project: hadoop-on-lustre    File: TestSimulatedFSDataset.java
public void testGetBlockReport() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  Block[] blockReport = fsdataset.getBlockReport();
  assertEquals(0, blockReport.length);
  int bytesAdded = addSomeBlocks(fsdataset);
  blockReport = fsdataset.getBlockReport();
  assertEquals(NUMBLOCKS, blockReport.length);
  for (Block b: blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
  }
}
Project: hadoop-on-lustre    File: TestSimulatedFSDataset.java
public void testInValidBlocks() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  Block b = new Block(1, 5, 0);
  checkInvalidBlock(b);

  // Now check an invalid block after adding some blocks
  addSomeBlocks(fsdataset);
  b = new Block(NUMBLOCKS + 99, 5, 0);
  checkInvalidBlock(b);

}
Project: cumulus    File: MiniDFSCluster.java
/**
 * This method is valid only if the data nodes have simulated data.
 * @param dataNodeIndex - data node into which to inject; the index is the same as for getDataNodes()
 * @param blocksToInject - the blocks to inject
 * @throws IOException
 *             if the dataset is not a SimulatedFSDataset, or
 *             if any of the blocks already exist in the data node
 */
public void injectBlocks(int dataNodeIndex, Iterable<Block> blocksToInject) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  FSDatasetInterface dataSet = dataNodes.get(dataNodeIndex).datanode.getFSDataset();
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimilatedFSDataset");
  }
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleBlockReport(0);
}
Project: cumulus    File: TestSimulatedFSDataset.java
public void testStorageUsage() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  assertEquals(fsdataset.getDfsUsed(), 0);
  assertEquals(fsdataset.getRemaining(), fsdataset.getCapacity());
  int bytesAdded = addSomeBlocks(fsdataset);
  assertEquals(bytesAdded, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded,  fsdataset.getRemaining());

}
Project: cumulus    File: TestSimulatedFSDataset.java
void checkBlockDataAndSize(FSDatasetInterface fsdataset, Block b, long expectedLen) throws IOException {
  InputStream input = fsdataset.getBlockInputStream(b);
  long lengthRead = 0;
  int data;
  while ((data = input.read()) != -1) {
    assertEquals(SimulatedFSDataset.DEFAULT_DATABYTE, data);
    lengthRead++;
  }
  assertEquals(expectedLen, lengthRead);
}
Project: cumulus    File: TestSimulatedFSDataset.java
public void testWriteRead() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  addSomeBlocks(fsdataset);
  for (int i=1; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(b));
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
    checkBlockDataAndSize(fsdataset, b, blockIdToLen(i));
  }
}
Project: cumulus    File: TestSimulatedFSDataset.java
public void testInValidBlocks() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
  Block b = new Block(1, 5, 0);
  checkInvalidBlock(b);

  // Now check an invalid block after adding some blocks
  addSomeBlocks(fsdataset);
  b = new Block(NUMBLOCKS + 99, 5, 0);
  checkInvalidBlock(b);

}