Java class org.apache.hadoop.hdfs.client.HdfsDataInputStream: example source code
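HdfsDataInputStream is the FSDataInputStream subclass that HDFS returns from open(); it adds HDFS-specific accessors such as getVisibleLength(), getCurrentBlock(), and getReadStatistics(), and every snippet below revolves around that. As a minimal, hedged sketch of the common pattern (the path is hypothetical; assumes a reachable HDFS with the default configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class HdfsDataInputStreamDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // On HDFS, open() returns an HdfsDataInputStream; the cast below is the
    // same one the snippets on this page use.
    try (HdfsDataInputStream in =
        (HdfsDataInputStream) fs.open(new Path("/tmp/example.dat"))) {
      in.readByte(); // position inside the first block before asking for it
      System.out.println("visible length = " + in.getVisibleLength());
      System.out.println("first block    = " + in.getCurrentBlock());
      System.out.println("bytes read     = "
          + in.getReadStatistics().getTotalBytesRead());
    }
  }
}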

Projects: hadoop, aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, FlexMap (identical copies)    File: DFSClient.java
/**
 * Wraps the stream in a CryptoInputStream if the underlying file is
 * encrypted.
 */
public HdfsDataInputStream createWrappedInputStream(DFSInputStream dfsis)
    throws IOException {
  final FileEncryptionInfo feInfo = dfsis.getFileEncryptionInfo();
  if (feInfo != null) {
    // File is encrypted, wrap the stream in a crypto stream.
    // Currently only one version, so no special logic based on the version #
    getCryptoProtocolVersion(feInfo);
    final CryptoCodec codec = getCryptoCodec(conf, feInfo);
    final KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
    final CryptoInputStream cryptoIn =
        new CryptoInputStream(dfsis, codec, decrypted.getMaterial(),
            feInfo.getIV());
    return new HdfsDataInputStream(cryptoIn);
  } else {
    // No FileEncryptionInfo so no encryption.
    return new HdfsDataInputStream(dfsis);
  }
}
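The wrapping is invisible to callers: reading a file inside an HDFS encryption zone goes through the ordinary FileSystem API and yields plaintext. A hedged sketch (the encryption-zone path is hypothetical; assumes the usual org.apache.hadoop.fs imports):

static byte[] readEncrypted(FileSystem fs) throws IOException {
  byte[] buf = new byte[4096];
  // /zone/secret.dat is assumed to live in an encryption zone; the stream
  // returned by open() decrypts client-side via the CryptoInputStream above.
  try (FSDataInputStream in = fs.open(new Path("/zone/secret.dat"))) {
    int n = in.read(buf);
    return java.util.Arrays.copyOf(buf, Math.max(n, 0));
  }
}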
Projects: hadoop, aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, hops, hadoop-TCP, hardfs, hadoop-on-lustre2 (identical up to whitespace)    File: TestReadWhileWriting.java
static void checkFile(Path p, int expectedsize, final Configuration conf
    ) throws IOException, InterruptedException {
  //open the file with another user account
  final String username = UserGroupInformation.getCurrentUser().getShortUserName()
      + "_" + ++userCount;

  UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username, 
                               new String[] {"supergroup"});

  final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);

  final HdfsDataInputStream in = (HdfsDataInputStream)fs.open(p);

  //Check visible length
  Assert.assertTrue(in.getVisibleLength() >= expectedsize);

  //Able to read?
  for(int i = 0; i < expectedsize; i++) {
    Assert.assertEquals((byte)i, (byte)in.read());  
  }

  in.close();
}
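checkFile() only sees a non-zero visible length because the concurrent writer has already flushed. A hedged sketch of the writer side (data and path are assumptions; usual org.apache.hadoop.fs imports):

static void writeAndExpose(FileSystem fs, Path p, byte[] data) throws IOException {
  try (FSDataOutputStream out = fs.create(p)) {
    out.write(data);
    out.hflush(); // make the bytes visible to new readers before close()
    // From here on, a reader's in.getVisibleLength() should be >= data.length.
  }
}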
Project: fffs    File: FileTester.java
void zeroCopyRead(FileSystem fs, String path, int readSize, int nloop)
    throws IOException {
  HdfsDataInputStream fsis = (HdfsDataInputStream) fs.open(new Path(path));
  for (int i = 0; i < nloop; i++) {
    fsis.seek(0);
    long len = 0;
    final long startTs = System.nanoTime();
    while (true) {
      // rdmaRead is an fffs-specific extension; the loop treats null as
      // end of stream.
      ByteBuffer bb = fsis.rdmaRead(readSize);
      if (bb == null) {
        break;
      }
      len += bb.remaining();
    }
    final long endTs = System.nanoTime();
    // bytes per nanosecond == (decimal) GB/s
    System.out.println(((double) len / (endTs - startTs)) + " GB/s");
  }
  fsis.close();
}
Projects: hadoop, aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3 (identical copies)    File: Hdfs.java
@SuppressWarnings("deprecation")
@Override
public HdfsDataInputStream open(Path f, int bufferSize) 
    throws IOException, UnresolvedLinkException {
  final DFSInputStream dfsis = dfs.open(getUriPath(f),
    bufferSize, verifyChecksum);
  return dfs.createWrappedInputStream(dfsis);
}
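Hdfs is the AbstractFileSystem implementation behind FileContext, so the same wrapped stream surfaces through that API as well. A hedged sketch (path hypothetical; usual imports assumed):

static void openViaFileContext(Configuration conf) throws IOException {
  FileContext fc = FileContext.getFileContext(conf);
  // Under the hood this lands in Hdfs#open above and returns the wrapped stream.
  try (FSDataInputStream in = fc.open(new Path("/tmp/example.dat"))) {
    in.readByte();
  }
}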
Projects: hadoop, aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3 (identical copies)    File: DFSTestUtil.java
public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
  HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
  try {
    in.readByte();
    return in.getCurrentBlock();
  } finally {
    in.close();
  }
}
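A hedged sketch of a typical call site for this helper (the path is hypothetical; ExtendedBlock comes from org.apache.hadoop.hdfs.protocol):

static void printFirstBlock(FileSystem fs) throws IOException {
  ExtendedBlock blk = getFirstBlock(fs, new Path("/test/file1.dat"));
  // Tests typically use the block's identity to locate replica files on disk
  // or to corrupt/evict a specific replica.
  System.out.println("pool=" + blk.getBlockPoolId() + " id=" + blk.getBlockId());
}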
Projects: hadoop, aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus (identical copies)    File: TestWriteRead.java
/**
 * Open the file, read it through to the end, then close it.
 * Returns the number of bytes read.
 * Supports both sequential reads and positional reads.
 */
private long readData(String fname, byte[] buffer, long byteExpected, long beginPosition)
    throws IOException {
  long totalByteRead = 0;
  Path path = getFullyQualifiedPath(fname);

  FSDataInputStream in = null;
  try {
    in = openInputStream(path);

    long visibleLenFromReadStream = ((HdfsDataInputStream)in).getVisibleLength();

    if (visibleLenFromReadStream < byteExpected)
    {
      throw new IOException(visibleLenFromReadStream
          + " = visibleLenFromReadStream < bytesExpected= "
          + byteExpected);
    }

    totalByteRead = readUntilEnd(in, buffer, buffer.length, fname,
        beginPosition, visibleLenFromReadStream, positionReadOption);
    in.close();

    // Reading more data than the visible length is OK, but not less.
    if (totalByteRead + beginPosition < byteExpected ){
      throw new IOException("readData mismatch in byte read: expected=" 
          + byteExpected + " ; got " +  (totalByteRead + beginPosition));
    }
    return totalByteRead + beginPosition;

  } catch (IOException e) {
    throw new IOException("##### Caught Exception in readData. "
        + "Total Byte Read so far = " + totalByteRead + " beginPosition = "
        + beginPosition, e);
  } finally {
    if (in != null)
      in.close();
  }
}
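For reference, the two read modes readData() exercises differ in whether they move the stream offset; a minimal sketch using only FSDataInputStream APIs (path assumed):

static void readBothWays(FileSystem fs, Path p) throws IOException {
  byte[] buf = new byte[4096];
  try (FSDataInputStream in = fs.open(p)) {
    int n1 = in.read(buf, 0, buf.length);     // sequential: advances the stream offset
    int n2 = in.read(0L, buf, 0, buf.length); // positional: offset is unchanged
  }
}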
Projects: hadoop, aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3 (identical up to whitespace)    File: TestShortCircuitLocalRead.java
private boolean checkUnsupportedMethod(FileSystem fs, Path file,
                                         byte[] expected, int readOffset) throws IOException {
  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(file);
  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
  IOUtils.skipFully(stm, readOffset);
  try {
    stm.read(actual);
  } catch(UnsupportedOperationException unex) {
    return true;
  }
  return false;
}
Projects: hadoop, big-c, hadoop-2.6.0-cdh5.4.3 (identical copies)    File: TestScrLazyPersistFiles.java
/**
 * Read in-memory block with Short Circuit Read
 * Note: the test uses faked RAM_DISK from physical disk.
 */
@Test
public void testRamDiskShortCircuitRead()
  throws IOException, InterruptedException {
  startUpCluster(REPL_FACTOR,
    new StorageType[]{RAM_DISK, DEFAULT},
    2 * BLOCK_SIZE - 1, true);  // 1 replica + delta, SCR read
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final int SEED = 0xFADED;
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeRandomTestFile(path, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path, RAM_DISK);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);

  FSDataInputStream fis = fs.open(path);

  // Verify SCR read counters
  try {
    byte[] buf = new byte[BUFFER_LENGTH];
    fis.read(0, buf, 0, BUFFER_LENGTH);
    HdfsDataInputStream dfsis = (HdfsDataInputStream) fis;
    Assert.assertEquals(BUFFER_LENGTH,
      dfsis.getReadStatistics().getTotalBytesRead());
    Assert.assertEquals(BUFFER_LENGTH,
      dfsis.getReadStatistics().getTotalShortCircuitBytesRead());
  } finally {
    fis.close();
    fis = null;
  }
}
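getReadStatistics() carries more counters than the two asserted above. A hedged sketch dumping the full set (getter names as found in DFSInputStream.ReadStatistics in this era of Hadoop):

static void dumpReadStats(HdfsDataInputStream in) {
  DFSInputStream.ReadStatistics rs = in.getReadStatistics();
  System.out.println("total bytes         = " + rs.getTotalBytesRead());
  System.out.println("local bytes         = " + rs.getTotalLocalBytesRead());
  System.out.println("short-circuit bytes = " + rs.getTotalShortCircuitBytesRead());
  System.out.println("zero-copy bytes     = " + rs.getTotalZeroCopyBytesRead());
}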
Project: aliyun-oss-hadoop-fs    File: TestScrLazyPersistFiles.java (updated variant of the test above)
/**
 * Read in-memory block with Short Circuit Read
 * Note: the test uses faked RAM_DISK from physical disk.
 */
@Test
public void testRamDiskShortCircuitRead()
    throws IOException, InterruptedException, TimeoutException {
  getClusterBuilder().setUseScr(true).build();
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final int SEED = 0xFADED;
  Path path = new Path("/" + METHOD_NAME + ".dat");

  // Create a file and wait till it is persisted.
  makeRandomTestFile(path, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path, RAM_DISK);
  waitForMetric("RamDiskBlocksLazyPersisted", 1);

  HdfsDataInputStream fis = (HdfsDataInputStream) fs.open(path);

  // Verify SCR read counters
  try {
    byte[] buf = new byte[BUFFER_LENGTH];
    fis.read(0, buf, 0, BUFFER_LENGTH);
    Assert.assertEquals(BUFFER_LENGTH,
      fis.getReadStatistics().getTotalBytesRead());
    Assert.assertEquals(BUFFER_LENGTH,
      fis.getReadStatistics().getTotalShortCircuitBytesRead());
  } finally {
    fis.close();
    fis = null;
  }
}
Project: aliyun-oss-hadoop-fs    File: TestScrLazyPersistFiles.java
/**
 * Eviction of lazy persisted blocks with Short Circuit Read handle open
 * Note: the test uses faked RAM_DISK from physical disk.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void tesScrDuringEviction()
    throws Exception {
  getClusterBuilder().setUseScr(true).build();
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");

  // Create a file and wait till it is persisted.
  makeTestFile(path1, BLOCK_SIZE, true);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  waitForMetric("RamDiskBlocksLazyPersisted", 1);

  HdfsDataInputStream fis = (HdfsDataInputStream) fs.open(path1);
  try {
    // Keep an open read handle to path1 while its block is evicted below.
    byte[] buf = new byte[BUFFER_LENGTH];
    fis.read(0, buf, 0, BUFFER_LENGTH);
    triggerEviction(cluster.getDataNodes().get(0));

    // Ensure path1 is still readable from the open SCR handle.
    fis.read(0, buf, 0, BUFFER_LENGTH);
    assertThat(fis.getReadStatistics().getTotalBytesRead(),
        is((long) 2 * BUFFER_LENGTH));
    assertThat(fis.getReadStatistics().getTotalShortCircuitBytesRead(),
        is((long) 2 * BUFFER_LENGTH));
  } finally {
    IOUtils.closeQuietly(fis);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: OpenEntity.java
OpenEntity(final HdfsDataInputStream in, final long length,
    final int outBufferSize, final DFSClient dfsclient) {
  this.in = in;
  this.length = length;
  this.outBufferSize = outBufferSize;
  this.dfsclient = dfsclient;
}
Project: hadoop-plus    File: Hdfs.java (older variant: builds a DFSClient.DFSDataInputStream directly rather than calling createWrappedInputStream)
@SuppressWarnings("deprecation")
@Override
public HdfsDataInputStream open(Path f, int bufferSize) 
    throws IOException, UnresolvedLinkException {
  return new DFSClient.DFSDataInputStream(dfs.open(getUriPath(f),
      bufferSize, verifyChecksum));
}