Example source code for the Java class org.apache.hadoop.hdfs.ClientContext
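The snippets collected below come from HDFS test code in several Hadoop forks and show the two ways a ClientContext is usually reached: the static factory ClientContext.getFromConf(Configuration), and DFSClient.getClientContext() on a live client. As a quick orientation, here is a minimal sketch that uses only calls appearing in the snippets; the class name is made up for illustration, ClientContext is an internal (audience-private) HDFS class, and the HDFS client jars are assumed to be on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.ClientContext;

// Minimal sketch (class name is hypothetical). It fetches the shared client
// context for the default context name and reads the flag the tests below
// assert on: legacy short-circuit reads disable themselves after a failure.
public class ClientContextProbe {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    ClientContext context = ClientContext.getFromConf(conf);
    System.out.println("Legacy short-circuit reads disabled: "
        + context.getDisableLegacyBlockReaderLocal());
  }
}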

Project: aliyun-oss-hadoop-fs    File: TestScrLazyPersistFiles.java
@Test
public void testLegacyScrAfterEviction()
    throws IOException, InterruptedException, TimeoutException {
  getClusterBuilder().setUseScr(true)
                     .setUseLegacyBlockReaderLocal(true)
                     .build();
  doShortCircuitReadAfterEvictionTest();

  // In the legacy short-circuit read implementation, any failure is trapped
  // silently: the client falls back to a remote read and disables all
  // subsequent legacy short-circuit reads in the ClientContext.
  // Assert that legacy short-circuit reads did not get disabled here.
  ClientContext clientContext = client.getClientContext();
  Assert.assertFalse(clientContext.getDisableLegacyBlockReaderLocal());
}
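The setUseScr(true) and setUseLegacyBlockReaderLocal(true) builder calls above are test-cluster helpers. On a plain client they roughly correspond to the standard short-circuit read settings sketched below; the key names are the usual HDFS client configuration keys and are an assumption here, not something shown in the snippet.

import org.apache.hadoop.conf.Configuration;

// Hedged sketch of the client-side configuration the test builder presumably
// produces. Key names are the standard HDFS settings (assumed, not taken from
// the snippet); legacy short-circuit reads additionally require DataNode-side
// configuration of the users allowed local path access.
public class ShortCircuitConfSketch {
  public static Configuration build() {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.client.read.shortcircuit", true);            // enable short-circuit reads
    conf.setBoolean("dfs.client.use.legacy.blockreader.local", true); // use the legacy local reader
    return conf;
  }
}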
Project: hadoop    File: TestShortCircuitLocalRead.java
/** Check file content, reading as user {@code readingUser} */
static void checkFileContent(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext getClientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertFalse(getClientContext.getDisableLegacyBlockReaderLocal());
  }

  FSDataInputStream stm = fs.open(name);
  byte[] actual = new byte[expected.length-readOffset];
  stm.readFully(readOffset, actual);
  checkData(actual, readOffset, expected, "Read 2");
  stm.close();
  // Now read using a different API.
  actual = new byte[expected.length-readOffset];
  stm = fs.open(name);
  IOUtils.skipFully(stm, readOffset);
  //Read a small number of bytes first.
  int nread = stm.read(actual, 0, 3);
  nread += stm.read(actual, nread, 2);
  //Read across chunk boundary
  nread += stm.read(actual, nread, 517);
  checkData(actual, readOffset, expected, nread, "A few bytes");
  //Now read rest of it
  while (nread < actual.length) {
    int nbytes = stm.read(actual, nread, actual.length - nread);
    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(actual, readOffset, expected, "Read 3");

  if (legacyShortCircuitFails) {
    assertTrue(getClientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
Project: hadoop    File: TestShortCircuitLocalRead.java
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext clientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }

  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

  IOUtils.skipFully(stm, readOffset);

  actual.limit(3);

  //Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);

  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  //Now read rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);

    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
Project: hadoop    File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr, 
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
              peer = TcpPeerServer.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
Project: hadoop    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode the data node that holds the block
 * @param lblock the located block to read
 * @throws IOException if the block cannot be read
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
    setInetSocketAddress(targetAddr).
    setBlock(block).
    setFileName(BlockReaderFactory.getFileName(targetAddr,
                  "test-blockpoolid", block.getBlockId())).
    setBlockToken(lblock.getBlockToken()).
    setStartOffset(0).
    setLength(-1).
    setVerifyChecksum(true).
    setClientName("TestDataNodeVolumeFailure").
    setDatanodeInfo(datanode).
    setCachingStrategy(CachingStrategy.newDefaultStrategy()).
    setClientCacheContext(ClientContext.getFromConf(conf)).
    setConfiguration(conf).
    setRemotePeerFactory(new RemotePeerFactory() {
      @Override
      public Peer newConnectedPeer(InetSocketAddress addr,
          Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
          throws IOException {
        Peer peer = null;
        Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
        try {
          sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
          sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
          peer = TcpPeerServer.peerFromSocket(sock);
        } finally {
          if (peer == null) {
            IOUtils.closeSocket(sock);
          }
        }
        return peer;
      }
    }).
    build();
  blockReader.close();
}
Project: hadoop    File: TestScrLazyPersistFiles.java
private void doShortCircuitReadAfterEvictionTest() throws IOException,
    InterruptedException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);

  // Verify short-circuit read from RAM_DISK.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  File metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);

  // Verify short-circuit read from RAM_DISK once again.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // Create another file with a replica on RAM_DISK, which evicts the first.
  makeRandomTestFile(path2, BLOCK_SIZE, true, SEED);
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Verify short-circuit read still works from DEFAULT storage.  This time,
  // we'll have a checksum written during lazy persistence.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
  metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() > BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // In the legacy short-circuit read implementation, any failure is trapped
  // silently: the client falls back to a remote read and disables all
  // subsequent legacy short-circuit reads in the ClientContext.  If this test
  // run uses the legacy reader, assert that it did not get disabled.
  ClientContext clientContext = client.getClientContext();
  if (clientContext.getUseLegacyBlockReaderLocal()) {
    Assert.assertFalse(clientContext.getDisableLegacyBlockReaderLocal());
  }
}
Project: aliyun-oss-hadoop-fs    File: TestShortCircuitLocalRead.java
/** Check file content, reading as user {@code readingUser} */
static void checkFileContent(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext getClientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertFalse(getClientContext.getDisableLegacyBlockReaderLocal());
  }

  FSDataInputStream stm = fs.open(name);
  byte[] actual = new byte[expected.length-readOffset];
  stm.readFully(readOffset, actual);
  checkData(actual, readOffset, expected, "Read 2");
  stm.close();
  // Now read using a different API.
  actual = new byte[expected.length-readOffset];
  stm = fs.open(name);
  IOUtils.skipFully(stm, readOffset);
  //Read a small number of bytes first.
  int nread = stm.read(actual, 0, 3);
  nread += stm.read(actual, nread, 2);
  //Read across chunk boundary
  nread += stm.read(actual, nread, 517);
  checkData(actual, readOffset, expected, nread, "A few bytes");
  //Now read rest of it
  while (nread < actual.length) {
    int nbytes = stm.read(actual, nread, actual.length - nread);
    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(actual, readOffset, expected, "Read 3");

  if (legacyShortCircuitFails) {
    assertTrue(getClientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
Project: aliyun-oss-hadoop-fs    File: TestShortCircuitLocalRead.java
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext clientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }

  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

  IOUtils.skipFully(stm, readOffset);

  actual.limit(3);

  //Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);

  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  //Now read rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);

    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
Project: aliyun-oss-hadoop-fs    File: TestBlockTokenWithDFS.java
protected void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr,
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setTracer(FsTracer.get(conf)).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
              peer = DFSUtilClient.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
Project: aliyun-oss-hadoop-fs    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode the data node that holds the block
 * @param lblock the located block to read
 * @throws IOException if the block cannot be read
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
    setInetSocketAddress(targetAddr).
    setBlock(block).
    setFileName(BlockReaderFactory.getFileName(targetAddr,
                  "test-blockpoolid", block.getBlockId())).
    setBlockToken(lblock.getBlockToken()).
    setStartOffset(0).
    setLength(-1).
    setVerifyChecksum(true).
    setClientName("TestDataNodeVolumeFailure").
    setDatanodeInfo(datanode).
    setCachingStrategy(CachingStrategy.newDefaultStrategy()).
    setClientCacheContext(ClientContext.getFromConf(conf)).
    setConfiguration(conf).
    setTracer(FsTracer.get(conf)).
    setRemotePeerFactory(new RemotePeerFactory() {
      @Override
      public Peer newConnectedPeer(InetSocketAddress addr,
          Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
          throws IOException {
        Peer peer = null;
        Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
        try {
          sock.connect(addr, HdfsConstants.READ_TIMEOUT);
          sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
          peer = DFSUtilClient.peerFromSocket(sock);
        } finally {
          if (peer == null) {
            IOUtils.closeSocket(sock);
          }
        }
        return peer;
      }
    }).
    build();
  blockReader.close();
}
Project: big-c    File: TestShortCircuitLocalRead.java
/** Check file content, reading as user {@code readingUser} */
static void checkFileContent(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext getClientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertFalse(getClientContext.getDisableLegacyBlockReaderLocal());
  }

  FSDataInputStream stm = fs.open(name);
  byte[] actual = new byte[expected.length-readOffset];
  stm.readFully(readOffset, actual);
  checkData(actual, readOffset, expected, "Read 2");
  stm.close();
  // Now read using a different API.
  actual = new byte[expected.length-readOffset];
  stm = fs.open(name);
  IOUtils.skipFully(stm, readOffset);
  //Read a small number of bytes first.
  int nread = stm.read(actual, 0, 3);
  nread += stm.read(actual, nread, 2);
  //Read across chunk boundary
  nread += stm.read(actual, nread, 517);
  checkData(actual, readOffset, expected, nread, "A few bytes");
  //Now read rest of it
  while (nread < actual.length) {
    int nbytes = stm.read(actual, nread, actual.length - nread);
    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(actual, readOffset, expected, "Read 3");

  if (legacyShortCircuitFails) {
    assertTrue(getClientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
Project: big-c    File: TestShortCircuitLocalRead.java
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext clientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }

  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

  IOUtils.skipFully(stm, readOffset);

  actual.limit(3);

  //Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);

  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  //Now read rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);

    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
Project: big-c    File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr, 
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
              peer = TcpPeerServer.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
Project: big-c    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode the data node that holds the block
 * @param lblock the located block to read
 * @throws IOException if the block cannot be read
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
    setInetSocketAddress(targetAddr).
    setBlock(block).
    setFileName(BlockReaderFactory.getFileName(targetAddr,
                  "test-blockpoolid", block.getBlockId())).
    setBlockToken(lblock.getBlockToken()).
    setStartOffset(0).
    setLength(-1).
    setVerifyChecksum(true).
    setClientName("TestDataNodeVolumeFailure").
    setDatanodeInfo(datanode).
    setCachingStrategy(CachingStrategy.newDefaultStrategy()).
    setClientCacheContext(ClientContext.getFromConf(conf)).
    setConfiguration(conf).
    setRemotePeerFactory(new RemotePeerFactory() {
      @Override
      public Peer newConnectedPeer(InetSocketAddress addr,
          Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
          throws IOException {
        Peer peer = null;
        Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
        try {
          sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
          sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
          peer = TcpPeerServer.peerFromSocket(sock);
        } finally {
          if (peer == null) {
            IOUtils.closeSocket(sock);
          }
        }
        return peer;
      }
    }).
    build();
  blockReader.close();
}
Project: big-c    File: TestScrLazyPersistFiles.java
private void doShortCircuitReadAfterEvictionTest() throws IOException,
    InterruptedException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);

  // Verify short-circuit read from RAM_DISK.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  File metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);

  // Verify short-circuit read from RAM_DISK once again.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // Create another file with a replica on RAM_DISK, which evicts the first.
  makeRandomTestFile(path2, BLOCK_SIZE, true, SEED);
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Verify short-circuit read still works from DEFAULT storage.  This time,
  // we'll have a checksum written during lazy persistence.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
  metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() > BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // In the legacy short-circuit read implementation, any failure is trapped
  // silently: the client falls back to a remote read and disables all
  // subsequent legacy short-circuit reads in the ClientContext.  If this test
  // run uses the legacy reader, assert that it did not get disabled.
  ClientContext clientContext = client.getClientContext();
  if (clientContext.getUseLegacyBlockReaderLocal()) {
    Assert.assertFalse(clientContext.getDisableLegacyBlockReaderLocal());
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestShortCircuitLocalRead.java
/** Check file content, reading as user {@code readingUser} */
static void checkFileContent(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext getClientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertFalse(getClientContext.getDisableLegacyBlockReaderLocal());
  }

  FSDataInputStream stm = fs.open(name);
  byte[] actual = new byte[expected.length-readOffset];
  stm.readFully(readOffset, actual);
  checkData(actual, readOffset, expected, "Read 2");
  stm.close();
  // Now read using a different API.
  actual = new byte[expected.length-readOffset];
  stm = fs.open(name);
  IOUtils.skipFully(stm, readOffset);
  //Read a small number of bytes first.
  int nread = stm.read(actual, 0, 3);
  nread += stm.read(actual, nread, 2);
  //Read across chunk boundary
  nread += stm.read(actual, nread, 517);
  checkData(actual, readOffset, expected, nread, "A few bytes");
  //Now read rest of it
  while (nread < actual.length) {
    int nbytes = stm.read(actual, nread, actual.length - nread);
    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(actual, readOffset, expected, "Read 3");

  if (legacyShortCircuitFails) {
    assertTrue(getClientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestShortCircuitLocalRead.java
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext clientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }

  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

  IOUtils.skipFully(stm, readOffset);

  actual.limit(3);

  //Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);

  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  //Now read rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);

    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr, 
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
              peer = TcpPeerServer.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode the data node that holds the block
 * @param lblock the located block to read
 * @throws IOException if the block cannot be read
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
    setInetSocketAddress(targetAddr).
    setBlock(block).
    setFileName(BlockReaderFactory.getFileName(targetAddr,
                  "test-blockpoolid", block.getBlockId())).
    setBlockToken(lblock.getBlockToken()).
    setStartOffset(0).
    setLength(-1).
    setVerifyChecksum(true).
    setClientName("TestDataNodeVolumeFailure").
    setDatanodeInfo(datanode).
    setCachingStrategy(CachingStrategy.newDefaultStrategy()).
    setClientCacheContext(ClientContext.getFromConf(conf)).
    setConfiguration(conf).
    setRemotePeerFactory(new RemotePeerFactory() {
      @Override
      public Peer newConnectedPeer(InetSocketAddress addr,
          Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
          throws IOException {
        Peer peer = null;
        Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
        try {
          sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
          sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
          peer = TcpPeerServer.peerFromSocket(sock);
        } finally {
          if (peer == null) {
            IOUtils.closeSocket(sock);
          }
        }
        return peer;
      }
    }).
    build();
  blockReader.close();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestScrLazyPersistFiles.java
private void doShortCircuitReadAfterEvictionTest() throws IOException,
    InterruptedException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);

  // Verify short-circuit read from RAM_DISK.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  File metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);

  // Verify short-circuit read from RAM_DISK once again.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // Create another file with a replica on RAM_DISK, which evicts the first.
  makeRandomTestFile(path2, BLOCK_SIZE, true, SEED);
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Verify short-circuit read still works from DEFAULT storage.  This time,
  // we'll have a checksum written during lazy persistence.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
  metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() > BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // In the legacy short-circuit read implementation, any failure is trapped
  // silently: the client falls back to a remote read and disables all
  // subsequent legacy short-circuit reads in the ClientContext.  If this test
  // run uses the legacy reader, assert that it did not get disabled.
  ClientContext clientContext = client.getClientContext();
  if (clientContext.getUseLegacyBlockReaderLocal()) {
    Assert.assertFalse(clientContext.getDisableLegacyBlockReaderLocal());
  }
}
Project: FlexMap    File: TestShortCircuitLocalRead.java
/** Check file content, reading as user {@code readingUser} */
static void checkFileContent(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext getClientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertFalse(getClientContext.getDisableLegacyBlockReaderLocal());
  }

  FSDataInputStream stm = fs.open(name);
  byte[] actual = new byte[expected.length-readOffset];
  stm.readFully(readOffset, actual);
  checkData(actual, readOffset, expected, "Read 2");
  stm.close();
  // Now read using a different API.
  actual = new byte[expected.length-readOffset];
  stm = fs.open(name);
  IOUtils.skipFully(stm, readOffset);
  //Read a small number of bytes first.
  int nread = stm.read(actual, 0, 3);
  nread += stm.read(actual, nread, 2);
  //Read across chunk boundary
  nread += stm.read(actual, nread, 517);
  checkData(actual, readOffset, expected, nread, "A few bytes");
  //Now read rest of it
  while (nread < actual.length) {
    int nbytes = stm.read(actual, nread, actual.length - nread);
    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(actual, readOffset, expected, "Read 3");

  if (legacyShortCircuitFails) {
    assertTrue(getClientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
Project: FlexMap    File: TestShortCircuitLocalRead.java
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext clientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }

  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

  IOUtils.skipFully(stm, readOffset);

  actual.limit(3);

  //Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);

  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  //Now read rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);

    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
Project: FlexMap    File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr, 
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
              peer = TcpPeerServer.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
Project: FlexMap    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode the data node that holds the block
 * @param lblock the located block to read
 * @throws IOException if the block cannot be read
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
    setInetSocketAddress(targetAddr).
    setBlock(block).
    setFileName(BlockReaderFactory.getFileName(targetAddr,
                  "test-blockpoolid", block.getBlockId())).
    setBlockToken(lblock.getBlockToken()).
    setStartOffset(0).
    setLength(-1).
    setVerifyChecksum(true).
    setClientName("TestDataNodeVolumeFailure").
    setDatanodeInfo(datanode).
    setCachingStrategy(CachingStrategy.newDefaultStrategy()).
    setClientCacheContext(ClientContext.getFromConf(conf)).
    setConfiguration(conf).
    setRemotePeerFactory(new RemotePeerFactory() {
      @Override
      public Peer newConnectedPeer(InetSocketAddress addr,
          Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
          throws IOException {
        Peer peer = null;
        Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
        try {
          sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
          sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
          peer = TcpPeerServer.peerFromSocket(sock);
        } finally {
          if (peer == null) {
            IOUtils.closeSocket(sock);
          }
        }
        return peer;
      }
    }).
    build();
  blockReader.close();
}
Project: FlexMap    File: TestScrLazyPersistFiles.java
private void doShortCircuitReadAfterEvictionTest() throws IOException,
    InterruptedException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);

  // Verify short-circuit read from RAM_DISK.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  File metaFile = MiniDFSCluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);

  // Verify short-circuit read from RAM_DISK once again.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  metaFile = MiniDFSCluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // Create another file with a replica on RAM_DISK, which evicts the first.
  makeRandomTestFile(path2, BLOCK_SIZE, true, SEED);
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Verify short-circuit read still works from DEFAULT storage.  This time,
  // we'll have a checksum written during lazy persistence.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
  metaFile = MiniDFSCluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() > BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // In the legacy short-circuit read implementation, any failure is trapped
  // silently: the client falls back to a remote read and disables all
  // subsequent legacy short-circuit reads in the ClientContext.  If this test
  // run uses the legacy reader, assert that it did not get disabled.
  ClientContext clientContext = client.getClientContext();
  if (clientContext.getUseLegacyBlockReaderLocal()) {
    Assert.assertFalse(clientContext.getDisableLegacyBlockReaderLocal());
  }
}
Project: hadoop-on-lustre2    File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr, 
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
              peer = TcpPeerServer.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
Project: hadoop-on-lustre2    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode the data node that holds the block
 * @param lblock the located block to read
 * @throws IOException if the block cannot be read
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
    setInetSocketAddress(targetAddr).
    setBlock(block).
    setFileName(BlockReaderFactory.getFileName(targetAddr,
                  "test-blockpoolid", block.getBlockId())).
    setBlockToken(lblock.getBlockToken()).
    setStartOffset(0).
    setLength(-1).
    setVerifyChecksum(true).
    setClientName("TestDataNodeVolumeFailure").
    setDatanodeInfo(datanode).
    setCachingStrategy(CachingStrategy.newDefaultStrategy()).
    setClientCacheContext(ClientContext.getFromConf(conf)).
    setConfiguration(conf).
    setRemotePeerFactory(new RemotePeerFactory() {
      @Override
      public Peer newConnectedPeer(InetSocketAddress addr)
          throws IOException {
        Peer peer = null;
        Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
        try {
          sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
          sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
          peer = TcpPeerServer.peerFromSocket(sock);
        } finally {
          if (peer == null) {
            IOUtils.closeSocket(sock);
          }
        }
        return peer;
      }
    }).
    build();
  blockReader.close();
}