Example source code for the Java class org.apache.hadoop.hdfs.BlockReader

Project: aliyun-oss-hadoop-fs    File: ErasureCodingWorker.java
private Callable<Void> readFromBlock(final BlockReader reader,
    final ByteBuffer buf) {
  return new Callable<Void>() {

    @Override
    public Void call() throws Exception {
      try {
        actualReadFromBlock(reader, buf);
        return null;
      } catch (IOException e) {
        LOG.info(e.getMessage());
        throw e;
      }
    }

  };
}
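A minimal sketch of how a Callable produced by readFromBlock might be driven. The executor, timeout, and error handling below are illustrative assumptions, not part of ErasureCodingWorker:

// Hypothetical driver (assumes java.util.concurrent imports).
ExecutorService pool = Executors.newFixedThreadPool(4);
Future<Void> future = pool.submit(readFromBlock(reader, buf));
try {
  future.get(30, TimeUnit.SECONDS);  // rethrows the IOException from call()
} catch (ExecutionException e) {
  // e.getCause() is the IOException that was logged and rethrown above
} catch (TimeoutException | InterruptedException e) {
  future.cancel(true);               // give up on a stuck or interrupted read
} finally {
  pool.shutdown();
}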
Project: aliyun-oss-hadoop-fs    File: ErasureCodingWorker.java
private BlockReader newBlockReader(final ExtendedBlock block, 
    long offsetInBlock, DatanodeInfo dnInfo) {
  if (offsetInBlock >= block.getNumBytes()) {
    return null;
  }
  try {
    InetSocketAddress dnAddr = getSocketAddress4Transfer(dnInfo);
    Token<BlockTokenIdentifier> blockToken = datanode.getBlockAccessToken(
        block, EnumSet.of(BlockTokenIdentifier.AccessMode.READ));
    /*
     * This could be further improved: if the replica is local, we could
     * read directly from the DN after checking that the replica is in
     * FINALIZED state. Note that we should not use short-circuit local
     * reads, which require the domain-socket config on UNIX or the legacy
     * config on Windows.
     *
     * TODO: add proper tracer
     */
    return RemoteBlockReader2.newBlockReader(
        "dummy", block, blockToken, offsetInBlock, 
        block.getNumBytes() - offsetInBlock, true,
        "", newConnectedPeer(block, dnAddr, blockToken, dnInfo), dnInfo,
        null, cachingStrategy, datanode.getTracer());
  } catch (IOException e) {
    return null;
  }
}
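A hedged sketch of how a reader created this way is typically consumed; actualReadFromBlock and closeBlockReader are the helpers shown further down this page, and bufferSize is assumed to be initialized already:

// Illustrative only: fetch one chunk from a source datanode.
BlockReader reader = newBlockReader(block, offsetInBlock, dnInfo);
if (reader != null) {                  // null: past EOF or connect failure
  ByteBuffer buf = ByteBuffer.allocate(bufferSize);
  try {
    actualReadFromBlock(reader, buf);  // fill buf, tolerating short reads
  } finally {
    closeBlockReader(reader);          // close quietly, ignoring IOException
  }
}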
Project: indexr    File: DFSByteBufferReader.java
private void tryGetLocalFile() {
    if (tryGetLocalFileTimes >= TRY_GET_LOCAL_FILE_LIMIT) {
        return;
    }
    if (isSingleBlock && HDFS_READ_HACK_ENABLE) {
        try {
            InputStream is = input.getWrappedStream();
            if (is instanceof DFSInputStream) {
                BlockReader blockReader = MemoryUtil.getDFSInputStream_blockReader(is);
                if (blockReader != null && blockReader.isShortCircuit()) {
                    localFile = MemoryUtil.getBlockReaderLocal_dataIn(blockReader);
                }
            }
        } catch (Throwable e) {
            logger.debug("HDFS READ HACK failed.", e);
        }
    }
    tryGetLocalFileTimes++;
}
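A small usage sketch, assuming callers simply poll until either a local file is found or the retry budget is exhausted (the loop is illustrative, not taken from DFSByteBufferReader):

// Hypothetical caller: retry the hack a bounded number of times.
while (localFile == null && tryGetLocalFileTimes < TRY_GET_LOCAL_FILE_LIMIT) {
    tryGetLocalFile();
}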
Project: hadoop-EAR    File: DFSInputStream.java
/**
 * Close the given BlockReader and cache its socket.
 */
private void closeBlockReader(BlockReader reader, boolean reuseConnection) 
    throws IOException {
  if (reader.hasSentStatusCode()) {
    Socket oldSock = reader.takeSocket();
    if (dfsClient.getDataTransferProtocolVersion() < 
        DataTransferProtocol.READ_REUSE_CONNECTION_VERSION ||
        !reuseConnection) {
      // close the socket for the old datanode.
      if (oldSock != null) {
        IOUtils.closeSocket(oldSock);
      }
    } else {
      socketCache.put(oldSock);
    }
  }
  reader.close();
}
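Both values of the reuseConnection flag matter here; an assumed illustration of the two call sites one would expect around this helper (not verbatim from DFSInputStream):

closeBlockReader(reader, true);   // normal read path: cache the socket for reuse
closeBlockReader(reader, false);  // error path: force-close the socket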
Project: hadoop-plus    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode
 * @param lblock
 * @throws IOException
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  Socket s = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  s = NetUtils.getDefaultSocketFactory(conf).createSocket();
  s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
  s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

  String file = BlockReaderFactory.getFileName(targetAddr, 
      "test-blockpoolid",
      block.getBlockId());
  BlockReader blockReader =
    BlockReaderFactory.newBlockReader(new DFSClient.Conf(conf), file, block,
      lblock.getBlockToken(), 0, -1, true, "TestDataNodeVolumeFailure",
      TcpPeerServer.peerFromSocket(s), datanode, null, null, null, false);
  blockReader.close();
}
Project: hadoop-TCP    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode
 * @param lblock
 * @throws IOException
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  Socket s = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  s = NetUtils.getDefaultSocketFactory(conf).createSocket();
  s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
  s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

  String file = BlockReaderFactory.getFileName(targetAddr, 
      "test-blockpoolid",
      block.getBlockId());
  BlockReader blockReader =
    BlockReaderFactory.newBlockReader(new DFSClient.Conf(conf), file, block,
      lblock.getBlockToken(), 0, -1, true, "TestDataNodeVolumeFailure",
      TcpPeerServer.peerFromSocket(s), datanode, null, null, null, false,
      CachingStrategy.newDefaultStrategy());
  blockReader.close();
}
Project: hardfs    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode
 * @param lblock
 * @throws IOException
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  Socket s = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  s = NetUtils.getDefaultSocketFactory(conf).createSocket();
  s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
  s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

  String file = BlockReaderFactory.getFileName(targetAddr, 
      "test-blockpoolid",
      block.getBlockId());
  BlockReader blockReader =
    BlockReaderFactory.newBlockReader(new DFSClient.Conf(conf), file, block,
      lblock.getBlockToken(), 0, -1, true, "TestDataNodeVolumeFailure",
      TcpPeerServer.peerFromSocket(s), datanode, null, null, null, false,
      CachingStrategy.newDefaultStrategy());
  blockReader.close();
}
Project: cumulus    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode
 * @param lblock
 * @throws IOException
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  Socket s = null;
  BlockReader blockReader = null; 
  Block block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getName());

  s = new Socket();
  s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
  s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

  String file = BlockReader.getFileName(targetAddr, block.getBlockId());
  blockReader = 
    BlockReader.newBlockReader(s, file, block, lblock
      .getBlockToken(), 0, -1, 4096);

  // nothing - if it fails, it will throw an exception
}
Project: aliyun-oss-hadoop-fs    File: ErasureCodingWorker.java
private void initChecksumAndBufferSizeIfNeeded(BlockReader blockReader) {
  if (checksum == null) {
    checksum = blockReader.getDataChecksum();
    bytesPerChecksum = checksum.getBytesPerChecksum();
    // Round bufferSize down to a whole multiple of bytesPerChecksum
    int readBufferSize = STRIPED_READ_BUFFER_SIZE;
    bufferSize = readBufferSize < bytesPerChecksum ? bytesPerChecksum :
      readBufferSize - readBufferSize % bytesPerChecksum;
  } else {
    assert blockReader.getDataChecksum().equals(checksum);
  }
}
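Concretely, the arithmetic above rounds the striped read buffer down to a whole number of checksum chunks. A worked example with assumed values:

// Assumed values for illustration only.
int bytesPerChecksum = 512;
int readBufferSize = 64 * 1024 + 100;  // 65636, not a multiple of 512
int bufferSize = readBufferSize < bytesPerChecksum
    ? bytesPerChecksum
    : readBufferSize - readBufferSize % bytesPerChecksum;
// bufferSize == 65536, i.e. exactly 128 checksum chunks of 512 bytes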
Project: aliyun-oss-hadoop-fs    File: ErasureCodingWorker.java
/**
 * Read bytes from block
 */
private void actualReadFromBlock(BlockReader reader, ByteBuffer buf)
    throws IOException {
  int len = buf.remaining();
  int n = 0;
  while (n < len) {
    int nread = reader.read(buf);
    if (nread <= 0) {
      break;
    }
    n += nread;
  }
}
Project: aliyun-oss-hadoop-fs    File: ErasureCodingWorker.java
private void closeBlockReader(BlockReader blockReader) {
  try {
    if (blockReader != null) {
      blockReader.close();
    }
  } catch (IOException e) {
    // ignore
  }
}
Project: hadoop-EAR    File: DFSInputStream.java
protected BlockReader getBlockReader(int protocolVersion, int namespaceId,
    InetSocketAddress dnAddr, String file, long blockId,
    long generationStamp, long startOffset, long len, int bufferSize,
    boolean verifyChecksum, String clientName, long bytesToCheckReadSpeed,
    long minReadSpeedBps, boolean reuseConnection,
    FSClientReadProfilingData cliData)
    throws IOException {
  return getBlockReader(protocolVersion, namespaceId, dnAddr, file, blockId,
      generationStamp, startOffset, len, bufferSize, verifyChecksum,
      clientName, bytesToCheckReadSpeed, minReadSpeedBps, reuseConnection,
      cliData, options);
}
Project: cumulus    File: JspHelper.java
public static void streamBlockInAscii(InetSocketAddress addr, 
    long blockId, Token<BlockTokenIdentifier> blockToken, long genStamp,
    long blockSize, long offsetIntoBlock, long chunkSizeToView,
    JspWriter out, Configuration conf) throws IOException {
  if (chunkSizeToView == 0) return;
  Socket s = new Socket();
  s.connect(addr, HdfsConstants.READ_TIMEOUT);
  s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

  long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

  // Use the block name for file name.
  String file = BlockReader.getFileName(addr, blockId);
  BlockReader blockReader = BlockReader.newBlockReader(s, file,
      new Block(blockId, 0, genStamp), blockToken,
      offsetIntoBlock, amtToRead, conf.getInt("io.file.buffer.size", 4096));

  byte[] buf = new byte[(int)amtToRead];
  int readOffset = 0;
  int retries = 2;
  while ( amtToRead > 0 ) {
    int numRead;
    try {
      numRead = blockReader.readAll(buf, readOffset, (int)amtToRead);
    }
    catch (IOException e) {
      retries--;
      if (retries == 0)
        throw new IOException("Could not read data from datanode");
      continue;
    }
    amtToRead -= numRead;
    readOffset += numRead;
  }
  blockReader = null;
  s.close();
  out.print(HtmlQuoting.quoteHtmlChars(new String(buf)));
}
Project: cumulus    File: TestDataXceiver.java
/**
 * Test that we don't call verifiedByClient() when the client only
 * reads a partial block.
 */
@Test
public void testCompletePartialRead() throws Exception {
  // Ask for half the file
  BlockReader reader = util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024 / 2);
  DataNode dn = util.getDataNode(testBlock);
  DataBlockScanner scanner = spy(dn.blockScanner);
  dn.blockScanner = scanner;

  // And read half the file
  util.readAndCheckEOS(reader, FILE_SIZE_K * 1024 / 2, true);
  verify(scanner, never()).verifiedByClient(Mockito.isA(Block.class));
  reader.close();
}
Project: hadoop    File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr, 
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
              peer = TcpPeerServer.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
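In the surrounding test this helper is exercised in both directions; a hedged sketch of the two invocations (the token setup between them is elided):

tryRead(conf, lblock, true);   // valid block token: the read must succeed
// ... invalidate or expire the block token ...
tryRead(conf, lblock, false);  // invalid token: expect InvalidBlockTokenException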
Project: hadoop    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode
 * @param lblock
 * @throws IOException
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
    setInetSocketAddress(targetAddr).
    setBlock(block).
    setFileName(BlockReaderFactory.getFileName(targetAddr,
                  "test-blockpoolid", block.getBlockId())).
    setBlockToken(lblock.getBlockToken()).
    setStartOffset(0).
    setLength(-1).
    setVerifyChecksum(true).
    setClientName("TestDataNodeVolumeFailure").
    setDatanodeInfo(datanode).
    setCachingStrategy(CachingStrategy.newDefaultStrategy()).
    setClientCacheContext(ClientContext.getFromConf(conf)).
    setConfiguration(conf).
    setRemotePeerFactory(new RemotePeerFactory() {
      @Override
      public Peer newConnectedPeer(InetSocketAddress addr,
          Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
          throws IOException {
        Peer peer = null;
        Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
        try {
          sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
          sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
          peer = TcpPeerServer.peerFromSocket(sock);
        } finally {
          if (peer == null) {
            IOUtils.closeSocket(sock);
          }
        }
        return peer;
      }
    }).
    build();
  blockReader.close();
}
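A hedged sketch of driving accessBlock across every replica of a block; the dfsClient handle, path, and getLocatedBlocks call are assumptions for illustration:

// Illustrative: probe each replica of the first block of a file.
LocatedBlock lb = dfsClient.getLocatedBlocks(path, 0)
    .getLocatedBlocks().get(0);
for (DatanodeInfo dn : lb.getLocations()) {
  accessBlock(dn, lb);  // throws IOException if the replica is unreadable
}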
Project: aliyun-oss-hadoop-fs    File: TestBlockTokenWithDFS.java
protected void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr,
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setTracer(FsTracer.get(conf)).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
              peer = DFSUtilClient.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
Project: aliyun-oss-hadoop-fs    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode
 * @param lblock
 * @throws IOException
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
    setInetSocketAddress(targetAddr).
    setBlock(block).
    setFileName(BlockReaderFactory.getFileName(targetAddr,
                  "test-blockpoolid", block.getBlockId())).
    setBlockToken(lblock.getBlockToken()).
    setStartOffset(0).
    setLength(-1).
    setVerifyChecksum(true).
    setClientName("TestDataNodeVolumeFailure").
    setDatanodeInfo(datanode).
    setCachingStrategy(CachingStrategy.newDefaultStrategy()).
    setClientCacheContext(ClientContext.getFromConf(conf)).
    setConfiguration(conf).
    setTracer(FsTracer.get(conf)).
    setRemotePeerFactory(new RemotePeerFactory() {
      @Override
      public Peer newConnectedPeer(InetSocketAddress addr,
          Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
          throws IOException {
        Peer peer = null;
        Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
        try {
          sock.connect(addr, HdfsConstants.READ_TIMEOUT);
          sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
          peer = DFSUtilClient.peerFromSocket(sock);
        } finally {
          if (peer == null) {
            IOUtils.closeSocket(sock);
          }
        }
        return peer;
      }
    }).
    build();
  blockReader.close();
}
Project: big-c    File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr, 
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
              peer = TcpPeerServer.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
Project: big-c    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode
 * @param lblock
 * @throws IOException
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
    setInetSocketAddress(targetAddr).
    setBlock(block).
    setFileName(BlockReaderFactory.getFileName(targetAddr,
                  "test-blockpoolid", block.getBlockId())).
    setBlockToken(lblock.getBlockToken()).
    setStartOffset(0).
    setLength(-1).
    setVerifyChecksum(true).
    setClientName("TestDataNodeVolumeFailure").
    setDatanodeInfo(datanode).
    setCachingStrategy(CachingStrategy.newDefaultStrategy()).
    setClientCacheContext(ClientContext.getFromConf(conf)).
    setConfiguration(conf).
    setRemotePeerFactory(new RemotePeerFactory() {
      @Override
      public Peer newConnectedPeer(InetSocketAddress addr,
          Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
          throws IOException {
        Peer peer = null;
        Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
        try {
          sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
          sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
          peer = TcpPeerServer.peerFromSocket(sock);
        } finally {
          if (peer == null) {
            IOUtils.closeSocket(sock);
          }
        }
        return peer;
      }
    }).
    build();
  blockReader.close();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr, 
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
              peer = TcpPeerServer.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode
 * @param lblock
 * @throws IOException
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
    setInetSocketAddress(targetAddr).
    setBlock(block).
    setFileName(BlockReaderFactory.getFileName(targetAddr,
                  "test-blockpoolid", block.getBlockId())).
    setBlockToken(lblock.getBlockToken()).
    setStartOffset(0).
    setLength(-1).
    setVerifyChecksum(true).
    setClientName("TestDataNodeVolumeFailure").
    setDatanodeInfo(datanode).
    setCachingStrategy(CachingStrategy.newDefaultStrategy()).
    setClientCacheContext(ClientContext.getFromConf(conf)).
    setConfiguration(conf).
    setRemotePeerFactory(new RemotePeerFactory() {
      @Override
      public Peer newConnectedPeer(InetSocketAddress addr,
          Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
          throws IOException {
        Peer peer = null;
        Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
        try {
          sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
          sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
          peer = TcpPeerServer.peerFromSocket(sock);
        } finally {
          if (peer == null) {
            IOUtils.closeSocket(sock);
          }
        }
        return peer;
      }
    }).
    build();
  blockReader.close();
}
Project: hadoop-EAR    File: TestReadSlowDataNode.java
/**
 * Test that a slow datanode is detected during reads and marked dead.
 * 
 * @throws NoSuchFieldException
 * @throws SecurityException
 * @throws IllegalAccessException
 * @throws IllegalArgumentException
 */
public void testSlowDn() throws IOException, SecurityException,
    NoSuchFieldException, IllegalArgumentException, IllegalAccessException {
  Configuration conf = new Configuration();
  conf.setLong("dfs.bytes.to.check.read.speed", 128 * 1024);
  conf.setLong("dfs.min.read.speed.bps", 1024 * 200);
  conf.setBoolean("dfs.read.switch.for.slow", true);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  FileSystem fs = cluster.getFileSystem();
  FSDataInputStream in = null;
  try {

    // create a new file, write to it and close it.
    //
    Path file1 = new Path("/filestatus.dat");
    FSDataOutputStream stm = createFile(fs, file1, 2);
    writeFile(stm);
    stm.close();

    in = fs.open(file1);
    in.readByte();

    DFSInputStream dfsClientIn = findDFSClientInputStream(in);      
    Field blockReaderField = DFSInputStream.class.getDeclaredField("blockReader");
    blockReaderField.setAccessible(true);
    BlockReader blockReader = (BlockReader) blockReaderField.get(dfsClientIn);

    blockReader.setArtificialSlowdown(1000);
    blockReader.isReadLocal = false;
    blockReader.isReadRackLocal = false;
    for (int i = 0; i < 1024; i++) {
      in.readByte();
    }

    blockReader.setArtificialSlowdown(0);
    for (int i = 1024; i < fileSize - 1; i++) {
      in.readByte();
    }

    ConcurrentHashMap<DatanodeInfo, DatanodeInfo> deadNodes = getDeadNodes(dfsClientIn);
    TestCase.assertEquals(1, deadNodes.size());
  } finally {
    if (in != null) {
      in.close();
    }
    fs.close();
    cluster.shutdown();
  }
}
Project: hadoop-EAR    File: JspHelper.java
public void streamBlockInAscii(InetSocketAddress addr, int namespaceId,
                               long blockId, long genStamp, long blockSize,
                               long offsetIntoBlock, long chunkSizeToView, JspWriter out) 
  throws IOException {
  if (chunkSizeToView == 0) return;
  Socket s = new Socket();
  s.connect(addr, HdfsConstants.READ_TIMEOUT);
  s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

  long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

  // Use the block name for file name.
  BlockReader blockReader =
      BlockReader.newBlockReader(DataTransferProtocol.DATA_TRANSFER_VERSION,
          namespaceId, s, addr.toString() + ":" + blockId,
          blockId, genStamp, offsetIntoBlock, amtToRead,
          conf.getInt("io.file.buffer.size", 4096));

  byte[] buf = new byte[(int)amtToRead];
  int readOffset = 0;
  int retries = 2;
  while ( amtToRead > 0 ) {
    int numRead;
    try {
      numRead = blockReader.readAll(buf, readOffset, (int)amtToRead);
    }
    catch (IOException e) {
      retries--;
      if (retries == 0)
        throw new IOException("Could not read data from datanode");
      continue;
    }
    amtToRead -= numRead;
    readOffset += numRead;
  }
  blockReader = null;
  s.close();
  out.print(new String(buf));
}
Project: hadoop-plus    File: JspHelper.java
public static void streamBlockInAscii(InetSocketAddress addr, String poolId,
    long blockId, Token<BlockTokenIdentifier> blockToken, long genStamp,
    long blockSize, long offsetIntoBlock, long chunkSizeToView,
    JspWriter out, Configuration conf, DFSClient.Conf dfsConf,
    DataEncryptionKey encryptionKey)
        throws IOException {
  if (chunkSizeToView == 0) return;
  Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
  s.connect(addr, HdfsServerConstants.READ_TIMEOUT);
  s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

  int amtToRead = (int)Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

  // Use the block name for file name.
  String file = BlockReaderFactory.getFileName(addr, poolId, blockId);
  BlockReader blockReader = BlockReaderFactory.newBlockReader(dfsConf, file,
      new ExtendedBlock(poolId, blockId, 0, genStamp), blockToken,
      offsetIntoBlock, amtToRead,  true,
      "JspHelper", TcpPeerServer.peerFromSocketAndKey(s, encryptionKey),
      new DatanodeID(addr.getAddress().getHostAddress(),
          addr.getHostName(), poolId, addr.getPort(), 0, 0), null,
          null, null, false);

  final byte[] buf = new byte[amtToRead];
  int readOffset = 0;
  int retries = 2;
  while ( amtToRead > 0 ) {
    int numRead = amtToRead;
    try {
      blockReader.readFully(buf, readOffset, amtToRead);
    }
    catch (IOException e) {
      retries--;
      if (retries == 0)
        throw new IOException("Could not read data from datanode");
      continue;
    }
    amtToRead -= numRead;
    readOffset += numRead;
  }
  blockReader.close();
  out.print(HtmlQuoting.quoteHtmlChars(new String(buf, Charsets.UTF_8)));
}
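The retry loop above recurs almost verbatim across the JspHelper variants on this page. Since readFully either fills the requested range or throws, the same behavior can be written as a standalone helper; a sketch under that assumption:

// Sketch: read exactly len bytes, retrying the whole range on failure.
static void readFullyWithRetries(BlockReader blockReader, byte[] buf, int len)
    throws IOException {
  int retries = 2;
  while (true) {
    try {
      blockReader.readFully(buf, 0, len);  // all-or-throw semantics
      return;
    } catch (IOException e) {
      if (--retries == 0) {
        throw new IOException("Could not read data from datanode", e);
      }
    }
  }
}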
Project: hadoop-plus    File: TestBlockTokenWithDFS.java
private static void tryRead(Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  Socket s = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
    s = NetUtils.getDefaultSocketFactory(conf).createSocket();
    s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
    s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

    String file = BlockReaderFactory.getFileName(targetAddr, 
        "test-blockpoolid", block.getBlockId());
    blockReader = BlockReaderFactory.newBlockReader(
        new DFSClient.Conf(conf), file, block, lblock.getBlockToken(), 0, -1,
        true, "TestBlockTokenWithDFS", TcpPeerServer.peerFromSocket(s),
        nodes[0], null, null, null, false);

  } catch (IOException ex) {
    if (ex instanceof InvalidBlockTokenException) {
      assertFalse("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", shouldSucceed);
      return;
    }
    fail("OP_READ_BLOCK failed due to reasons other than access token: "
        + StringUtils.stringifyException(ex));
  } finally {
    if (s != null) {
      try {
        s.close();
      } catch (IOException iex) {
      } finally {
        s = null;
      }
    }
  }
  if (blockReader == null) {
    fail("OP_READ_BLOCK failed due to reasons other than access token");
  }
  assertTrue("OP_READ_BLOCK: access token is valid, "
      + "when it is expected to be invalid", shouldSucceed);
}
Project: FlexMap    File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr, 
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
              peer = TcpPeerServer.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
Project: FlexMap    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode
 * @param lblock
 * @throws IOException
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
    setInetSocketAddress(targetAddr).
    setBlock(block).
    setFileName(BlockReaderFactory.getFileName(targetAddr,
                  "test-blockpoolid", block.getBlockId())).
    setBlockToken(lblock.getBlockToken()).
    setStartOffset(0).
    setLength(-1).
    setVerifyChecksum(true).
    setClientName("TestDataNodeVolumeFailure").
    setDatanodeInfo(datanode).
    setCachingStrategy(CachingStrategy.newDefaultStrategy()).
    setClientCacheContext(ClientContext.getFromConf(conf)).
    setConfiguration(conf).
    setRemotePeerFactory(new RemotePeerFactory() {
      @Override
      public Peer newConnectedPeer(InetSocketAddress addr,
          Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
          throws IOException {
        Peer peer = null;
        Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
        try {
          sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
          sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
          peer = TcpPeerServer.peerFromSocket(sock);
        } finally {
          if (peer == null) {
            IOUtils.closeSocket(sock);
          }
        }
        return peer;
      }
    }).
    build();
  blockReader.close();
}
Project: hops    File: JspHelper.java
public static void streamBlockInAscii(InetSocketAddress addr, String poolId,
    long blockId, Token<BlockTokenIdentifier> blockToken, long genStamp,
    long blockSize, long offsetIntoBlock, long chunkSizeToView, JspWriter out,
    Configuration conf, DataEncryptionKey encryptionKey) throws IOException {
  if (chunkSizeToView == 0) {
    return;
  }
  Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
  s.connect(addr, HdfsServerConstants.READ_TIMEOUT);
  s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

  int amtToRead =
      (int) Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

  // Use the block name for file name.
  String file = BlockReaderFactory.getFileName(addr, poolId, blockId);
  BlockReader blockReader = BlockReaderFactory.newBlockReader(conf, s, file,
      new ExtendedBlock(poolId, blockId, 0, genStamp), blockToken,
      offsetIntoBlock, amtToRead, encryptionKey);

  byte[] buf = new byte[(int) amtToRead];
  int readOffset = 0;
  int retries = 2;
  while (amtToRead > 0) {
    int numRead = amtToRead;
    try {
      blockReader.readFully(buf, readOffset, amtToRead);
    } catch (IOException e) {
      retries--;
      if (retries == 0) {
        throw new IOException("Could not read data from datanode");
      }
      continue;
    }
    amtToRead -= numRead;
    readOffset += numRead;
  }
  blockReader = null;
  s.close();
  out.print(HtmlQuoting.quoteHtmlChars(new String(buf, Charsets.UTF_8)));
}
Project: hops    File: TestBlockTokenWithDFS.java
private static void tryRead(Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  Socket s = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
    s = NetUtils.getDefaultSocketFactory(conf).createSocket();
    s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
    s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

    String file = BlockReaderFactory
        .getFileName(targetAddr, "test-blockpoolid", block.getBlockId());
    blockReader = BlockReaderFactory
        .newBlockReader(conf, s, file, block, lblock.getBlockToken(), 0, -1,
            null);

  } catch (IOException ex) {
    if (ex instanceof InvalidBlockTokenException) {
      assertFalse("OP_READ_BLOCK: access token is invalid, " +
          "when it is expected to be valid", shouldSucceed);
      return;
    }
    fail("OP_READ_BLOCK failed due to reasons other than access token: " +
        StringUtils.stringifyException(ex));
  } finally {
    if (s != null) {
      try {
        s.close();
      } catch (IOException iex) {
      } finally {
        s = null;
      }
    }
  }
  if (blockReader == null) {
    fail("OP_READ_BLOCK failed due to reasons other than access token");
  }
  assertTrue("OP_READ_BLOCK: access token is valid, " +
      "when it is expected to be invalid", shouldSucceed);
}
Project: hadoop-TCP    File: JspHelper.java
public static void streamBlockInAscii(InetSocketAddress addr, String poolId,
    long blockId, Token<BlockTokenIdentifier> blockToken, long genStamp,
    long blockSize, long offsetIntoBlock, long chunkSizeToView,
    JspWriter out, Configuration conf, DFSClient.Conf dfsConf,
    DataEncryptionKey encryptionKey)
        throws IOException {
  if (chunkSizeToView == 0) return;
  Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
  s.connect(addr, HdfsServerConstants.READ_TIMEOUT);
  s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

  int amtToRead = (int)Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

  // Use the block name for file name.
  String file = BlockReaderFactory.getFileName(addr, poolId, blockId);
  BlockReader blockReader = BlockReaderFactory.newBlockReader(dfsConf, file,
      new ExtendedBlock(poolId, blockId, 0, genStamp), blockToken,
      offsetIntoBlock, amtToRead,  true,
      "JspHelper", TcpPeerServer.peerFromSocketAndKey(s, encryptionKey),
      new DatanodeID(addr.getAddress().getHostAddress(),
          addr.getHostName(), poolId, addr.getPort(), 0, 0, 0), null,
          null, null, false, CachingStrategy.newDefaultStrategy());

  final byte[] buf = new byte[amtToRead];
  int readOffset = 0;
  int retries = 2;
  while ( amtToRead > 0 ) {
    int numRead = amtToRead;
    try {
      blockReader.readFully(buf, readOffset, amtToRead);
    }
    catch (IOException e) {
      retries--;
      if (retries == 0)
        throw new IOException("Could not read data from datanode");
      continue;
    }
    amtToRead -= numRead;
    readOffset += numRead;
  }
  blockReader.close();
  out.print(HtmlQuoting.quoteHtmlChars(new String(buf, Charsets.UTF_8)));
}
Project: hadoop-TCP    File: TestBlockTokenWithDFS.java
private static void tryRead(Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  Socket s = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
    s = NetUtils.getDefaultSocketFactory(conf).createSocket();
    s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
    s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

    String file = BlockReaderFactory.getFileName(targetAddr, 
        "test-blockpoolid", block.getBlockId());
    blockReader = BlockReaderFactory.newBlockReader(
        new DFSClient.Conf(conf), file, block, lblock.getBlockToken(), 0, -1,
        true, "TestBlockTokenWithDFS", TcpPeerServer.peerFromSocket(s),
        nodes[0], null, null, null, false,
        CachingStrategy.newDefaultStrategy());

  } catch (IOException ex) {
    if (ex instanceof InvalidBlockTokenException) {
      assertFalse("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", shouldSucceed);
      return;
    }
    fail("OP_READ_BLOCK failed due to reasons other than access token: "
        + StringUtils.stringifyException(ex));
  } finally {
    if (s != null) {
      try {
        s.close();
      } catch (IOException iex) {
      } finally {
        s = null;
      }
    }
  }
  if (blockReader == null) {
    fail("OP_READ_BLOCK failed due to reasons other than access token");
  }
  assertTrue("OP_READ_BLOCK: access token is valid, "
      + "when it is expected to be invalid", shouldSucceed);
}
Project: hardfs    File: JspHelper.java
public static void streamBlockInAscii(InetSocketAddress addr, String poolId,
    long blockId, Token<BlockTokenIdentifier> blockToken, long genStamp,
    long blockSize, long offsetIntoBlock, long chunkSizeToView,
    JspWriter out, Configuration conf, DFSClient.Conf dfsConf,
    DataEncryptionKey encryptionKey)
        throws IOException {
  if (chunkSizeToView == 0) return;
  Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
  s.connect(addr, HdfsServerConstants.READ_TIMEOUT);
  s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

  int amtToRead = (int)Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

  // Use the block name for file name.
  String file = BlockReaderFactory.getFileName(addr, poolId, blockId);
  BlockReader blockReader = BlockReaderFactory.newBlockReader(dfsConf, file,
      new ExtendedBlock(poolId, blockId, 0, genStamp), blockToken,
      offsetIntoBlock, amtToRead,  true,
      "JspHelper", TcpPeerServer.peerFromSocketAndKey(s, encryptionKey),
      new DatanodeID(addr.getAddress().getHostAddress(),
          addr.getHostName(), poolId, addr.getPort(), 0, 0, 0), null,
          null, null, false, CachingStrategy.newDefaultStrategy());

  final byte[] buf = new byte[amtToRead];
  int readOffset = 0;
  int retries = 2;
  while ( amtToRead > 0 ) {
    int numRead = amtToRead;
    try {
      blockReader.readFully(buf, readOffset, amtToRead);
    }
    catch (IOException e) {
      retries--;
      if (retries == 0)
        throw new IOException("Could not read data from datanode");
      continue;
    }
    amtToRead -= numRead;
    readOffset += numRead;
  }
  blockReader.close();
  out.print(HtmlQuoting.quoteHtmlChars(new String(buf, Charsets.UTF_8)));
}
Project: hardfs    File: TestBlockTokenWithDFS.java
private static void tryRead(Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  Socket s = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
    s = NetUtils.getDefaultSocketFactory(conf).createSocket();
    s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
    s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

    String file = BlockReaderFactory.getFileName(targetAddr, 
        "test-blockpoolid", block.getBlockId());
    blockReader = BlockReaderFactory.newBlockReader(
        new DFSClient.Conf(conf), file, block, lblock.getBlockToken(), 0, -1,
        true, "TestBlockTokenWithDFS", TcpPeerServer.peerFromSocket(s),
        nodes[0], null, null, null, false,
        CachingStrategy.newDefaultStrategy());

  } catch (IOException ex) {
    if (ex instanceof InvalidBlockTokenException) {
      assertFalse("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", shouldSucceed);
      return;
    }
    fail("OP_READ_BLOCK failed due to reasons other than access token: "
        + StringUtils.stringifyException(ex));
  } finally {
    if (s != null) {
      try {
        s.close();
      } catch (IOException iex) {
      } finally {
        s = null;
      }
    }
  }
  if (blockReader == null) {
    fail("OP_READ_BLOCK failed due to reasons other than access token");
  }
  assertTrue("OP_READ_BLOCK: access token is valid, "
      + "when it is expected to be invalid", shouldSucceed);
}
Project: hadoop-on-lustre2    File: TestBlockTokenWithDFS.java
private static void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr, 
                      "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
              peer = TcpPeerServer.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
Project: hadoop-on-lustre2    File: TestDataNodeVolumeFailure.java
/**
 * Try to access a block on a data node; throws an exception on failure.
 * @param datanode
 * @param lblock
 * @throws IOException
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = lblock.getBlock(); 

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
    setInetSocketAddress(targetAddr).
    setBlock(block).
    setFileName(BlockReaderFactory.getFileName(targetAddr,
                  "test-blockpoolid", block.getBlockId())).
    setBlockToken(lblock.getBlockToken()).
    setStartOffset(0).
    setLength(-1).
    setVerifyChecksum(true).
    setClientName("TestDataNodeVolumeFailure").
    setDatanodeInfo(datanode).
    setCachingStrategy(CachingStrategy.newDefaultStrategy()).
    setClientCacheContext(ClientContext.getFromConf(conf)).
    setConfiguration(conf).
    setRemotePeerFactory(new RemotePeerFactory() {
      @Override
      public Peer newConnectedPeer(InetSocketAddress addr)
          throws IOException {
        Peer peer = null;
        Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
        try {
          sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
          sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
          peer = TcpPeerServer.peerFromSocket(sock);
        } finally {
          if (peer == null) {
            IOUtils.closeSocket(sock);
          }
        }
        return peer;
      }
    }).
    build();
  blockReader.close();
}
Project: cumulus    File: TestBlockTokenWithDFS.java
private static void tryRead(Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  Socket s = null;
  BlockReader blockReader = null;
  Block block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
    s = new Socket();
    s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
    s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

    String file = BlockReader.getFileName(targetAddr, block.getBlockId());
    blockReader = BlockReader.newBlockReader(s, file, block, 
        lblock.getBlockToken(), 0, -1, 
        conf.getInt("io.file.buffer.size", 4096));

  } catch (IOException ex) {
    if (ex instanceof InvalidBlockTokenException) {
      assertFalse("OP_READ_BLOCK: access token is invalid, "
          + "when it is expected to be valid", shouldSucceed);
      return;
    }
    fail("OP_READ_BLOCK failed due to reasons other than access token");
  } finally {
    if (s != null) {
      try {
        s.close();
      } catch (IOException iex) {
      } finally {
        s = null;
      }
    }
  }
  if (blockReader == null) {
    fail("OP_READ_BLOCK failed due to reasons other than access token");
  }
  assertTrue("OP_READ_BLOCK: access token is valid, "
      + "when it is expected to be invalid", shouldSucceed);
}
Project: RDFS    File: TestReadSlowDataNode.java
/**
 * Test that a slow datanode is detected during reads and marked dead.
 * 
 * @throws NoSuchFieldException
 * @throws SecurityException
 * @throws IllegalAccessException
 * @throws IllegalArgumentException
 */
public void testSlowDn() throws IOException, SecurityException,
    NoSuchFieldException, IllegalArgumentException, IllegalAccessException {
  Configuration conf = new Configuration();
  conf.setLong("dfs.min.read.speed.bps", 1024 * 200);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  FileSystem fs = cluster.getFileSystem();
  FSDataInputStream in = null;
  try {

    // create a new file, write to it and close it.
    //
    Path file1 = new Path("/filestatus.dat");
    FSDataOutputStream stm = createFile(fs, file1, 2);
    writeFile(stm);
    stm.close();

    in = fs.open(file1);
    in.readByte();

    DFSInputStream dfsClientIn = findDFSClientInputStream(in);      
    Field blockReaderField = DFSInputStream.class.getDeclaredField("blockReader");
    blockReaderField.setAccessible(true);
    BlockReader blockReader = (BlockReader) blockReaderField.get(dfsClientIn);

    blockReader.setArtificialSlowdown(1000);
    blockReader.isReadLocal = false;
    blockReader.isReadRackLocal = false;
    blockReader.ENABLE_THROW_FOR_SLOW = true;
    for (int i = 0; i < 1024; i++) {
      in.readByte();
    }

    blockReader.setArtificialSlowdown(0);
    for (int i = 1024; i < fileSize - 1; i++) {
      in.readByte();
    }

    ConcurrentHashMap<DatanodeInfo, DatanodeInfo> deadNodes = getDeadNodes(dfsClientIn);
    TestCase.assertEquals(1, deadNodes.size());
  } finally {
    if (in != null) {
      in.close();
    }
    fs.close();
    cluster.shutdown();
  }
}
Project: RDFS    File: JspHelper.java
public void streamBlockInAscii(InetSocketAddress addr, int namespaceId,
                               long blockId, long genStamp, long blockSize,
                               long offsetIntoBlock, long chunkSizeToView, JspWriter out) 
  throws IOException {
  if (chunkSizeToView == 0) return;
  Socket s = new Socket();
  s.connect(addr, HdfsConstants.READ_TIMEOUT);
  s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

  long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

  // Use the block name for file name.
  BlockReader blockReader =
      BlockReader.newBlockReader(DataTransferProtocol.DATA_TRANSFER_VERSION,
          namespaceId, s, addr.toString() + ":" + blockId,
          blockId, genStamp, offsetIntoBlock, amtToRead,
          conf.getInt("io.file.buffer.size", 4096));

  byte[] buf = new byte[(int)amtToRead];
  int readOffset = 0;
  int retries = 2;
  while ( amtToRead > 0 ) {
    int numRead;
    try {
      numRead = blockReader.readAll(buf, readOffset, (int)amtToRead);
    }
    catch (IOException e) {
      retries--;
      if (retries == 0)
        throw new IOException("Could not read data from datanode");
      continue;
    }
    amtToRead -= numRead;
    readOffset += numRead;
  }
  blockReader = null;
  s.close();
  out.print(new String(buf));
}
Project: aliyun-oss-hadoop-fs    File: ErasureCodingWorker.java
/**
 * StripedReader is used to read from one source DN; it contains a block
 * reader, a buffer, and the striped block index.
 * A StripedReader is allocated only once per source, and the readers are
 * kept in the same array order as the sources. Typically only the minimum
 * number (minRequiredSources) of StripedReaders is allocated; a new one is
 * allocated for a new source DN only if an existing DN turns out to be
 * invalid or slow.
 * If a source DN is corrupt, the corresponding blockReader is set to
 * null and is never read from again.
 *
 * @param i the array index of sources
 * @param offsetInBlock offset for the internal block
 * @return StripedReader
 */
private StripedReader addStripedReader(int i, long offsetInBlock) {
  StripedReader reader = new StripedReader(liveIndices[i]);
  stripedReaders.add(reader);

  BlockReader blockReader = newBlockReader(
      getBlock(blockGroup, liveIndices[i]), offsetInBlock, sources[i]);
  if (blockReader != null) {
    initChecksumAndBufferSizeIfNeeded(blockReader);
    reader.blockReader = blockReader;
  }
  reader.buffer = allocateBuffer(bufferSize);
  return reader;
}
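Finally, a hedged sketch of how addStripedReader fits into reconstruction setup; minRequiredSources is the field named in the comment above, and the zero offset is an assumption:

// Illustrative: allocate readers for the minimum set of sources.
for (int i = 0; i < minRequiredSources; i++) {
  StripedReader sr = addStripedReader(i, 0);
  if (sr.blockReader == null) {
    // source i could not be opened; a replacement source would be tried
  }
}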