Java 类org.apache.hadoop.hdfs.server.datanode.TestInterDatanodeProtocol 实例源码

项目:hadoop-EAR    文件:TestLeaseRecovery.java   
/**
 * Verifies the on-disk meta info of {@code b} within the given namespace by
 * delegating to {@link TestInterDatanodeProtocol#checkMetaInfo}, passing
 * {@code null} for the optional final argument.
 *
 * @param namespaceId namespace the block belongs to
 * @param b           block whose meta info is verified
 * @param idp         inter-datanode protocol endpoint to query
 * @throws IOException if the underlying check fails
 */
static void checkMetaInfo(int namespaceId, Block b, InterDatanodeProtocol idp)
    throws IOException {
  TestInterDatanodeProtocol.checkMetaInfo(namespaceId, b, idp, null);
}
项目:hadoop-EAR    文件:TestChecksumFile.java   
/**
 * Convenience wrapper: checks the meta info of {@code b} in namespace
 * {@code namespaceId} through {@code idp}, forwarding to
 * {@link TestInterDatanodeProtocol#checkMetaInfo} with a {@code null}
 * final argument.
 *
 * @throws IOException if the delegated check fails
 */
static void checkMetaInfo(int namespaceId, Block b, InterDatanodeProtocol idp)
    throws IOException {
  TestInterDatanodeProtocol.checkMetaInfo(namespaceId, b, idp, null);
}
项目:hadoop-EAR    文件:TestChecksumFile.java   
/**
 * Regression test: lease recovery must succeed on a zero-length block whose
 * checksum (meta) file has been truncated to zero bytes, and the recovered
 * file must still open and read as empty.
 */
public void testRecoverZeroChecksumFile() throws Exception {
  MiniDFSCluster cluster = createMiniCluster();
  try {
    cluster.waitActive();
    // Force the separate-checksum-file layout so the block has a distinct
    // meta file on disk (the scenario this test manipulates below).
    cluster.getDataNodes().get(0).useInlineChecksum = false;

    // create a file
    DistributedFileSystem dfs = (DistributedFileSystem) cluster
        .getFileSystem();
    String filestr = "/zeroSizeFile";
    Path filepath = new Path(filestr);
    FSDataOutputStream out = dfs.create(filepath, true);

    // force creating data pipeline
    // nextBlockOutputStream is not public, so invoke it reflectively to make
    // the client allocate a block before any data is written.
    Method nextBlockOutputStreamMethod = DFSOutputStream.class
        .getDeclaredMethod("nextBlockOutputStream", String.class);
    nextBlockOutputStreamMethod.setAccessible(true);
    DatanodeInfo[] nodes = (DatanodeInfo[]) nextBlockOutputStreamMethod
        .invoke(out.getWrappedStream(), dfs.dfs.getClientName());

    // get data node
    DataNode datanode = cluster.getDataNode(nodes[0].getIpcPort());
    assertTrue(datanode != null);

    // verifies checksum file is of length 0
    LocatedBlockWithMetaInfo locatedblock = TestInterDatanodeProtocol
        .getLastLocatedBlock(dfs.dfs.namenode, filestr);
    Block lastblock = locatedblock.getBlock();
    DataNode.LOG.info("newblocks=" + lastblock);
    BlockPathInfo blockPathInfo = datanode.getBlockPathInfo(lastblock);
    String blockPath = blockPathInfo.getBlockPath();
    String metaPath = blockPathInfo.getMetaPath();

    File f = new File(blockPath);
    File meta = new File(metaPath);
    assertEquals(0, f.length());
    // set the checksum file to 0
    // Delete and recreate the meta file empty: simulates a truncated
    // checksum file that lease recovery must tolerate.
    meta.delete();
    DataOutputStream outs = new DataOutputStream(new FileOutputStream(
        metaPath, false));
    outs.close();

    // issue recovery and make sure it succeeds.
    // recoverLease may not complete on the first call; retry up to
    // numTries times with a short sleep between attempts.
    int numTries = 500;
    for (int idxTry = 0; idxTry < numTries; idxTry++) {
      boolean success = dfs.recoverLease(filepath);
      if (success) {
        break;
      } else if (idxTry == numTries - 1) {
        TestCase.fail("Recovery lease failed");
      } else {
        Thread.sleep(10);
      }
    }

    // make sure the meta file is still empty
    locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
        dfs.dfs.namenode, filestr);
    Block newBlock = locatedblock.getBlock();
    blockPathInfo = datanode.getBlockPathInfo(newBlock);
    assertEquals(0, blockPathInfo.getNumBytes());
    metaPath = blockPathInfo.getMetaPath();
    meta = new File(metaPath);
    assertEquals(0, meta.length());

    // make sure the file can be opened and read.
    InputStream in = dfs.open(new Path(filestr), 8);
    TestCase.assertEquals(-1, in.read()); // EOF
    in.close();
  } finally {
    cluster.shutdown();
  }
}
项目:hadoop-on-lustre    文件:TestLeaseRecovery.java   
/**
 * Checks the meta info of {@code b} via {@code idp} by delegating to
 * {@link TestInterDatanodeProtocol#checkMetaInfo} with a {@code null}
 * final argument.
 *
 * @param b   block whose meta info is verified
 * @param idp inter-datanode protocol endpoint to query
 * @throws IOException if the delegated check fails
 */
static void checkMetaInfo(Block b, InterDatanodeProtocol idp)
    throws IOException {
  TestInterDatanodeProtocol.checkMetaInfo(b, idp, null);
}
项目:cumulus    文件:TestLeaseRecovery.java   
/**
 * Verifies the meta info of {@code b} on datanode {@code dn} by forwarding
 * to {@link TestInterDatanodeProtocol#checkMetaInfo}.
 *
 * @param b  block whose meta info is verified
 * @param dn datanode that holds the block
 * @throws IOException if the delegated check fails
 */
static void checkMetaInfo(Block b, DataNode dn)
    throws IOException {
  TestInterDatanodeProtocol.checkMetaInfo(b, dn);
}
项目:RDFS    文件:TestLeaseRecovery.java   
/**
 * Delegates to {@link TestInterDatanodeProtocol#checkMetaInfo} to verify
 * block {@code b} in namespace {@code namespaceId}, supplying {@code null}
 * for the optional final argument.
 *
 * @throws IOException if the underlying check fails
 */
static void checkMetaInfo(int namespaceId, Block b, InterDatanodeProtocol idp)
    throws IOException {
  TestInterDatanodeProtocol.checkMetaInfo(namespaceId, b, idp, null);
}
项目:RDFS    文件:TestChecksumFile.java   
/**
 * Thin wrapper over {@link TestInterDatanodeProtocol#checkMetaInfo} that
 * verifies block {@code b} in the given namespace through {@code idp},
 * with the optional final argument left {@code null}.
 *
 * @throws IOException if the delegated check fails
 */
static void checkMetaInfo(int namespaceId, Block b, InterDatanodeProtocol idp)
    throws IOException {
  TestInterDatanodeProtocol.checkMetaInfo(namespaceId, b, idp, null);
}
项目:RDFS    文件:TestChecksumFile.java   
/**
 * Regression test: lease recovery must succeed on a zero-length block whose
 * checksum (meta) file has been truncated to zero bytes, and the recovered
 * file must still open and read as empty.
 */
public void testRecoverZeroChecksumFile() throws Exception {
  MiniDFSCluster cluster = createMiniCluster();
  try {
    cluster.waitActive();

    // create a file
    DistributedFileSystem dfs = (DistributedFileSystem) cluster
        .getFileSystem();
    String filestr = "/zeroSizeFile";
    Path filepath = new Path(filestr);
    FSDataOutputStream out = dfs.create(filepath, true);

    // force creating data pipeline
    // nextBlockOutputStream is not public, so invoke it reflectively to make
    // the client allocate a block before any data is written.
    Method nextBlockOutputStreamMethod = DFSOutputStream.class
        .getDeclaredMethod("nextBlockOutputStream", String.class);
    nextBlockOutputStreamMethod.setAccessible(true);
    DatanodeInfo[] nodes = (DatanodeInfo[]) nextBlockOutputStreamMethod
        .invoke(out.getWrappedStream(), dfs.dfs.getClientName());

    // get data node
    DataNode datanode = cluster.getDataNode(nodes[0].getIpcPort());
    assertTrue(datanode != null);

    // verifies checksum file is of length 0
    LocatedBlockWithMetaInfo locatedblock = TestInterDatanodeProtocol
        .getLastLocatedBlock(dfs.dfs.namenode, filestr);
    Block lastblock = locatedblock.getBlock();
    DataNode.LOG.info("newblocks=" + lastblock);
    BlockPathInfo blockPathInfo = datanode.getBlockPathInfo(lastblock);
    String blockPath = blockPathInfo.getBlockPath();
    String metaPath = blockPathInfo.getMetaPath();

    File f = new File(blockPath);
    File meta = new File(metaPath);
    assertEquals(0, f.length());
    // set the checksum file to 0
    // Delete and recreate the meta file empty: simulates a truncated
    // checksum file that lease recovery must tolerate.
    meta.delete();
    DataOutputStream outs = new DataOutputStream(new FileOutputStream(
        metaPath, false));
    outs.close();

    // issue recovery and make sure it succeeds.
    // recoverLease may not complete on the first call; retry up to
    // numTries times with a short sleep between attempts.
    int numTries = 500;
    for (int idxTry = 0; idxTry < numTries; idxTry++) {
      boolean success = dfs.recoverLease(filepath);
      if (success) {
        break;
      } else if (idxTry == numTries - 1) {
        TestCase.fail("Recovery lease failed");
      } else {
        Thread.sleep(10);
      }
    }

    // make sure the meta file is still empty
    locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
        dfs.dfs.namenode, filestr);
    Block newBlock = locatedblock.getBlock();
    blockPathInfo = datanode.getBlockPathInfo(newBlock);
    assertEquals(0, blockPathInfo.getNumBytes());
    metaPath = blockPathInfo.getMetaPath();
    meta = new File(metaPath);
    assertEquals(0, meta.length());

    // make sure the file can be opened and read.
    InputStream in = dfs.open(new Path(filestr), 8);
    TestCase.assertEquals(-1, in.read()); // EOF
    in.close();
  } finally {
    cluster.shutdown();
  }
}
项目:hadoop-0.20    文件:TestLeaseRecovery.java   
/**
 * Forwards to {@link TestInterDatanodeProtocol#checkMetaInfo} to verify the
 * meta info of {@code b} via {@code idp}, with a {@code null} final argument.
 *
 * @throws IOException if the delegated check fails
 */
static void checkMetaInfo(Block b, InterDatanodeProtocol idp)
    throws IOException {
  TestInterDatanodeProtocol.checkMetaInfo(b, idp, null);
}
项目:hortonworks-extension    文件:TestLeaseRecovery.java   
/**
 * Verifies the meta info of block {@code b} through {@code idp} by
 * delegating to {@link TestInterDatanodeProtocol#checkMetaInfo}, passing
 * {@code null} for the optional final argument.
 *
 * @throws IOException if the underlying check fails
 */
static void checkMetaInfo(Block b, InterDatanodeProtocol idp)
    throws IOException {
  TestInterDatanodeProtocol.checkMetaInfo(b, idp, null);
}
项目:hortonworks-extension    文件:TestLeaseRecovery.java   
/**
 * Test helper: asserts the meta info of {@code b} is consistent on the node
 * reached through {@code idp}. Delegates to
 * {@link TestInterDatanodeProtocol#checkMetaInfo} with {@code null} as the
 * final argument.
 *
 * @throws IOException if the delegated check fails
 */
static void checkMetaInfo(Block b, InterDatanodeProtocol idp)
    throws IOException {
  TestInterDatanodeProtocol.checkMetaInfo(b, idp, null);
}
项目:hadoop-gpu    文件:TestLeaseRecovery.java   
/**
 * Checks the meta info of {@code b} through {@code idp}, forwarding to
 * {@link TestInterDatanodeProtocol#checkMetaInfo} with a {@code null}
 * final argument.
 *
 * @throws IOException if the underlying check fails
 */
static void checkMetaInfo(Block b, InterDatanodeProtocol idp)
    throws IOException {
  TestInterDatanodeProtocol.checkMetaInfo(b, idp, null);
}