Example source code for the Java class org.apache.hadoop.hdfs.TestFileCreation

Project: hadoop    File: TestBlockUnderConstruction.java
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;

  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  // need to make sure the full block is completely flushed to the DataNodes
  // (see FSOutputSummer#flush)
  stm.flush();
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while(blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Project: hadoop    File: TestBlockUnderConstruction.java
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);

  for(int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }

  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
Project: aliyun-oss-hadoop-fs    File: TestWebHDFS.java
@Test
public void testWebHdfsCreateNonRecursive() throws IOException, URISyntaxException {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  WebHdfsFileSystem webHdfs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();

    webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);

    TestFileCreation.testFileCreationNonRecursive(webHdfs);

  } finally {
    if (webHdfs != null) {
      webHdfs.close();
    }

    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestBlockUnderConstruction.java
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;

  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  // need to make sure the full block is completely flushed to the DataNodes
  // (see FSOutputSummer#flush)
  stm.flush();
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while(blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Project: aliyun-oss-hadoop-fs    File: TestBlockUnderConstruction.java
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);

  for(int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }

  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
Project: big-c    File: TestBlockUnderConstruction.java
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;

  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  // need to make sure the full block is completely flushed to the DataNodes
  // (see FSOutputSummer#flush)
  stm.flush();
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while(blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Project: big-c    File: TestBlockUnderConstruction.java
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);

  for(int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }

  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestBlockUnderConstruction.java
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;

  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  // need to make sure the full block is completely flushed to the DataNodes
  // (see FSOutputSummer#flush)
  stm.flush();
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while(blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestBlockUnderConstruction.java
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);

  for(int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }

  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
Project: hadoop-plus    File: TestBlockUnderConstruction.java
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);

  for(int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }

  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
Project: FlexMap    File: TestBlockUnderConstruction.java
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;

  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  // need to make sure the full block is completely flushed to the DataNodes
  // (see FSOutputSummer#flush)
  stm.flush();
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while(blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Project: FlexMap    File: TestBlockUnderConstruction.java
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);

  for(int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }

  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
Project: hops    File: TestSubtreeLock.java
@Test
public void testDeleteUnclosed() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    assertTrue(fs.mkdir(new Path("/foo"), FsPermission.getDefault()));
    TestFileCreation.createFile(fs, new Path("/foo/bar"), 1);
    assertTrue(fs.delete(new Path("/foo/bar"), true));
    assertFalse(fs.exists(new Path("/foo/bar")));
    assertFalse("Not All subtree locks were removed after operation ", subTreeLocksExists());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hops    File: TestSubtreeLock.java
@Test
public void testDeleteSimple() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    assertTrue(fs.mkdir(new Path("/foo"), FsPermission.getDefault()));
    TestFileCreation.createFile(fs, new Path("/foo/bar"), 1).close();

    assertTrue(fs.delete(new Path("/foo"), true));
    assertFalse(fs.exists(new Path("/foo")));
    assertFalse("Not All subtree locks were removed after operation ", subTreeLocksExists());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hops    File: TestSubtreeLock.java
@Test
public void testDepricatedRenameMoveFiles() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();

    TestFileCreation.createFile(fs, new Path("/foo/file1.txt"), 1).close();
    TestFileCreation.createFile(fs, new Path("/bar/file1.txt"), 1).close();

    assertTrue("Rename Failed", fs.rename(new Path("/foo/file1.txt"), new Path("/bar/file2.txt")));

    assertFalse("Not All subtree locks were removed after operation ", subTreeLocksExists());

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hops    File: TestBlockUnderConstruction.java
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);

  for (int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }

  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
Project: hadoop-TCP    File: TestBlockUnderConstruction.java
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);

  for(int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }

  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
Project: hardfs    File: TestBlockUnderConstruction.java
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);

  for(int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }

  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
Project: hadoop-on-lustre2    File: TestBlockUnderConstruction.java
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);

  for(int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }

  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
Project: cumulus    File: TestBlockUnderConstruction.java
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);

  for(int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }

  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
Project: hadoop    File: TestBlockUnderConstruction.java
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoContiguousUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Project: aliyun-oss-hadoop-fs    File: TestBlockUnderConstruction.java
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final BlockManager blockManager = cluster.getNamesystem().getBlockManager();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertFalse(blockManager.getStoredBlock(b).isComplete());

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Project: big-c    File: TestBlockUnderConstruction.java
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoContiguousUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestBlockUnderConstruction.java
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Project: hadoop-plus    File: TestBlockUnderConstruction.java
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;

  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while(blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Project: hadoop-plus    File: TestBlockUnderConstruction.java
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Project: FlexMap    File: TestBlockUnderConstruction.java
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Project: hops    File: TestSubtreeLock.java
@Test
public void testDelete() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    assertTrue(fs.mkdir(new Path("/foo"), FsPermission.getDefault()));

    TestFileCreation.createFile(fs, new Path("/foo/bar"), 1).close();
    assertTrue(fs.delete(new Path("/foo/bar"), false));
    assertFalse(fs.exists(new Path("/foo/bar")));

    TestFileCreation.createFile(fs, new Path("/foo/bar"), 1).close();
    assertTrue(fs.delete(new Path("/foo/bar"), true));
    assertFalse(fs.exists(new Path("/foo/bar")));

    assertTrue(fs.mkdir(new Path("/foo/bar"), FsPermission.getDefault()));
    TestFileCreation.createFile(fs, new Path("/foo/bar/foo"), 1).close();
    assertTrue(fs.delete(new Path("/foo"), true));
    assertFalse(fs.exists(new Path("/foo/bar/foo")));
    assertFalse(fs.exists(new Path("/foo/bar")));
    assertFalse(fs.exists(new Path("/foo")));

    assertFalse("Not All subtree locks were removed after operation ", subTreeLocksExists());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hops    File: TestSubtreeLock.java
@Test
public void testMove() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    assertTrue(fs.mkdir(new Path("/foo"), FsPermission.getDefault()));
    TestFileCreation.createFile(fs, new Path("/foo/bar"), 1).close();
    assertTrue(fs.mkdir(new Path("/foo1"), FsPermission.getDefault()));
    TestFileCreation.createFile(fs, new Path("/foo1/bar1"), 1).close();
    fs.rename(new Path("/foo1/bar1"), new Path("/foo/bar1"),
            Options.Rename.OVERWRITE);
    assertTrue(fs.exists(new Path("/foo/bar1")));
    assertFalse(fs.exists(new Path("/foo1/bar1")));

    assertFalse("Not All subtree locks were removed after operation ", subTreeLocksExists());

    try {
      fs.rename(new Path("/foo1/bar"), new Path("/foo/bar1"),
              Options.Rename.OVERWRITE);
      fail();
    } catch (FileNotFoundException e) {
      // expected: the rename source does not exist
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hops    File: TestSubtreeLock.java
@Test
public void testSubtreeIgnoreLockRequest() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();

    DistributedFileSystem dfs = cluster.getFileSystem();
    dfs.mkdirs(new Path("/foo"));
    TestFileCreation.createFile(dfs, new Path("/foo/file1.txt"), 1).close();

    INodeIdentifier inode = cluster.getNamesystem().lockSubtree("/foo/file1.txt", SubTreeOperation.StoOperationType.NA);
    if (inode != null) {
      fail("nothing should have been locked");
    }

    inode = cluster.getNamesystem().lockSubtree("/", SubTreeOperation.StoOperationType.NA);
    if (inode != null) {
      fail("root should not have been locked");
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hops    File: TestBlockUnderConstruction.java
void writeFile(Path file, FSDataOutputStream stm, int size)
    throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;

  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while (blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs)
        .getBlockLocations(file.toString(), 0L, BLOCK_SIZE * NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Project: hops    File: TestBlockUnderConstruction.java
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for (int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Project: hadoop-TCP    File: TestBlockUnderConstruction.java
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;

  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while(blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Project: hadoop-TCP    File: TestBlockUnderConstruction.java
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Project: s3hdfs    File: TestBasicUsage.java
@Test
public void testBasicPutGet()
    throws IOException, URISyntaxException,
    ServiceException, NoSuchAlgorithmException {
  S3HdfsPath s3HdfsPath = testUtil.setUpS3HdfsPath("rewrite", "readme.txt");

  // Create file and blank metadata in HDFS (but not s3)
  Path path = new Path(s3HdfsPath.getFullHdfsObjPath());
  FSDataOutputStream out =
      TestFileCreation.createFile(hdfs, path, 3);
  TestFileCreation.writeFile(out, 128);
  out.close();

  Path pathMeta = new Path(s3HdfsPath.getFullHdfsMetaPath());
  FSDataOutputStream outMeta = hdfs.create(pathMeta);
  outMeta.close();

  // Get the object
  S3Bucket bucket = new S3Bucket(s3HdfsPath.getBucketName());
  String objectKey = s3HdfsPath.getObjectName();

  S3Object returnedObject1 = s3Service.getObject(bucket.getName(), objectKey);
  System.out.println("RETURNED_OBJECT_1");
  System.out.println(returnedObject1); // returned has dataInputStream!

  // Verify the object
  assertEquals(bucket.getName(), returnedObject1.getBucketName());
  assertEquals(objectKey, returnedObject1.getKey());

  // verify returned data
  testUtil.compareS3ObjectWithHdfsFile(returnedObject1.getDataInputStream(),
      path);

  // List objects
  S3Object[] ls = s3Service.listObjects(bucket.getName());
  assertEquals("Should be one object", 1, ls.length);
  System.out.println("LISTED_OBJECTS_1");
  System.out.println(ls[0]);
}
Project: hardfs    File: TestBlockUnderConstruction.java
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;

  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while(blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Project: hardfs    File: TestBlockUnderConstruction.java
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Project: hadoop-on-lustre2    File: TestBlockUnderConstruction.java
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;

  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while(blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Project: hadoop-on-lustre2    File: TestBlockUnderConstruction.java
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Project: cumulus    File: TestBlockUnderConstruction.java
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;

  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while(blocksAfter <= blocksBefore) {
    locatedBlocks = hdfs.getClient().getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}