Java class org.apache.hadoop.hdfs.DFSTestUtil example source code
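DFSTestUtil is a test utility class used throughout Hadoop's HDFS test suite; the snippets on this page, drawn from Hadoop's own tests, show how it is used to create files, wait for replication, read data back, build mock datanode descriptors, and so on. As a quick orientation, here is a minimal, self-contained sketch (not taken from any project below) exercising the calls that recur most often on this page: createFile, waitReplication, and readFile against a MiniDFSCluster. The class name DFSTestUtilUsageSketch and the path /sketch/file.dat are made up for illustration; the cluster setup mirrors the TestDatanodeRestart example further down.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DFSTestUtilUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      Path file = new Path("/sketch/file.dat");
      // Create a 1 KB file with replication 3 and seed 0.
      DFSTestUtil.createFile(fs, file, 1024, (short) 3, 0L);
      // Block until the expected number of replicas is reported.
      DFSTestUtil.waitReplication(fs, file, (short) 3);
      // Read the file contents back as a String.
      String contents = DFSTestUtil.readFile(fs, file);
      System.out.println("read " + contents.length() + " characters");
    } finally {
      cluster.shutdown();
    }
  }
}

In the real tests this setup and teardown usually lives in @Before/@After methods or try/finally blocks, as several of the examples below show.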

Project: hadoop    File: BaseTestHttpFSWith.java
private void testConcat() throws Exception {
  Configuration config = getProxiedFSConf();
  config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  if (!isLocalFS()) {
    FileSystem fs = FileSystem.get(config);
    fs.mkdirs(getProxiedFSTestDir());
    Path path1 = new Path("/test/foo.txt");
    Path path2 = new Path("/test/bar.txt");
    Path path3 = new Path("/test/derp.txt");
    DFSTestUtil.createFile(fs, path1, 1024, (short) 3, 0);
    DFSTestUtil.createFile(fs, path2, 1024, (short) 3, 0);
    DFSTestUtil.createFile(fs, path3, 1024, (short) 3, 0);
    fs.close();
    fs = getHttpFSFileSystem();
    fs.concat(path1, new Path[]{path2, path3});
    fs.close();
    fs = FileSystem.get(config);
    Assert.assertTrue(fs.exists(path1));
    Assert.assertFalse(fs.exists(path2));
    Assert.assertFalse(fs.exists(path3));
    fs.close();
  }
}
Project: hadoop    File: TestOpenFilesWithSnapshot.java
@Test
public void testWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);

  // read snapshot file after restart
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
Project: hadoop    File: OfflineEditsViewerHelper.java
/**
 * Run file operations to create edits for all op codes
 * to be tested.
 *
 * the following op codes are deprecated and therefore not tested:
 *
 * OP_DATANODE_ADD    ( 5)
 * OP_DATANODE_REMOVE ( 6)
 * OP_SET_NS_QUOTA    (11)
 * OP_CLEAR_NS_QUOTA  (12)
 */
private CheckpointSignature runOperations() throws IOException {
  LOG.info("Creating edits by performing fs operations");
  // no explicit type check; if it's not a DistributedFileSystem an exception is thrown, which is what we want
  DistributedFileSystem dfs = cluster.getFileSystem();
  DFSTestUtil.runOperations(cluster, dfs, cluster.getConfiguration(0),
      dfs.getDefaultBlockSize(), 0);

  // OP_ROLLING_UPGRADE_START
  cluster.getNamesystem().getEditLog().logStartRollingUpgrade(Time.now());
  // OP_ROLLING_UPGRADE_FINALIZE
  cluster.getNamesystem().getEditLog().logFinalizeRollingUpgrade(Time.now());

  // Force a roll so we get an OP_END_LOG_SEGMENT txn
  return cluster.getNameNodeRpc().rollEditLog();
}
Project: hadoop    File: TestDataNodeHotSwapVolumes.java
@Test(timeout=60000)
public void testReplicatingAfterRemoveVolume()
    throws InterruptedException, TimeoutException, IOException,
    ReconfigurationException {
  startDFSCluster(1, 2);

  final FileSystem fs = cluster.getFileSystem();
  final short replFactor = 2;
  Path testFile = new Path("/test");
  createFile(testFile, 4, replFactor);

  DataNode dn = cluster.getDataNodes().get(0);
  Collection<String> oldDirs = getDataDirs(dn);
  String newDirs = oldDirs.iterator().next();  // Keep the first volume.
  dn.reconfigurePropertyImpl(
      DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
  assertFileLocksReleased(
      new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));

  triggerDeleteReport(dn);

  waitReplication(fs, testFile, 1, 1);
  DFSTestUtil.waitReplication(fs, testFile, replFactor);
}
Project: hadoop    File: TestFsDatasetCache.java
@Test(timeout=60000)
public void testPageRounder() throws Exception {
  // Write a small file
  Path fileName = new Path("/testPageRounder");
  final int smallBlocks = 512; // This should be smaller than the page size
  assertTrue("Page size should be greater than smallBlocks!",
      PAGE_SIZE > smallBlocks);
  final int numBlocks = 5;
  final int fileLen = smallBlocks * numBlocks;
  FSDataOutputStream out =
      fs.create(fileName, false, 4096, (short)1, smallBlocks);
  out.write(new byte[fileLen]);
  out.close();
  HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
      fileName, 0, fileLen);
  // Cache the file and check the sizes match the page size
  setHeartbeatResponse(cacheBlocks(locs));
  DFSTestUtil.verifyExpectedCacheUsage(PAGE_SIZE * numBlocks, numBlocks, fsd);
  // Uncache and check that it decrements by the page size too
  setHeartbeatResponse(uncacheBlocks(locs));
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
}
Project: hadoop    File: TestSnapshotDeletion.java
@Test
public void testCorrectNumberOfBlocksAfterRestart() throws IOException {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final Path file = new Path(foo, "file");
  final String snapshotName = "ss0";

  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  hdfs.mkdirs(bar);
  hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  hdfs.allowSnapshot(foo);

  hdfs.createSnapshot(foo, snapshotName);
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();

  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  hdfs.deleteSnapshot(foo, snapshotName);
  hdfs.delete(bar, true);
  hdfs.delete(foo, true);

  long numberOfBlocks = cluster.getNamesystem().getBlocksTotal();
  cluster.restartNameNode(0);
  assertEquals(numberOfBlocks, cluster.getNamesystem().getBlocksTotal());
}
Project: hadoop    File: TestDiskspaceQuotaUpdate.java
/**
 * Test if the quota can be correctly updated for create file
 */
@Test (timeout=60000)
public void testQuotaUpdateWithFileCreate() throws Exception  {
  final Path foo = new Path(dir, "foo");
  Path createdFile = new Path(foo, "created_file.data");
  dfs.mkdirs(foo);
  dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
  long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
      fileLen, BLOCKSIZE, REPLICATION, seed);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());
  QuotaCounts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  assertEquals(2, cnt.getNameSpace());
  assertEquals(fileLen * REPLICATION, cnt.getStorageSpace());
}
Project: hadoop    File: TestSnapshotBlocksMap.java
@Test(timeout = 30000)
public void testReadSnapshotFileWithCheckpoint() throws Exception {
  Path foo = new Path("/foo");
  hdfs.mkdirs(foo);
  hdfs.allowSnapshot(foo);
  Path bar = new Path("/foo/bar");
  DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
  hdfs.createSnapshot(foo, "s1");
  assertTrue(hdfs.delete(bar, true));

  // checkpoint
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);

  // restart namenode to load snapshot files from fsimage
  cluster.restartNameNode(true);
  String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(snapshotPath));
}
Project: hadoop    File: TestSetQuotaWithSnapshot.java
@Test (timeout=60000)
public void testSetQuota() throws Exception {
  final Path dir = new Path("/TestSnapshot");
  hdfs.mkdirs(dir);
  // allow snapshot on dir and create snapshot s1
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");

  Path sub = new Path(dir, "sub");
  hdfs.mkdirs(sub);
  Path fileInSub = new Path(sub, "file");
  DFSTestUtil.createFile(hdfs, fileInSub, BLOCKSIZE, REPLICATION, seed);
  INodeDirectory subNode = INodeDirectory.valueOf(
      fsdir.getINode(sub.toString()), sub);
  // subNode should be an INodeDirectory, but not an INodeDirectoryWithSnapshot
  assertFalse(subNode.isWithSnapshot());

  hdfs.setQuota(sub, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  subNode = INodeDirectory.valueOf(fsdir.getINode(sub.toString()), sub);
  assertTrue(subNode.isQuotaSet());
  assertFalse(subNode.isWithSnapshot());
}
Project: hadoop    File: TestPBHelper.java
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
                                       AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
                                       AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
                                       AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Project: hadoop    File: TestQuotaByStorageType.java
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOffChildOn() throws Exception {
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(child, StorageType.SSD, 2 * BLOCKSIZE);

  // Create a file of size 2.5 * BLOCKSIZE under the child directory.
  // Since the child directory has an SSD quota of 2 * BLOCKSIZE,
  // expect an exception when creating files under the child directory.
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  try {
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
        REPLICATION, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
  }
}
Project: hadoop    File: TestFsDatasetCache.java
@Test(timeout=60000)
public void testUncacheUnknownBlock() throws Exception {
  // Create a file
  Path fileName = new Path("/testUncacheUnknownBlock");
  int fileLen = 4096;
  DFSTestUtil.createFile(fs, fileName, fileLen, (short)1, 0xFDFD);
  HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
      fileName, 0, fileLen);

  // Try to uncache it without caching it first
  setHeartbeatResponse(uncacheBlocks(locs));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return fsd.getNumBlocksFailedToUncache() > 0;
    }
  }, 100, 10000);
}
Project: hadoop    File: TestSnapshotDeletion.java
/**
 * Deleting a directory with a snapshottable descendant that has snapshots must fail.
 */
@Test (timeout=300000)
public void testDeleteDirectoryWithSnapshot2() throws Exception {
  Path file0 = new Path(sub, "file0");
  Path file1 = new Path(sub, "file1");
  DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);

  Path subfile1 = new Path(subsub, "file0");
  Path subfile2 = new Path(subsub, "file1");
  DFSTestUtil.createFile(hdfs, subfile1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, subfile2, BLOCKSIZE, REPLICATION, seed);

  // Allow snapshot for subsub1, and create snapshot for it
  hdfs.allowSnapshot(subsub);
  hdfs.createSnapshot(subsub, "s1");

  // Deleting dir while its descendant subsub1 has snapshots should fail
  exception.expect(RemoteException.class);
  String error = subsub.toString()
      + " is snapshottable and already has snapshots";
  exception.expectMessage(error);
  hdfs.delete(dir, true);
}
Project: hadoop    File: TestDistCpSync.java
/**
 * make some changes under the given directory (created in the above way).
 * 1. rename dir/foo/d1 to dir/bar/d1
 * 2. delete dir/bar/d1/f3
 * 3. rename dir/foo to /dir/bar/d1/foo
 * 4. delete dir/bar/d1/foo/f1
 * 5. create file dir/bar/d1/foo/f1 whose size is 2*BLOCK_SIZE
 * 6. append one BLOCK to file dir/bar/f2
 * 7. rename dir/bar to dir/foo
 *
 * Thus after all these ops the subtree looks like this:
 *                       dir/
 *                       foo/
 *                 d1/    f2(A)    d2/
 *                foo/             f4
 *                f1(new)
 */
private void changeData(Path dir) throws Exception {
  final Path foo = new Path(dir, "foo");
  final Path bar = new Path(dir, "bar");
  final Path d1 = new Path(foo, "d1");
  final Path f2 = new Path(bar, "f2");

  final Path bar_d1 = new Path(bar, "d1");
  dfs.rename(d1, bar_d1);
  final Path f3 = new Path(bar_d1, "f3");
  dfs.delete(f3, true);
  final Path newfoo = new Path(bar_d1, "foo");
  dfs.rename(foo, newfoo);
  final Path f1 = new Path(newfoo, "f1");
  dfs.delete(f1, true);
  DFSTestUtil.createFile(dfs, f1, 2 * BLOCK_SIZE, DATA_NUM, 0);
  DFSTestUtil.appendFile(dfs, f2, (int) BLOCK_SIZE);
  dfs.rename(bar, new Path(dir, "foo"));
}
Project: hadoop    File: TestPBHelper.java
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Project: hadoop    File: TestOpenFilesWithSnapshot.java
@Test
public void testFilesDeletionWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test/test2"), true);
  fs.delete(new Path("/test/test/test3"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);

  // read snapshot file after restart
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
Project: hadoop    File: TestHASafeMode.java
/**
 * Make sure that when we transition to active in safe mode we don't
 * prematurely consider blocks missing just because not all DNs have reported
 * yet.
 * 
 * This is a regression test for HDFS-3921.
 */
@Test
public void testNoPopulatingReplQueuesWhenStartingActiveInSafeMode()
    throws IOException {
  DFSTestUtil.createFile(fs, new Path("/test"), 15*BLOCK_SIZE, (short)3, 1L);

  // Stop the DN so that when the NN restarts not all blocks will be reported
  // and the NN won't leave safe mode.
  cluster.stopDataNode(1);
  // Restart the namenode but don't wait for it to hear from all DNs (since
  // one DN is deliberately shut down.)
  cluster.restartNameNode(0, false);
  cluster.transitionToActive(0);

  assertTrue(cluster.getNameNode(0).isInSafeMode());
  // We shouldn't yet consider any blocks "missing" since we're in startup
  // safemode, i.e. not all DNs may have reported.
  assertEquals(0, cluster.getNamesystem(0).getMissingBlocksCount());
}
Project: hadoop    File: TestRenameWithSnapshots.java
@Test (timeout=60000)
public void testRenameDirectoryInSnapshot() throws Exception {
  final Path sub2 = new Path(sub1, "sub2");
  final Path sub3 = new Path(sub1, "sub3");
  final Path sub2file1 = new Path(sub2, "sub2file1");
  final String sub1snap1 = "sub1snap1";

  hdfs.mkdirs(sub1);
  hdfs.mkdirs(sub2);
  DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, sub1, sub1snap1);

  // First rename the sub-directory.
  hdfs.rename(sub2, sub3);

  // Query the diff report and make sure it looks as expected.
  SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, sub1snap1,
      "");
  LOG.info("DiffList is \n\"" + diffReport.toString() + "\"");
  List<DiffReportEntry> entries = diffReport.getDiffList();
  assertEquals(2, entries.size());
  assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
  assertTrue(existsInDiffReport(entries, DiffType.RENAME, sub2.getName(),
      sub3.getName()));
}
Project: hadoop    File: TestBPOfferService.java
/**
 * Set up a mock NN with the bare minimum for a DN to register to it.
 */
private DatanodeProtocolClientSideTranslatorPB setupNNMock(int nnIdx)
    throws Exception {
  DatanodeProtocolClientSideTranslatorPB mock =
      Mockito.mock(DatanodeProtocolClientSideTranslatorPB.class);
  Mockito.doReturn(new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0))
      .when(mock).versionRequest();

  Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
    .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));

  Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
    .when(mock).sendHeartbeat(
        Mockito.any(DatanodeRegistration.class),
        Mockito.any(StorageReport[].class),
        Mockito.anyLong(),
        Mockito.anyLong(),
        Mockito.anyInt(),
        Mockito.anyInt(),
        Mockito.anyInt(),
        Mockito.any(VolumeFailureSummary.class));
  mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 0);
  return mock;
}
Project: hadoop    File: TestDatanodeDescriptor.java
/**
 * Test that getInvalidateBlocks observes the max limit.
 */
@Test
public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;

  DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  for (int i=0; i<MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);
  Block[] bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.length, MAX_LIMIT);
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.length, REMAINING_BLOCKS);
}
Project: hadoop    File: TestHASafeMode.java
/**
 * Tests the case where, while a standby is down, more blocks are
 * added to the namespace but the edit log is not rolled. So, when the standby
 * starts up, it receives notification about the new blocks during
 * the safemode extension period.
 */
@Test
public void testBlocksAddedBeforeStandbyRestart() throws Exception {
  banner("Starting with NN0 active and NN1 standby, creating some blocks");
  DFSTestUtil.createFile(fs, new Path("/test"), 3*BLOCK_SIZE, (short) 3, 1L);
  // Roll edit log so that, when the SBN restarts, it will load
  // the namespace during startup.
  nn0.getRpcServer().rollEditLog();

  banner("Creating some blocks that won't be in the edit log");
  DFSTestUtil.createFile(fs, new Path("/test2"), 5*BLOCK_SIZE, (short) 3, 1L);

  banner("Restarting standby");
  restartStandby();

  // We expect it not to be stuck in safemode, since those blocks
  // that are already visible to the SBN should be processed
  // in the initial block reports.
  assertSafeMode(nn1, 3, 3, 3, 0);

  banner("Waiting for standby to catch up to active namespace");
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  assertSafeMode(nn1, 8, 8, 3, 0);
}
Project: hadoop    File: TestDatanodeRestart.java
@Test public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("TestDatanodeRestart").setNumFiles(2).build();
    util.createFiles(fs, TopDir, (short)3);
    util.waitReplication(fs, TopDir, (short)3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestHASafeMode.java
/**
 * Regression test for HDFS-2753. In this bug, the following sequence was
 * observed:
 * - Some blocks are written to DNs while the SBN was down. This causes
 *   the blockReceived messages to get queued in the BPServiceActor on the
 *   DN.
 * - When the SBN returns, the DN re-registers with the SBN, and then
 *   flushes its blockReceived queue to the SBN before it sends its
 *   first block report. This caused the first block report to be
 *   incorrectly ignored.
 * - The SBN would become stuck in safemode.
 */
@Test
public void testBlocksAddedWhileStandbyIsDown() throws Exception {
  DFSTestUtil.createFile(fs, new Path("/test"), 3*BLOCK_SIZE, (short) 3, 1L);

  banner("Stopping standby");
  cluster.shutdownNameNode(1);

  DFSTestUtil.createFile(fs, new Path("/test2"), 3*BLOCK_SIZE, (short) 3, 1L);

  banner("Rolling edit log so standby gets all edits on restart");
  nn0.getRpcServer().rollEditLog();

  restartStandby();
  assertSafeMode(nn1, 6, 6, 3, 0);
}
Project: hadoop    File: TestDataNodeVolumeFailureReporting.java
/**
 * Initializes the cluster.
 *
 * @param numDataNodes number of datanodes
 * @param storagesPerDatanode number of storage locations on each datanode
 * @param failedVolumesTolerated number of acceptable volume failures
 * @throws Exception if there is any failure
 */
private void initCluster(int numDataNodes, int storagesPerDatanode,
    int failedVolumesTolerated) throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
      failedVolumesTolerated);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
      .storagesPerDatanode(storagesPerDatanode).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
  long dnCapacity = DFSTestUtil.getDatanodeCapacity(
      cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
  volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();
}
Project: hadoop    File: TestBlockRecovery.java
private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
  Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
  DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] locs = new DatanodeInfo[] {
      new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
      mockOtherDN };
  RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);
  blocks.add(rBlock);
  return blocks;
}
Project: hadoop    File: TestSaveNamespace.java
@Test (timeout=30000)
public void testSaveWhileEditsRolled() throws Exception {
  Configuration conf = getConf();
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  try {
    doAnEdit(fsn, 1);
    CheckpointSignature sig = fsn.rollEditLog();
    LOG.warn("Checkpoint signature: " + sig);
    // Do another edit
    doAnEdit(fsn, 2);

    // Save namespace
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fsn.saveNamespace();

    // Now shut down and restart the NN
    fsn.close();
    fsn = null;

    // Start a new namesystem, which should be able to recover
    // the namespace from the previous incarnation.
    fsn = FSNamesystem.loadFromDisk(conf);

    // Make sure the image loaded including our edits.
    checkEditExists(fsn, 1);
    checkEditExists(fsn, 2);
  } finally {
    if (fsn != null) {
      fsn.close();
    }
  }
}
Project: hadoop    File: TestMRCJCFileInputFormat.java
static void writeFile(Configuration conf, Path name,
    short replication, int numBlocks)
    throws IOException, TimeoutException, InterruptedException {
  FileSystem fileSys = FileSystem.get(conf);

  FSDataOutputStream stm = fileSys.create(name, true,
                                          conf.getInt("io.file.buffer.size", 4096),
                                          replication, (long)BLOCKSIZE);
  for (int i = 0; i < numBlocks; i++) {
    stm.write(databuf);
  }
  stm.close();
  DFSTestUtil.waitReplication(fileSys, name, replication);
}
Project: hadoop    File: TestReaddir.java
@Before
public void createFiles() throws IllegalArgumentException, IOException {
  hdfs.delete(new Path(testdir), true);
  hdfs.mkdirs(new Path(testdir));
  DFSTestUtil.createFile(hdfs, new Path(testdir + "/f1"), 0, (short) 1, 0);
  DFSTestUtil.createFile(hdfs, new Path(testdir + "/f2"), 0, (short) 1, 0);
  DFSTestUtil.createFile(hdfs, new Path(testdir + "/f3"), 0, (short) 1, 0);
}
Project: hadoop    File: TestRpcProgramNfs3.java
@Before
public void createFiles() throws IllegalArgumentException, IOException {
  hdfs.delete(new Path(testdir), true);
  hdfs.mkdirs(new Path(testdir));
  hdfs.mkdirs(new Path(testdir + "/foo"));
  DFSTestUtil.createFile(hdfs, new Path(testdir + "/bar"), 0, (short) 1, 0);
}
Project: hadoop    File: TestRpcProgramNfs3.java
@Test(timeout = 120000)
public void testEncryptedReadWrite() throws Exception {
  final int len = 8192;

  final Path zone = new Path("/zone");
  hdfs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);

  final byte[] buffer = new byte[len];
  for (int i = 0; i < len; i++) {
    buffer[i] = (byte) i;
  }

  final String encFile1 = "/zone/myfile";
  createFileUsingNfs(encFile1, buffer);
  commit(encFile1, len);
  assertArrayEquals("encFile1 not equal",
      getFileContentsUsingNfs(encFile1, len),
      getFileContentsUsingDfs(encFile1, len));

  /*
   * Same thing except this time create the encrypted file using DFS.
   */
  final String encFile2 = "/zone/myfile2";
  final Path encFile2Path = new Path(encFile2);
  DFSTestUtil.createFile(hdfs, encFile2Path, len, (short) 1, 0xFEED);
  assertArrayEquals("encFile2 not equal",
      getFileContentsUsingNfs(encFile2, len),
      getFileContentsUsingDfs(encFile2, len));
}
Project: hadoop    File: TestPermissionSymlinks.java
@Before
public void setUp() throws Exception {
  // Create initial test files
  fs.mkdirs(linkParent);
  fs.mkdirs(targetParent);
  DFSTestUtil.createFile(fs, target, 1024, (short)3, 0xBEEFl);
  wrapper.createSymlink(target, link, false);
}
Project: hadoop    File: TestSnapshotBlocksMap.java
/**
 * Make sure we delete the 0-sized block when deleting an under-construction file
 */
@Test
public void testDeletionWithZeroSizeBlock2() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(subDir, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
Project: hadoop    File: TestAuditLogs.java
/** Test that an allowed stat puts a proper entry in the audit log */
@Test
public void testAuditAllowedStat() throws Exception {
  final Path file = new Path(fnames[0]);
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

  setupAuditLogs();
  FileStatus st = userfs.getFileStatus(file);
  verifyAuditLogs(true);
  assertTrue("failed to stat file", st != null && st.isFile());
}
Project: hadoop    File: TestRenameWithSnapshots.java
/**
 * Test the rename undo when quota of dst tree is exceeded after rename.
 */
@Test
public void testRenameExceedQuota() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path sub_dir2 = new Path(dir2, "subdir");
  final Path subfile_dir2 = new Path(sub_dir2, "subfile");
  hdfs.mkdirs(dir1);
  DFSTestUtil.createFile(hdfs, subfile_dir2, BLOCKSIZE, REPL, SEED);

  final Path foo = new Path(dir1, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");

  // set ns quota of dir2 to 5, so the current remaining is 1 (already has
  // dir2, sub_dir2, subfile_dir2, and s2)
  hdfs.setQuota(dir2, 5, Long.MAX_VALUE - 1);

  // rename /test/dir1/foo to /test/dir2/sub_dir2/subfile_dir2.
  // FSDirectory#verifyQuota4Rename will pass since foo is only counted
  // as 1 in the NS quota. The rename operation will succeed, while the real
  // namespace usage of dir2 will become 7 (dir2, s2 in dir2, sub_dir2, s2 in
  // sub_dir2, subfile_dir2 in the deleted list, new subfile, s1 in new subfile).
  hdfs.rename(foo, subfile_dir2, Rename.OVERWRITE);

  // check dir2
  INode dir2Node = fsdir.getINode4Write(dir2.toString());
  assertTrue(dir2Node.asDirectory().isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(
      fsdir.getBlockStoragePolicySuite());
  assertEquals(4, counts.getNameSpace());
  assertEquals(BLOCKSIZE * REPL * 2, counts.getStorageSpace());
}
Project: hadoop    File: TestSnapshotRename.java
/**
 * Test renaming a snapshot to another existing snapshot
 */
@Test (timeout=60000)
public void testRenameToExistingSnapshot() throws Exception {
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  // Create snapshots for sub1
  SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sub1, "s2");

  exception.expect(SnapshotException.class);
  String error = "The snapshot s2 already exists for directory "
      + sub1.toString();
  exception.expectMessage(error);
  hdfs.renameSnapshot(sub1, "s1", "s2");
}
Project: hadoop    File: TestSaslDataTransfer.java
/**
 * Tests DataTransferProtocol with the given client configuration.
 *
 * @param conf client configuration
 * @throws IOException if there is an I/O error
 */
private void doTest(HdfsConfiguration conf) throws IOException {
  fs = FileSystem.get(cluster.getURI(), conf);
  FileSystemTestHelper.createFile(fs, PATH, NUM_BLOCKS, BLOCK_SIZE);
  assertArrayEquals(FileSystemTestHelper.getFileData(NUM_BLOCKS, BLOCK_SIZE),
    DFSTestUtil.readFile(fs, PATH).getBytes("UTF-8"));
  BlockLocation[] blockLocations = fs.getFileBlockLocations(PATH, 0,
    Long.MAX_VALUE);
  assertNotNull(blockLocations);
  assertEquals(NUM_BLOCKS, blockLocations.length);
  for (BlockLocation blockLocation: blockLocations) {
    assertNotNull(blockLocation.getHosts());
    assertEquals(3, blockLocation.getHosts().length);
  }
}
Project: hadoop    File: TestHDFSConcat.java
/**
 * Test that the concat operation is properly persisted in the
 * edit log, and properly replayed on restart.
 */
@Test
public void testConcatInEditLog() throws Exception {
  final Path TEST_DIR = new Path("/testConcatInEditLog");
  final long FILE_LEN = blockSize;

  // 1. Concat some files
  Path[] srcFiles = new Path[3];
  for (int i = 0; i < srcFiles.length; i++) {
    Path path = new Path(TEST_DIR, "src-" + i);
    DFSTestUtil.createFile(dfs, path, FILE_LEN, REPL_FACTOR, 1);
    srcFiles[i] = path;
  }    
  Path targetFile = new Path(TEST_DIR, "target");
  DFSTestUtil.createFile(dfs, targetFile, FILE_LEN, REPL_FACTOR, 1);

  dfs.concat(targetFile, srcFiles);

  // 2. Verify the concat operation basically worked, and record
  // file status.
  assertTrue(dfs.exists(targetFile));
  FileStatus origStatus = dfs.getFileStatus(targetFile);

  // 3. Restart NN to force replay from edit log
  cluster.restartNameNode(true);

  // 4. Verify concat operation was replayed correctly and file status
  // did not change.
  assertTrue(dfs.exists(targetFile));
  assertFalse(dfs.exists(srcFiles[0]));

  FileStatus statusAfterRestart = dfs.getFileStatus(targetFile);

  assertEquals(origStatus.getModificationTime(),
      statusAfterRestart.getModificationTime());
}
Project: hadoop    File: TestSnapshotNameWithInvalidCharacters.java
@Test(timeout = 60000)
public void TestSnapshotWithInvalidName1() throws Exception {
  Path file1 = new Path(dir1, file1Name);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);

  hdfs.allowSnapshot(dir1);
  try {
    hdfs.createSnapshot(dir1, snapshot2);
  } catch (RemoteException e) {
    // expected: snapshot2 is not a valid snapshot name
  }
}
Project: hadoop    File: TestSequentialBlockId.java
/**
 * Test that block IDs are generated sequentially.
 *
 * @throws IOException
 */
@Test
public void testBlockIdGeneration() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();

    // Create a file that is 10 blocks long.
    Path path = new Path("testBlockIdGeneration.dat");
    DFSTestUtil.createFile(
        fs, path, IO_SIZE, BLOCK_SIZE * 10, BLOCK_SIZE, REPLICATION, SEED);
    List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs, path);
    LOG.info("Block0 id is " + blocks.get(0).getBlock().getBlockId());
    long nextBlockExpectedId = blocks.get(0).getBlock().getBlockId() + 1;

    // Ensure that the block IDs are sequentially increasing.
    for (int i = 1; i < blocks.size(); ++i) {
      long nextBlockId = blocks.get(i).getBlock().getBlockId();
      LOG.info("Block" + i + " id is " + nextBlockId);
      assertThat(nextBlockId, is(nextBlockExpectedId));
      ++nextBlockExpectedId;
    }
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestBlockManager.java
@Test
public void testUseDelHint() {
  DatanodeStorageInfo delHint = new DatanodeStorageInfo(
      DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("id"));
  List<DatanodeStorageInfo> moreThan1Racks = Arrays.asList(delHint);
  List<StorageType> excessTypes = new ArrayList<StorageType>();

  excessTypes.add(StorageType.DEFAULT);
  Assert.assertTrue(BlockManager.useDelHint(true, delHint, null,
      moreThan1Racks, excessTypes));
  excessTypes.remove(0);
  excessTypes.add(StorageType.SSD);
  Assert.assertFalse(BlockManager.useDelHint(true, delHint, null,
      moreThan1Racks, excessTypes));
}