Example source code for the Java class org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter
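NameNodeAdapter is a test-only adapter that exposes NameNode internals (safe mode, namespace saving, the delegation-token secret manager, lease periods, locks) to HDFS tests. One idiom recurs throughout the snippets below: force a checkpoint by entering safe mode, saving the namespace, and leaving safe mode again. As a minimal sketch of that pattern, assuming a running MiniDFSCluster (the helper name forceCheckpoint is invented for illustration):

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

/** Force the NameNode to persist a fresh fsimage checkpoint (test-only idiom). */
static void forceCheckpoint(MiniDFSCluster cluster) throws Exception {
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false); // "false": resources are not low
  NameNodeAdapter.saveNamespace(nameNode);        // write the namespace to a new fsimage
  NameNodeAdapter.leaveSafeMode(nameNode);        // resume normal operation
}

Tests typically follow this with cluster.restartNameNode(true) to verify that the state survives a reload from the new image.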

Project: hadoop    File: TestBinaryTokenFile.java
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();

  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_PRINCIPAL, "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");

  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.checkExitOnShutdown(true);
  builder.numDataNodes(numSlaves);
  builder.format(true);
  builder.racks(null);
  dfsCluster = builder.build();

  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
  mrCluster.init(conf);
  mrCluster.start();

  // Start the delegation-token secret manager so the tests can obtain tokens.
  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();

  FileSystem fs = dfsCluster.getFileSystem(); 
  p1 = new Path("file1");
  p1 = fs.makeQualified(p1);
}
Project: hadoop    File: TestSetTimes.java
/**
 * Test that when access time updates are not needed, the FSNamesystem
 * write lock is not taken by getBlockLocations.
 * Regression test for HDFS-3981.
 */
@Test(timeout=60000)
public void testGetBlockLocationsOnlyUsesReadLock() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 100*1000);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(0)
    .build();
  ReentrantReadWriteLock spyLock = NameNodeAdapter.spyOnFsLock(cluster.getNamesystem());
  try {
    // Create empty file in the FSN.
    Path p = new Path("/empty-file");
    DFSTestUtil.createFile(cluster.getFileSystem(), p, 0, (short)1, 0L);

    // getBlockLocations() should not need the write lock, since we just created
    // the file (and thus its access time is already within the 100-second
    // accesstime precision configured above). 
    MockitoUtil.doThrowWhenCallStackMatches(
        new AssertionError("Should not need write lock"),
        ".*getBlockLocations.*")
        .when(spyLock).writeLock();
    cluster.getFileSystem().getFileBlockLocations(p, 0, 100);
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestInitializeSharedEdits.java
private void assertCanStartHaNameNodes(String pathSuffix)
    throws ServiceFailedException, IOException, URISyntaxException,
    InterruptedException {
  // Now should be able to start both NNs. Pass "false" here so that we don't
  // try to waitActive on all NNs, since the second NN doesn't exist yet.
  cluster.restartNameNode(0, false);
  cluster.restartNameNode(1, true);

  // Make sure HA is working.
  cluster.getNameNode(0).getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FileSystem fs = null;
  try {
    Path newPath = new Path(TEST_PATH, pathSuffix);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(newPath));
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
        newPath.toString(), false).isDir());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
Project: hadoop    File: TestHASafeMode.java
/**
 * Test that entering safe mode on the active namenode, when it is already in
 * startup safe mode, does not throw an NPE.
 * Regression test for HDFS-2747.
 */
@Test
public void testEnterSafeModeInANNShouldNotThrowNPE() throws Exception {
  banner("Restarting active");
  DFSTestUtil
    .createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
  restartActive();
  nn0.getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));

  FSNamesystem namesystem = nn0.getNamesystem();
  String status = namesystem.getSafemode();
  assertTrue("Bad safemode status: '" + status + "'", status
      .startsWith("Safe mode is ON."));
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
}
Project: hadoop    File: TestHASafeMode.java
/**
 * Regression test for a bug encountered while developing HDFS-2742. The
 * scenario:
 * - the image contains some blocks
 * - the edit log contains at least one block addition, followed by the
 *   deletion of more blocks than were added
 * - when the node started up, the incorrect accounting of block totals
 *   caused an assertion failure
 */
@Test
public void testBlocksDeletedInEditLog() throws Exception {
  banner("Starting with NN0 active and NN1 standby, creating some blocks");
  // Make 4 blocks persisted in the image.
  DFSTestUtil.createFile(fs, new Path("/test"),
      4*BLOCK_SIZE, (short) 3, 1L);
  NameNodeAdapter.enterSafeMode(nn0, false);
  NameNodeAdapter.saveNamespace(nn0);
  NameNodeAdapter.leaveSafeMode(nn0);

  // OP_ADD for 2 blocks
  DFSTestUtil.createFile(fs, new Path("/test2"),
      2*BLOCK_SIZE, (short) 3, 1L);

  // OP_DELETE for 4 blocks
  fs.delete(new Path("/test"), true);

  restartActive();
}
Project: hadoop    File: TestHAStateTransitions.java
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster,
    NameNode nn, boolean writeHeader) throws IOException {
  long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
  URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
  File sharedEditsDir = new File(sharedEditsUri.getPath());
  StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
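  // The new in-progress segment begins at the transaction after the last written txid.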
  File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir,
      txid + 1);
  assertTrue("Failed to create in-progress edits file",
      inProgressFile.createNewFile());

  if (writeHeader) {
    DataOutputStream out = new DataOutputStream(new FileOutputStream(
        inProgressFile));
    EditLogFileOutputStream.writeHeader(
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
    out.close();
  }
}
Project: hadoop    File: TestNameNodeMetrics.java
/** Test that the metrics reflect missing blocks */
@Test
public void testMissingBlock() throws Exception {
  // Create a file with single block with two replicas
  Path file = getTestPath("testMissingBlocks");
  createFile(file, 100, (short)1);

  // Corrupt the only replica of the block to result in a missing block
  LocatedBlock block = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(), file.toString(), 0, 1).get(0);
  cluster.getNamesystem().writeLock();
  try {
    bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
        "STORAGE_ID", "TEST");
  } finally {
    cluster.getNamesystem().writeUnlock();
  }
  updateMetrics();
  MetricsRecordBuilder rb = getMetrics(NS_METRICS);
  assertGauge("UnderReplicatedBlocks", 1L, rb);
  assertGauge("MissingBlocks", 1L, rb);
  assertGauge("MissingReplOneBlocks", 1L, rb);
  fs.delete(file, true);
  waitForDnMetricValue(NS_METRICS, "UnderReplicatedBlocks", 0L);
}
Project: hadoop    File: TestSnapshotBlocksMap.java
@Test(timeout = 30000)
public void testReadSnapshotFileWithCheckpoint() throws Exception {
  Path foo = new Path("/foo");
  hdfs.mkdirs(foo);
  hdfs.allowSnapshot(foo);
  Path bar = new Path("/foo/bar");
  DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
  hdfs.createSnapshot(foo, "s1");
  assertTrue(hdfs.delete(bar, true));

  // checkpoint
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);

  // restart namenode to load snapshot files from fsimage
  cluster.restartNameNode(true);
  String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(snapshotPath));
}
Project: hadoop    File: TestOpenFilesWithSnapshot.java
@Test
public void testWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);

  // read snapshot file after restart
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
Project: hadoop    File: TestOpenFilesWithSnapshot.java
@Test
public void testFilesDeletionWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test/test2"), true);
  fs.delete(new Path("/test/test/test3"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);

  // read snapshot file after restart
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
Project: hadoop    File: TestOpenFilesWithSnapshot.java
private void doTestMultipleSnapshots(boolean saveNamespace)
    throws IOException {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.createSnapshot(path, "s2");
  fs.delete(new Path("/test/test"), true);
  fs.deleteSnapshot(path, "s2");
  cluster.triggerBlockReports();
  if (saveNamespace) {
    NameNode nameNode = cluster.getNameNode();
    NameNodeAdapter.enterSafeMode(nameNode, false);
    NameNodeAdapter.saveNamespace(nameNode);
    NameNodeAdapter.leaveSafeMode(nameNode);
  }
  cluster.restartNameNode(true);
}
Project: hadoop    File: TestOpenFilesWithSnapshot.java
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);

  // check for zero sized blocks
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // create one empty block
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(path, "s2");

  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
}
Project: hadoop    File: TestBlocksWithNotEnoughRacks.java
@Test
public void testSufficientlySingleReplBlockUsesNewRack() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 1;
  final Path filePath = new Path("/testFile");

  String racks[] = {"/rack1", "/rack1", "/rack1", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block with a replication factor of 1
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);

    REPLICATION_FACTOR = 2;
    NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
Project: aliyun-oss-hadoop-fs    File: TestNameNodeMetrics.java
/** Create excess blocks by reducing the replication factor
 * of a file and ensure the metrics reflect it
 */
@Test
public void testExcessBlocks() throws Exception {
  Path file = getTestPath("testExcessBlocks");
  createFile(file, 100, (short)2);
  NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1);
  MetricsRecordBuilder rb = getMetrics(NS_METRICS);
  assertGauge("ExcessBlocks", 1L, rb);

  // verify ExcessBlocks metric is decremented and
  // excessReplicateMap is cleared after deleting a file
  fs.delete(file, true);
  rb = getMetrics(NS_METRICS);
  assertGauge("ExcessBlocks", 0L, rb);
  assertTrue(bm.excessReplicateMap.isEmpty());
}
Project: aliyun-oss-hadoop-fs    File: TestNameNodeMetrics.java
/** Test that the metrics reflect missing blocks */
@Test
public void testMissingBlock() throws Exception {
  // Create a file with single block with two replicas
  Path file = getTestPath("testMissingBlocks");
  createFile(file, 100, (short)1);

  // Corrupt the only replica of the block to result in a missing block
  LocatedBlock block = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(), file.toString(), 0, 1).get(0);
  cluster.getNamesystem().writeLock();
  try {
    bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
        "STORAGE_ID", "TEST");
  } finally {
    cluster.getNamesystem().writeUnlock();
  }
  Thread.sleep(1000); // Wait for block to be marked corrupt
  MetricsRecordBuilder rb = getMetrics(NS_METRICS);
  assertGauge("UnderReplicatedBlocks", 1L, rb);
  assertGauge("MissingBlocks", 1L, rb);
  assertGauge("MissingReplOneBlocks", 1L, rb);
  fs.delete(file, true);
  waitForDnMetricValue(NS_METRICS, "UnderReplicatedBlocks", 0L);
}
Project: aliyun-oss-hadoop-fs    File: TestINodeFileUnderConstructionWithSnapshot.java
@Test
public void testLease() throws Exception {
  try {
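    // Shrink the lease soft/hard limits (in ms) so lease expiry can be exercised quickly.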
    NameNodeAdapter.setLeasePeriod(fsn, 100, 200);
    final Path foo = new Path(dir, "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0);
    HdfsDataOutputStream out = appendFileWithoutClosing(bar, 100);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");

    hdfs.delete(foo, true);
    Thread.sleep(1000);
    try {
      fsn.writeLock();
      NameNodeAdapter.getLeaseManager(fsn).runLeaseChecks();
    } finally {
      fsn.writeUnlock();
    }
  } finally {
    NameNodeAdapter.setLeasePeriod(fsn, HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
        HdfsConstants.LEASE_HARDLIMIT_PERIOD);
  }
}
Project: hadoop    File: MiniDFSCluster.java
/**
 * Expire a DataNode heartbeat on the NameNode
 * @param dnId the ID of the DataNode whose heartbeat should be expired
 * @throws IOException if the DataNode is not registered with the NameNode
 */
public void setDataNodeDead(DatanodeID dnId) throws IOException {
  DatanodeDescriptor dnd =
      NameNodeAdapter.getDatanode(getNamesystem(), dnId);
  DFSTestUtil.setDatanodeDead(dnd);
  BlockManagerTestUtil.checkHeartbeat(getNamesystem().getBlockManager());
}
Project: hadoop    File: MiniDFSCluster.java
/**
 * Returns true if the NameNode is running and is out of Safe Mode
 * or if waiting for safe mode is disabled.
 */
public boolean isNameNodeUp(int nnIndex) {
  NameNode nameNode = nameNodes[nnIndex].nameNode;
  if (nameNode == null) {
    return false;
  }
  long[] sizes;
  sizes = NameNodeAdapter.getStats(nameNode.getNamesystem());
  boolean isUp = false;
  synchronized (this) {
    isUp = ((!nameNode.isInSafeMode() || !waitSafeMode) &&
        sizes[ClientProtocol.GET_STATS_CAPACITY_IDX] != 0);
  }
  return isUp;
}
Project: hadoop    File: TestXAttrConfigFlag.java
/**
 * Restart the cluster, optionally saving a new checkpoint.
 *
 * @param checkpoint boolean true to save a new checkpoint
 * @param xattrsEnabled if true, XAttr support is enabled
 * @throws Exception if restart fails
 */
private void restart(boolean checkpoint, boolean xattrsEnabled)
    throws Exception {
  NameNode nameNode = cluster.getNameNode();
  if (checkpoint) {
    NameNodeAdapter.enterSafeMode(nameNode, false);
    NameNodeAdapter.saveNamespace(nameNode);
  }
  shutdown();
  initCluster(false, xattrsEnabled);
}
Project: hadoop    File: TestBootstrapStandby.java
/**
 * Test downloading from the active a checkpoint made at a later
 * transaction ID.
 */
@Test
public void testDownloadingLaterCheckpoint() throws Exception {
  // Roll edit logs a few times to inflate txid
  nn0.getRpcServer().rollEditLog();
  nn0.getRpcServer().rollEditLog();
  // Make checkpoint
  NameNodeAdapter.enterSafeMode(nn0, false);
  NameNodeAdapter.saveNamespace(nn0);
  NameNodeAdapter.leaveSafeMode(nn0);
  long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
    .getFSImage().getMostRecentCheckpointTxId();
  assertEquals(6, expectedCheckpointTxId);

  int rc = BootstrapStandby.run(
      new String[]{"-force"},
      cluster.getConfiguration(1));
  assertEquals(0, rc);

  // Should have copied over the namespace from the active
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
      ImmutableList.of((int)expectedCheckpointTxId));
  FSImageTestUtil.assertNNFilesMatch(cluster);

  // We should now be able to start the standby successfully.
  cluster.restartNameNode(1);
}
Project: hadoop    File: TestFailureToReadEdits.java
private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
  FSEditLog spyEditLog = NameNodeAdapter.spyOnEditLog(nn1);
  LimitedEditLogAnswer answer = new LimitedEditLogAnswer(); 
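  // Stub selectInputStreams on the spied edit log so the test can inject
  // failures when the standby tries to read edits.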
  doAnswer(answer).when(spyEditLog).selectInputStreams(
      anyLong(), anyLong(), (MetaRecoveryContext)anyObject(), anyBoolean());
  return answer;
}
Project: hadoop    File: TestHASafeMode.java
/**
 * Test that entering safe mode on the standby namenode, when it is already in
 * startup safe mode, does not throw an NPE.
 * Regression test for HDFS-2747.
 */
@Test
public void testEnterSafeModeInSBNShouldNotThrowNPE() throws Exception {
  banner("Starting with NN0 active and NN1 standby, creating some blocks");
  DFSTestUtil
      .createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
  // Roll edit log so that, when the SBN restarts, it will load
  // the namespace during startup and enter safemode.
  nn0.getRpcServer().rollEditLog();
  banner("Creating some blocks that won't be in the edit log");
  DFSTestUtil.createFile(fs, new Path("/test2"), 5 * BLOCK_SIZE, (short) 3,
      1L);
  banner("Deleting the original blocks");
  fs.delete(new Path("/test"), true);
  banner("Restarting standby");
  restartStandby();
  FSNamesystem namesystem = nn1.getNamesystem();
  String status = namesystem.getSafemode();
  assertTrue("Bad safemode status: '" + status + "'", status
      .startsWith("Safe mode is ON."));
  NameNodeAdapter.enterSafeMode(nn1, false);
  assertTrue("Failed to enter into safemode in standby", namesystem
      .isInSafeMode());
  NameNodeAdapter.enterSafeMode(nn1, false);
  assertTrue("Failed to enter into safemode in standby", namesystem
      .isInSafeMode());
}