Usage examples for the Java class org.apache.hadoop.hdfs.MiniDFSCluster
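
MiniDFSCluster boots an in-process HDFS cluster (one NameNode plus a configurable number of DataNodes) for tests. All of the snippets below share the same lifecycle: build the cluster with MiniDFSCluster.Builder, wait for it to become active, exercise it through a FileSystem, and shut it down in a finally block or teardown hook. As a minimal sketch of that lifecycle (assuming the Hadoop minicluster test artifacts are on the classpath; the class name here is illustrative, not from any of the quoted projects):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDFSClusterLifecycle {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
      // Start an in-process HDFS with a single DataNode.
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();

      // Use it like any other HDFS instance.
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/demo"));
    } finally {
      // Always release ports and storage directories, as the snippets below do.
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}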

Project: hadoop    File: TestCheckpoint.java
/**
 * Assert that, if sdToLock is locked, the cluster is not allowed to start up.
 * @param conf cluster conf to use
 * @param sdToLock the storage directory to lock
 */
private static void assertClusterStartFailsWhenDirLocked(
    Configuration conf, StorageDirectory sdToLock) throws IOException {
  // Lock the edits dir, then start the NN, and make sure it fails to start
  sdToLock.lock();
  MiniDFSCluster cluster = null;
  try {      
    cluster = new MiniDFSCluster.Builder(conf).format(false)
        .manageNameDfsDirs(false).numDataNodes(0).build();
    assertFalse("cluster should fail to start after locking " +
        sdToLock, sdToLock.isLockSupported());
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("already locked", ioe);
  } finally {
    cleanup(cluster);
    cluster = null;
    sdToLock.unlock();
  }
}
Project: monarch    File: HDFSQuasiService.java
private MiniDFSCluster createCluster() throws HDFSQuasiServiceException {
  MiniDFSCluster hdfsCluster = null;

  File baseDir = new File(getWorkingDir()).getAbsoluteFile();
  FileUtil.fullyDelete(baseDir);
  this.conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

  LOG.info("Using base dir " + baseDir.getAbsolutePath());

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(this.conf);
  builder.numDataNodes(getNumberOfDataNodes());
  try {
    hdfsCluster = builder.build();
  } catch (IOException e) {
    LOG.error("Error in creating mini DFS cluster ", e);
    throw new HDFSQuasiServiceException("Error in creating mini DFS cluster ", e);
  }
  ListIterator<DataNode> itr = hdfsCluster.getDataNodes().listIterator();
  LOG.info("NameNode: " + hdfsCluster.getNameNode().getNameNodeAddressHostPortString());
  while (itr.hasNext()) {
    DataNode dn = itr.next();
    LOG.info("DataNode: " + dn.getDisplayName());
  }
  return hdfsCluster;
}
Project: hadoop    File: TestWebHDFS.java
@Test
public void testDTInInsecureCluster() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);
    webHdfs.getDelegationToken(null);
    fail("No exception is thrown.");
  } catch (AccessControlException ace) {
    Assert.assertTrue(ace.getMessage().startsWith(
        WebHdfsFileSystem.CANT_FALLBACK_TO_INSECURE_MSG));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop    File: TestWriteToReplica.java
@Test
public void testWriteToTemporary() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);

    // set up replicasMap
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ExtendedBlock[] blocks = setup(bpid, dataSet);

    // test writeToTemporary
    testWriteToTemporary(dataSet, blocks);
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestMerge.java
public void testMerge() throws Exception {
  MiniDFSCluster dfsCluster = null;
  MiniMRClientCluster mrCluster = null;
  FileSystem fileSystem = null;
  try {
    Configuration conf = new Configuration();
    // Start the mini-MR and mini-DFS clusters
    dfsCluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(NUM_HADOOP_DATA_NODES).build();
    fileSystem = dfsCluster.getFileSystem();
    mrCluster = MiniMRClientClusterFactory.create(this.getClass(),
                                               NUM_HADOOP_DATA_NODES, conf);
    // Generate input.
    createInput(fileSystem);
    // Run the test.
    runMergeTest(new JobConf(mrCluster.getConfig()), fileSystem);
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.stop();
    }
  }
}
Project: hadoop    File: TestJournalNode.java
@Before
public void setup() throws Exception {
  File editsDir = new File(MiniDFSCluster.getBaseDirectory() +
      File.separator + "TestJournalNode");
  FileUtil.fullyDelete(editsDir);

  conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
      editsDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY,
      "0.0.0.0:0");
  jn = new JournalNode();
  jn.setConf(conf);
  jn.start();
  journalId = "test-journalid-" + GenericTestUtils.uniqueSequenceId();
  journal = jn.getOrCreateJournal(journalId);
  journal.format(FAKE_NSINFO);

  ch = new IPCLoggerChannel(conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
}
Project: hadoop    File: TestNameNodeRetryCacheMetrics.java
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
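
The @Before setups quoted in this listing (this one included) assign a cluster field but their matching teardown falls outside the excerpt; a typical counterpart, sketched here rather than taken from the quoted test, assuming JUnit 4's org.junit.After and the cluster field shown above:

@After
public void cleanup() {
  // Shut down the HA mini cluster started in setup(); tolerate a failed setup.
  if (cluster != null) {
    cluster.shutdown();
    cluster = null;
  }
}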
Project: hadoop    File: TestJobSysDirWithDFS.java
public void testWithDFS() throws IOException {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;
  try {
    final int taskTrackers = 4;

    JobConf conf = new JobConf();
    conf.set(JTConfig.JT_SYSTEM_DIR, "/tmp/custom/mapred/system");
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1, null, null, conf);

    runWordCount(mr, mr.createJobConf(), conf.get("mapred.system.dir"));
  } finally {
    if (dfs != null) { dfs.shutdown(); }
    if (mr != null) { mr.shutdown(); }
  }
}
Project: hadoop    File: TestBinaryTokenFile.java
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();

  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_PRINCIPAL, "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");

  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.checkExitOnShutdown(true);
  builder.numDataNodes(numSlaves);
  builder.format(true);
  builder.racks(null);
  dfsCluster = builder.build();

  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
  mrCluster.init(conf);
  mrCluster.start();

  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads(); 

  FileSystem fs = dfsCluster.getFileSystem(); 
  p1 = new Path("file1");
  p1 = fs.makeQualified(p1);
}
Project: hadoop    File: TestFSDirectory.java
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(REPLICATION)
    .build();
  cluster.waitActive();

  fsn = cluster.getNamesystem();
  fsdir = fsn.getFSDirectory();

  hdfs = cluster.getFileSystem();
  DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file3, 1024, REPLICATION, seed);

  DFSTestUtil.createFile(hdfs, file5, 1024, REPLICATION, seed);
  hdfs.mkdirs(sub2);
}
Project: hadoop    File: TestJoinProperties.java
public static Test suite() {
  TestSetup setup = new TestSetup(new TestSuite(TestJoinProperties.class)) {
    protected void setUp() throws Exception {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
      base = cluster.getFileSystem().makeQualified(new Path("/nested"));
      src = generateSources(conf);
    }
    protected void tearDown() throws Exception {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  };
  return setup;
}
Project: hadoop    File: TestFsck.java
@Test
public void testFsckNonExistent() throws Exception {
  DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
      setNumFiles(20).build();
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    util.waitReplication(fs, "/srcdat", (short)3);
    String outStr = runFsck(conf, 0, true, "/non-existent");
    assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
    System.out.println(outStr);
    util.cleanup(fs, "/srcdat");
  } finally {
    if (fs != null) { try { fs.close(); } catch (Exception e) {} }
    if (cluster != null) { cluster.shutdown(); }
  }
}
Project: hadoop    File: TestStartup.java
/**
 * Verify the following scenario.
 * 1. NN restarts.
 * 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
 * 3. After reregistration completes, DN will send Heartbeat, followed by
 *    Blockreport.
 * 4. NN will mark DatanodeStorageInfo#blockContentsStale to false.
 * @throws Exception
 */
@Test(timeout = 60000)
public void testStorageBlockContentsStaleAfterNNRestart() throws Exception {
  MiniDFSCluster dfsCluster = null;
  try {
    Configuration config = new Configuration();
    dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    dfsCluster.waitActive();
    dfsCluster.restartNameNode(true);
    BlockManagerTestUtil.checkHeartbeat(
        dfsCluster.getNamesystem().getBlockManager());
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    Integer numStaleStorages = (Integer) (mbs.getAttribute(
        mxbeanNameFsns, "NumStaleStorages"));
    assertEquals(0, numStaleStorages.intValue());
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }

}
Project: hadoop    File: TestMover.java
@Test
public void testMoverCliWithFederationHA() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(3))
      .numDataNodes(0).build();
  final Configuration conf = new HdfsConfiguration();
  DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
  try {
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(3, namenodes.size());

    Iterator<URI> iter = namenodes.iterator();
    URI nn1 = iter.next();
    URI nn2 = iter.next();
    URI nn3 = iter.next();
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar", nn3 + "/foobar");
    Assert.assertEquals(3, movePaths.size());
    checkMovePaths(movePaths.get(nn1), new Path("/foo"), new Path("/bar"));
    checkMovePaths(movePaths.get(nn2), new Path("/foo/bar"));
    checkMovePaths(movePaths.get(nn3), new Path("/foobar"));
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestGenericJournalConf.java
/**
 * Test that an implementation of JournalManager without a
 * (Configuration, URI) constructor throws an exception.
 */
@Test
public void testBadConstructor() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();

  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
           BadConstructorJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
           "dummy://test");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    fail("Should have failed before this point");
  } catch (IllegalArgumentException iae) {
    if (!iae.getMessage().contains("Unable to construct journal")) {
      fail("Should have failed with unable to construct exception");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop    File: TestGenericJournalConf.java
/**
 * Test that an exception is thrown if a journal class doesn't
 * exist in the classloader.
 */
@Test(expected=IllegalArgumentException.class)
public void testClassDoesntExist() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();

  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
           "org.apache.hadoop.nonexistent");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
           "dummy://test");

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop    File: TestStartup.java
@Test(timeout=30000)
public void testCorruptImageFallback() throws IOException {
  // Create two checkpoints
  createCheckPoint(2);
  // Delete a single md5sum
  corruptFSImageMD5(false);
  // Should still be able to start
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .build();
  try {
    cluster.waitActive();
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestCheckpoint.java
/**
 * Test that the NN locks its storage and edits directories, and won't start up
 * if the directories are already locked
 */
@Test
public void testNameDirLocking() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;

  // Start a NN, and verify that lock() fails in all of the configured
  // directories
  StorageDirectory savedSd = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    for (StorageDirectory sd : storage.dirIterable(null)) {
      assertLockFails(sd);
      savedSd = sd;
    }
  } finally {
    cleanup(cluster);
    cluster = null;
  }
  assertNotNull(savedSd);

  // Lock one of the saved directories, then start the NN, and make sure it
  // fails to start
  assertClusterStartFailsWhenDirLocked(conf, savedSd);
}
Project: hadoop    File: TestWebHdfsWithMultipleNameNodes.java
private static void setupCluster(final int nNameNodes, final int nDataNodes)
    throws Exception {
  LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);

  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
      .numDataNodes(nDataNodes)
      .build();
  cluster.waitActive();

  webhdfs = new WebHdfsFileSystem[nNameNodes];
  for(int i = 0; i < webhdfs.length; i++) {
    final InetSocketAddress addr = cluster.getNameNode(i).getHttpAddress();
    final String uri = WebHdfsFileSystem.SCHEME  + "://"
        + addr.getHostName() + ":" + addr.getPort() + "/";
    webhdfs[i] = (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
  }
}
Project: hadoop    File: TestStartup.java
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory());

  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      new File(hdfsDir, "data").getPath());
  config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(hdfsDir, "secondary")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
      WILDCARD_HTTP_HOST + "0");

  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
Project: hadoop    File: TestCacheAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: elasticsearch_my    File: MiniHDFS.java
public static void main(String[] args) throws Exception {
    if (args.length != 1) {
       throw new IllegalArgumentException("MiniHDFS <baseDirectory>");
    }
    // configure Paths
    Path baseDir = Paths.get(args[0]);
    // hadoop-home/, so logs will not complain
    if (System.getenv("HADOOP_HOME") == null) {
        Path hadoopHome = baseDir.resolve("hadoop-home");
        Files.createDirectories(hadoopHome);
        System.setProperty("hadoop.home.dir", hadoopHome.toAbsolutePath().toString());
    }
    // hdfs-data/, where any data is going
    Path hdfsHome = baseDir.resolve("hdfs-data");

    // start cluster
    Configuration cfg = new Configuration();
    cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString());
    // lower default permission: TODO: needed?
    cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766");
    // TODO: remove hardcoded port!
    MiniDFSCluster dfs = new MiniDFSCluster.Builder(cfg).nameNodePort(9999).build();

    // write our PID file
    Path tmp = Files.createTempFile(baseDir, null, null);
    String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
    Files.write(tmp, pid.getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PID_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);

    // write our port file
    tmp = Files.createTempFile(baseDir, null, null);
    Files.write(tmp, Integer.toString(dfs.getNameNodePort()).getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PORT_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);
}
Project: kafka-connect-hdfs    File: TestWithMiniDFSCluster.java
private MiniDFSCluster createDFSCluster(Configuration conf) throws IOException {
  MiniDFSCluster cluster;
  String[] hosts = {"localhost", "localhost", "localhost"};
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.hosts(hosts).nameNodePort(9001).numDataNodes(3);
  cluster = builder.build();
  cluster.waitActive();
  return cluster;
}
Project: hadoop    File: TestINodeAttributeProvider.java
@Before
public void setUp() throws IOException {
  CALLED.clear();
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
      MyAuthorizationProvider.class.getName());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  miniDFS = new MiniDFSCluster.Builder(conf).build();
}
Project: hadoop    File: TestFSImageWithXAttr.java
@BeforeClass
public static void setUp() throws IOException {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
}
Project: hadoop    File: UtilsForTests.java
/**
 * Signal the maps/reduces to start.
 */
static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys, 
                        String mapSignalFile, 
                        String reduceSignalFile, int replication) 
    throws IOException, TimeoutException {
  try {
    writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile),
              (short)replication);
    writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile),
              (short)replication);
  } catch (InterruptedException ie) {
    // Ignore
  }
}
Project: hadoop    File: UtilsForTests.java
/**
 * Signal the maps/reduces to start.
 */
static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys, 
                        boolean isMap, String mapSignalFile, 
                        String reduceSignalFile)
    throws IOException, TimeoutException {
  try {
    //  signal the maps to complete
    writeFile(dfs.getNameNode(), fileSys.getConf(),
              isMap 
              ? new Path(mapSignalFile)
              : new Path(reduceSignalFile), (short)1);
  } catch (InterruptedException ie) {
    // Ignore
  }
}
Project: hadoop    File: TestMiniMRClasspath.java
@Test
public void testExternalWritable()
  throws IOException {

  String namenode = null;
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;

  try {

    final int taskTrackers = 4;

    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster.Builder(conf).build();
    fileSys = dfs.getFileSystem();
    namenode = fileSys.getUri().toString();
    mr = new MiniMRCluster(taskTrackers, namenode, 3);      
    JobConf jobConf = mr.createJobConf();
    String result;

    result = launchExternal(fileSys.getUri(), jobConf,
        "Dennis was here!\nDennis again!", 3, 1);
    Assert.assertEquals("Dennis again!\t1\nDennis was here!\t1\n", result);

  } finally {
    if (dfs != null) { dfs.shutdown(); }
    if (mr != null) { mr.shutdown(); }
  }
}
Project: hadoop    File: HATestUtil.java
/** Sets the required configurations for performing failover.  */
public static void setFailoverConfigurations(MiniDFSCluster cluster,
    Configuration conf, String logicalName, int nsIndex) {
  // In an HA (possibly federated) MiniDFSCluster, each nameservice owns two
  // NameNodes laid out consecutively: indices 2*nsIndex and 2*nsIndex + 1.
  InetSocketAddress nnAddr1 = cluster.getNameNode(2 * nsIndex).getNameNodeAddress();
  InetSocketAddress nnAddr2 = cluster.getNameNode(2 * nsIndex + 1).getNameNodeAddress();
  setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
}
Project: hadoop    File: TestShortCircuitCache.java
@Test(timeout=60000)
public void testDataXceiverCleansUpSlotsOnFailure() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverCleansUpSlotsOnFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  final Path TEST_PATH2 = new Path("/test_file2");
  final int TEST_FILE_LEN = 4096;
  final int SEED = 0xFADE1;
  DFSTestUtil.createFile(fs, TEST_PATH1, TEST_FILE_LEN,
      (short)1, SEED);
  DFSTestUtil.createFile(fs, TEST_PATH2, TEST_FILE_LEN,
      (short)1, SEED);

  // The first read should allocate one shared memory segment and slot.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // The second read should fail, and we should only have 1 segment and 1 slot
  // left.
  fs.getClient().getConf().brfFailureInjector =
      new TestCleanupFailureInjector();
  try {
    DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());
  cluster.shutdown();
  sockDir.close();
}
Project: hadoop    File: TestShortCircuitCache.java
@Test(timeout=60000)
public void testPreReceiptVerificationDfsClientCanDoScr() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testPreReceiptVerificationDfsClientCanDoScr", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.getClient().getConf().brfFailureInjector =
      new TestPreReceiptVerificationFailureInjector();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096, (short)1, 0xFADE2);
  final Path TEST_PATH2 = new Path("/test_file2");
  DFSTestUtil.createFile(fs, TEST_PATH2, 4096, (short)1, 0xFADE2);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  ShortCircuitRegistry registry =
      cluster.getDataNodes().get(0).getShortCircuitRegistry();
  registry.visit(new ShortCircuitRegistry.Visitor() {
    @Override
    public void accept(HashMap<ShmId, RegisteredShm> segments,
                       HashMultimap<ExtendedBlockId, Slot> slots) {
      Assert.assertEquals(1, segments.size());
      Assert.assertEquals(2, slots.size());
    }
  });
  cluster.shutdown();
  sockDir.close();
}
Project: hadoop    File: TestMiniMRDFSCaching.java
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster.Builder(conf).build();
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
Project: hadoop    File: TestNamenodeRetryCache.java
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  nnRpc = cluster.getNameNode().getRpcServer();
  filesystem = cluster.getFileSystem();
}
Project: hadoop    File: TestDFSUpgradeWithHA.java
private static void checkNnPreviousDirExistence(MiniDFSCluster cluster,
    int index, boolean shouldExist) {
  Collection<URI> nameDirs = cluster.getNameDirs(index);
  for (URI nnDir : nameDirs) {
    checkPreviousDirExistence(new File(nnDir), shouldExist);
  }
}
Project: hadoop    File: TestWebHDFS.java
/**
 * Test snapshot deletion through WebHdfs
 */
@Test
public void testWebHdfsDeleteSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);

    webHdfs.createSnapshot(foo, "s1");
    final Path spath = webHdfs.createSnapshot(foo, null);
    Assert.assertTrue(webHdfs.exists(spath));
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));

    // delete the two snapshots
    webHdfs.deleteSnapshot(foo, "s1");
    Assert.assertFalse(webHdfs.exists(s1path));
    webHdfs.deleteSnapshot(foo, spath.getName());
    Assert.assertFalse(webHdfs.exists(spath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop    File: TestUniformSizeInputFormat.java
@BeforeClass
public static void setup() throws Exception {
  cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1)
                                        .format(true).build();
  totalFileSize = 0;

  for (int i=0; i<N_FILES; ++i)
    totalFileSize += createFile("/tmp/source/" + String.valueOf(i), SIZEOF_EACH_FILE);
}
Project: hadoop    File: TestNNWithQJM.java
@Test (timeout = 30000)
public void testLogAndRestart() throws IOException {
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      mjc.getQuorumJournalURI("myjournal").toString());

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(0)
    .manageNameDfsDirs(false)
    .build();
  try {
    cluster.getFileSystem().mkdirs(TEST_PATH);

    // Restart the NN and make sure the edit was persisted
    // and loaded again
    cluster.restartNameNode();

    assertTrue(cluster.getFileSystem().exists(TEST_PATH));
    cluster.getFileSystem().mkdirs(TEST_PATH_2);

    // Restart the NN again and make sure both edits are persisted.
    cluster.restartNameNode();
    assertTrue(cluster.getFileSystem().exists(TEST_PATH));
    assertTrue(cluster.getFileSystem().exists(TEST_PATH_2));
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestAddBlockRetry.java
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(REPLICATION)
    .build();
  cluster.waitActive();
}
Project: hadoop    File: TestStreamFile.java
@Test
public void testDoGetShouldWriteTheFileContentIntoServletOutputStream()
    throws Exception {

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(1)
      .build();
  try {
    Path testFile = createFile();
    setUpForDoGetTest(cluster, testFile);
    ServletOutputStreamExtn outStream = new ServletOutputStreamExtn();
    Mockito.doReturn(outStream).when(mockHttpServletResponse)
        .getOutputStream();
    StreamFile sfile = new StreamFile() {

      private static final long serialVersionUID = 7715590481809562722L;

      @Override
      public ServletContext getServletContext() {
        return mockServletContext;
      }
    };
    sfile.doGet(mockHttpServletRequest, mockHttpServletResponse);
    assertEquals("Not writing the file data into ServletOutputStream",
        outStream.getResult(), "test");
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestHistoryServerFileSystemStateStoreService.java
@Test
public void testTokenStoreHdfs() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  conf = cluster.getConfiguration(0);
  try {
    testTokenStore("/tmp/historystore");
  } finally {
    cluster.shutdown();
  }
}