Example usages of the Java class org.apache.hadoop.hdfs.HdfsConfiguration
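HdfsConfiguration extends org.apache.hadoop.conf.Configuration; when the class is loaded it registers hdfs-default.xml and hdfs-site.xml as default resources and maps deprecated HDFS keys to their current names. The snippets below use both the no-argument constructor and the copy constructor that wraps an existing Configuration. Below is a minimal, hypothetical sketch of those two patterns (the class name and the fs.defaultFS value are illustrative; the key constants come from DFSConfigKeys, and the printed values depend on whatever hdfs-site.xml is on your classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class HdfsConfigurationExample {
  public static void main(String[] args) {
    // No-arg constructor: picks up hdfs-default.xml and hdfs-site.xml automatically.
    Configuration conf = new HdfsConfiguration();
    int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
        DFSConfigKeys.DFS_REPLICATION_DEFAULT);
    System.out.println("dfs.replication = " + replication);

    // Copy constructor: layer the HDFS defaults on top of an existing Configuration,
    // much as FileChecksumServlets and DfsServlet below do with a DataNode's or
    // NameNode's conf.
    Configuration base = new Configuration();
    base.set("fs.defaultFS", "hdfs://localhost:8020"); // illustrative value only
    Configuration hdfsConf = new HdfsConfiguration(base);
    System.out.println("fs.defaultFS = " + hdfsConf.get("fs.defaultFS"));
  }
}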

Project: hadoop    File: FileChecksumServlets.java
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final PrintWriter out = response.getWriter();
  final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  final ServletContext context = getServletContext();
  final DataNode datanode = (DataNode) context.getAttribute("datanode");
  final Configuration conf = 
    new HdfsConfiguration(datanode.getConf());

  try {
    final DFSClient dfs = DatanodeJspHelper.getDFSClient(request, 
        datanode, conf, getUGI(request, conf));
    final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path, Long.MAX_VALUE);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
    writeXml(ioe, path, xml);
  } catch (InterruptedException e) {
    writeXml(e, path, xml);
  }
  xml.endDocument();
}
Project: hadoop    File: DfsServlet.java
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}. 
 */
protected ClientProtocol createNameNodeProxy() throws IOException {
  ServletContext context = getServletContext();
  // if we are running in the Name Node, use it directly rather than via 
  // rpc
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
  if (nn != null) {
    return nn.getRpcServer();
  }
  InetSocketAddress nnAddr =
    NameNodeHttpServer.getNameNodeAddressFromContext(context);
  Configuration conf = new HdfsConfiguration(
      NameNodeHttpServer.getConfFromContext(context));
  return NameNodeProxies.createProxy(conf, NameNode.getUri(nnAddr),
      ClientProtocol.class).getProxy();
}
Project: hadoop    File: TestAuditLogger.java
/**
 * Tests that AuditLogger works as expected.
 */
@Test
public void testAuditLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    fs.setTimes(new Path("/"), time, time);
    assertEquals(1, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestAuditLogger.java
/**
 * Tests that TopAuditLogger can be disabled
 */
@Test
public void testDisableTopAuditLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(NNTOP_ENABLED_KEY, false);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    List<AuditLogger> auditLoggers =
        cluster.getNameNode().getNamesystem().getAuditLoggers();
    for (AuditLogger auditLogger : auditLoggers) {
      assertFalse(
          "top audit logger is still hooked in after it is disabled",
          auditLogger instanceof TopAuditLogger);
    }
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestDatanodeProtocolRetryPolicy.java
/**
 * Starts an instance of DataNode
 * @throws IOException
 */
@Before
public void startUp() throws IOException, URISyntaxException {
  tearDownDone = false;
  conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
  conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
  FileSystem.setDefaultUri(conf,
      "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
  File dataDir = new File(DATA_DIR);
  FileUtil.fullyDelete(dataDir);
  dataDir.mkdirs();
  StorageLocation location = StorageLocation.parse(dataDir.getPath());
  locations.add(location);
}
Project: hadoop    File: TestDelegationTokenForProxyUser.java
@BeforeClass
public static void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  config.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER),
      "group1");
  config.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  configureSuperUserIPAddresses(config, REAL_USER);
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).build();
  cluster.waitActive();
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  ugi = UserGroupInformation.createRemoteUser(REAL_USER);
  proxyUgi = UserGroupInformation.createProxyUserForTesting(PROXY_USER, ugi,
      GROUP_NAMES);
}
Project: hadoop    File: SaslDataTransferTestCase.java
/**
 * Creates configuration for starting a secure cluster.
 *
 * @param dataTransferProtection supported QOPs
 * @return configuration for starting a secure cluster
 * @throws Exception if there is any failure
 */
protected HdfsConfiguration createSecureConfig(
    String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);

  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
Project: hadoop    File: TestHostsFiles.java
private Configuration getConf() {
  Configuration conf = new HdfsConfiguration();

  // Lower the heartbeat interval so the NN quickly learns of dead
  // or decommissioned DNs and the NN issues replication and invalidation
  // commands quickly (as replies to heartbeats)
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);

  // Have the NN ReplicationMonitor compute the replication and
  // invalidation commands to send DNs every second.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);

  // Have the NN check for pending replications every second so it
  // quickly schedules additional replicas as they are identified.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);

  // The DNs report blocks every second.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);

  // Indicates we have multiple racks
  conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
  return conf;
}
Project: hadoop    File: TestOverReplicatedBlocks.java
/**
 * Test over replicated block should get invalidated when decreasing the
 * replication for a partial block.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1, bm
        .countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestDatanodeRestart.java
@Test public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("TestDatanodeRestart").setNumFiles(2).build();
    util.createFiles(fs, TopDir, (short)3);
    util.waitReplication(fs, TopDir, (short)3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestFSNamesystem.java
/**
 * Test that FSNamesystem#clear clears all leases.
 */
@Test
public void testFSNamespaceClearLeases() throws Exception {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
  LeaseManager leaseMan = fsn.getLeaseManager();
  leaseMan.addLease("client1", "importantFile");
  assertEquals(1, leaseMan.countLease());
  fsn.clear();
  leaseMan = fsn.getLeaseManager();
  assertEquals(0, leaseMan.countLease());
}
Project: hadoop    File: TestBackupNode.java
BackupNode startBackupNode(Configuration conf,
                           StartupOption startupOpt,
                           int idx) throws IOException {
  Configuration c = new HdfsConfiguration(conf);
  String dirs = getBackupNodeDir(startupOpt, idx);
  c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
  c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
  c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
      "127.0.0.1:0");
  c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
          "127.0.0.1:0");

  BackupNode bn = (BackupNode)NameNode.createNameNode(
      new String[]{startupOpt.getName()}, c);
  assertTrue(bn.getRole() + " must be in SafeMode.", bn.isInSafeMode());
  assertTrue(bn.getRole() + " must be in StandbyState",
             bn.getNamesystem().getHAState()
               .equalsIgnoreCase(HAServiceState.STANDBY.name()));
  return bn;
}
Project: hadoop    File: TestWriteToReplica.java
@Test
public void testClose() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();

  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);

    // set up replicasMap
    String bpid = cluster.getNamesystem().getBlockPoolId();

    ExtendedBlock[] blocks = setup(bpid, dataSet);

    // test close
    testClose(dataSet, blocks);
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestNameNodeRetryCacheMetrics.java
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
Project: hadoop    File: TestEditLog.java
/**
 * Test case for loading a very simple edit log from a format
 * prior to the inclusion of edit transaction IDs in the log.
 */
@Test
public void testPreTxidEditLogWithEdits() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();

    long numEdits = testLoad(HADOOP20_SOME_EDITS, namesystem);
    assertEquals(3, numEdits);
    // Sanity check the edit
    HdfsFileStatus fileInfo = namesystem.getFileInfo("/myfile", false);
    assertEquals("supergroup", fileInfo.getGroup());
    assertEquals(3, fileInfo.getReplication());
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
Project: hadoop    File: TestBalancerWithNodeGroup.java
static Configuration createConf() {
  Configuration conf = new HdfsConfiguration();
  TestBalancer.initConf(conf);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, 
      NetworkTopologyWithNodeGroup.class.getName());
  conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, 
      BlockPlacementPolicyWithNodeGroup.class.getName());
  return conf;
}
Project: hadoop    File: TestFSRMStateStore.java
@Test(timeout = 60000)
public void testFSRMStateStore() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  MiniDFSCluster cluster =
          new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    fsTester = new TestFSRMStateStoreTester(cluster, false);
    // If the state store is FileSystemRMStateStore then add corrupted entry.
    // It should discard the entry and remove it from file system.
    FSDataOutputStream fsOut = null;
    FileSystemRMStateStore fileSystemRMStateStore =
            (FileSystemRMStateStore) fsTester.getRMStateStore();
    String appAttemptIdStr3 = "appattempt_1352994193343_0001_000003";
    ApplicationAttemptId attemptId3 =
            ConverterUtils.toApplicationAttemptId(appAttemptIdStr3);
    Path appDir =
            fsTester.store.getAppDir(attemptId3.getApplicationId().toString());
    Path tempAppAttemptFile =
            new Path(appDir, attemptId3.toString() + ".tmp");
    fsOut = fileSystemRMStateStore.fs.create(tempAppAttemptFile, false);
    fsOut.write("Some random data ".getBytes());
    fsOut.close();

    testRMAppStateStore(fsTester);
    Assert.assertFalse(fsTester.workingDirPathURI
            .getFileSystem(conf).exists(tempAppAttemptFile));
    testRMDTSecretManagerStateStore(fsTester);
    testCheckVersion(fsTester);
    testEpoch(fsTester);
    testAppDeletion(fsTester);
    testDeleteStore(fsTester);
    testAMRMTokenSecretManagerStateStore(fsTester);
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestStorageRestore.java
@Before
public void setUpNameDirs() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory()).getCanonicalFile();
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }

  hdfsDir.mkdirs();
  path1 = new File(hdfsDir, "name1");
  path2 = new File(hdfsDir, "name2");
  path3 = new File(hdfsDir, "name3");

  path1.mkdir(); path2.mkdir(); path3.mkdir();
  if(!path2.exists() ||  !path3.exists() || !path1.exists()) {
    throw new IOException("Couldn't create dfs.name dirs in " + hdfsDir.getAbsolutePath());
  }

  String dfs_name_dir = path1.getPath() + "," + path2.getPath();
  System.out.println("configuring hdfsdir is " + hdfsDir.getAbsolutePath() + 
      "; dfs_name_dir = "+ dfs_name_dir + ";dfs_name_edits_dir(only)=" + path3.getPath());

  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dfs_name_dir);
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, dfs_name_dir + "," + path3.getPath());

  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(hdfsDir, "secondary").getPath());

  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");

  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");

  // set the restore feature on
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
}
Project: hadoop    File: TestHistoryFileManager.java
@BeforeClass
public static void setUpClass() throws Exception {
  coreSitePath = "." + File.separator + "target" + File.separator +
          "test-classes" + File.separator + "core-site.xml";
  Configuration conf = new HdfsConfiguration();
  Configuration conf2 = new HdfsConfiguration();
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  conf2.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
          conf.get(MiniDFSCluster.HDFS_MINIDFS_BASEDIR) + "_2");
  dfsCluster2 = new MiniDFSCluster.Builder(conf2).build();
}
Project: hadoop    File: Mover.java
/**
 * Run a Mover in command line.
 *
 * @param args Command line arguments
 */
public static void main(String[] args) {
  if (DFSUtil.parseHelpArgument(args, Cli.USAGE, System.out, true)) {
    System.exit(0);
  }

  try {
    System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
  } catch (Throwable e) {
    LOG.error("Exiting " + Mover.class.getSimpleName()
        + " due to an exception", e);
    System.exit(-1);
  }
}
Project: hadoop    File: TransferFsImage.java
private static void setTimeout(HttpURLConnection connection) {
  if (timeout <= 0) {
    Configuration conf = new HdfsConfiguration();
    timeout = conf.getInt(DFSConfigKeys.DFS_IMAGE_TRANSFER_TIMEOUT_KEY,
        DFSConfigKeys.DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT);
    LOG.info("Image Transfer timeout configured to " + timeout
        + " milliseconds");
  }

  if (timeout > 0) {
    connection.setConnectTimeout(timeout);
    connection.setReadTimeout(timeout);
  }
}
Project: hadoop    File: TestDatanodeRestart.java
public void testRbwReplicas() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitActive();
  try {
    testRbwReplicas(cluster, false);
    testRbwReplicas(cluster, true);
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: SecondaryNameNode.java
/**
 * main() has some simple utility methods.
 * @param argv Command line parameters.
 * @exception Exception if the filesystem does not exist.
 */
public static void main(String[] argv) throws Exception {
  CommandLineOpts opts = SecondaryNameNode.parseArgs(argv);
  if (opts == null) {
    LOG.fatal("Failed to parse options");
    terminate(1);
  } else if (opts.shouldPrintHelp()) {
    opts.usage();
    System.exit(0);
  }

  StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
  Configuration tconf = new HdfsConfiguration();
  SecondaryNameNode secondary = null;
  try {
    secondary = new SecondaryNameNode(tconf, opts);
  } catch (IOException ioe) {
    LOG.fatal("Failed to start secondary namenode", ioe);
    terminate(1);
  }

  if (opts != null && opts.getCommand() != null) {
    int ret = secondary.processStartupCommand(opts);
    terminate(ret);
  }

  if (secondary != null) {
    secondary.startCheckpointThread();
    secondary.join();
  }
}
Project: hadoop    File: Balancer.java
/**
 * Run a balancer
 * @param args Command line arguments
 */
public static void main(String[] args) {
  if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
    System.exit(0);
  }

  try {
    System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
  } catch (Throwable e) {
    LOG.error("Exiting balancer due an exception", e);
    System.exit(-1);
  }
}
Project: hadoop    File: TestBalancer.java
/**
 * Test a cluster with even distribution,
 * then three nodes are added to the cluster,
 * runs balancer with two of the nodes in the exclude list in a file
 */
@Test(timeout=100000)
public void testBalancerCliWithExcludeListInAFile() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  Set<String> excludeHosts = new HashSet<String>();
  excludeHosts.add( "datanodeY");
  excludeHosts.add( "datanodeZ");
  doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
      new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
      excludeHosts, Parameters.DEFAULT.nodesToBeIncluded), true, true);
}
Project: hadoop    File: TestFsDatasetCacheRevocation.java
private static Configuration getDefaultConf() {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 50);
  conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 250);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      TestFsDatasetCache.CACHE_CAPACITY);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
    new File(sockDir.getDir(), "sock").getAbsolutePath());
  return conf;
}
Project: hadoop    File: TestHSync.java
/** Test hsync via SequenceFiles */
@Test
public void testSequenceFileSync() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  final FileSystem fs = cluster.getFileSystem();
  final Path p = new Path("/testSequenceFileSync/foo");
  final int len = 1 << 16;
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  Writer w = SequenceFile.createWriter(new Configuration(),
      Writer.stream(out),
      Writer.keyClass(RandomDatum.class),
      Writer.valueClass(RandomDatum.class),
      Writer.compression(CompressionType.NONE, new DefaultCodec()));
  w.hflush();
  checkSyncMetric(cluster, 0);
  w.hsync();
  checkSyncMetric(cluster, 1);
  int seed = new Random().nextInt();
  RandomDatum.Generator generator = new RandomDatum.Generator(seed);
  generator.next();
  w.append(generator.getKey(), generator.getValue());
  w.hsync();
  checkSyncMetric(cluster, 2);
  w.close();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
Project: hadoop    File: TestDecommissioningStatus.java
@BeforeClass
public static void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
      false);

  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  excludeFile = new Path(dir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  Path includeFile = new Path(dir, "include");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);

  writeConfigFile(localFileSys, excludeFile, null);
  writeConfigFile(localFileSys, includeFile, null);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .setHeartbeatExpireInterval(3000);
  Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
Project: hadoop    File: TestDFSHAAdmin.java
private HdfsConfiguration getHAConf() {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NSID);    
  conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, NSID);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, NSID), "nn1,nn2");    
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
  conf.set(DFSUtil.addKeySuffixes(
          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn1"),
      HOST_A + ":12345");
  conf.set(DFSUtil.addKeySuffixes(
          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn2"),
      HOST_B + ":12345");
  return conf;
}
Project: hadoop    File: TestBalancer.java
/**
 * Test a cluster with even distribution,
 * then three nodes are added to the cluster,
 * runs balancer with two of the nodes in the include list
 */
@Test(timeout=100000)
public void testBalancerCliWithIncludeListWithPorts() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1},
      CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), true, false);
}
Project: hadoop    File: TestStickyBit.java
@BeforeClass
public static void init() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  initCluster(true);
}
Project: hadoop    File: TestFcHdfsSetUMask.java
@BeforeClass
public static void clusterSetupAtBegining()
      throws IOException, LoginException, URISyntaxException  {
  Configuration conf = new HdfsConfiguration();
  // set permissions very restrictive
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,  "077");
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  fc = FileContext.getFileContext(cluster.getURI(0), conf);
  defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
      UserGroupInformation.getCurrentUser().getShortUserName()));
  fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
Project: hadoop    File: TestResolveHdfsSymlink.java
@BeforeClass
public static void setUp() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();

}
Project: hadoop    File: TestCheckpoint.java
/**
 * Regression test for HDFS-3835 - "Long-lived 2NN cannot perform a
 * checkpoint if security is enabled and the NN restarts without outstanding
 * delegation tokens"
 */
@Test
public void testSecondaryNameNodeWithDelegationTokens() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;

  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(true).build();

    assertNotNull(cluster.getNamesystem().getDelegationToken(new Text("atm")));

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once, so the 2NN loads the DT into its in-memory state.
    secondary.doCheckpoint();

    // Perform a saveNamespace, so that the NN has a new fsimage, and the 2NN
    // therefore needs to download a new fsimage the next time it performs a
    // checkpoint.
    cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    cluster.getNameNodeRpc().saveNamespace();
    cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);

    // Ensure that the 2NN can still perform a checkpoint.
    secondary.doCheckpoint();
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
Project: hadoop    File: TestHSync.java
/** Test hsync on an exact block boundary */
@Test
public void testHSyncBlockBoundary() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs = cluster.getFileSystem();

  final Path p = new Path("/testHSyncBlockBoundary/foo");
  final int len = 1 << 16;
  final byte[] fileContents = AppendTestUtil.initBuffer(len);
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  // fill exactly one block (tests the SYNC_BLOCK case) and flush
  out.write(fileContents, 0, len);
  out.hflush();
  // the full block should have caused a sync
  checkSyncMetric(cluster, 1);
  out.hsync();
  // an hsync at the same block boundary should not count as an additional sync
  checkSyncMetric(cluster, 1);
  // write one more byte and sync again
  out.write(1);
  out.hsync();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
Project: hadoop    File: TestBalancer.java
/**
 * Test a cluster with even distribution,
 * then three nodes are added to the cluster,
 * runs balancer with two of the nodes in the include list
 */
@Test(timeout=100000)
public void testBalancerCliWithIncludeList() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  Set<String> includeHosts = new HashSet<String>();
  includeHosts.add( "datanodeY");
  doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
      new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
      Parameters.DEFAULT.nodesToBeExcluded, includeHosts), true, false);
}
Project: hadoop    File: TestBalancer.java
/**
 * Test a cluster with even distribution,
 * then three nodes are added to the cluster,
 * runs balancer with two of the nodes in the include list
 */
@Test(timeout=100000)
public void testBalancerWithIncludeList() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  Set<String> includeHosts = new HashSet<String>();
  includeHosts.add( "datanodeY");
  doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
      new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
      Parameters.DEFAULT.nodesToBeExcluded, includeHosts), false, false);
}
Project: hadoop    File: TestHdfsTextCommand.java
@Before
public void setUp() throws IOException {
  Configuration conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}
Project: hadoop    File: TestSymlinkHdfs.java
@BeforeClass
public static void beforeClassSetup() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.set(FsPermission.UMASK_LABEL, "000");
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 0);
  cluster = new MiniDFSCluster.Builder(conf).build();
  webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
  dfs = cluster.getFileSystem();
}
Project: hadoop    File: TestStoragePolicyCommands.java
@Before
public void clusterSetUp() throws IOException {
  conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}