Java class org.apache.hadoop.hdfs.DFSConfigKeys usage examples
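
Every snippet below follows the same basic pattern: constants in DFSConfigKeys name HDFS configuration properties, and the code either overrides them on a Hadoop Configuration before starting a client or a MiniDFSCluster, or reads them back with their shipped defaults. A minimal sketch of that pattern (the override values here are arbitrary and only for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class DfsConfigKeysSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Override the block size and replication factor before creating a FileSystem.
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L * 1024L);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    // Read values back, falling back to the defaults shipped with HDFS.
    long blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
    int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
        DFSConfigKeys.DFS_REPLICATION_DEFAULT);
    System.out.println("blockSize=" + blockSize + ", replication=" + replication);
  }
}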

Project: angel    File: DistributeLog.java
/**
 * Initialize the distributed log: configure the HDFS flush sizes and create the output stream.
 * @throws IOException if the log directory is not configured or cannot be created
 */
public void init() throws IOException {
  int flushLen = conf.getInt(AngelConf.ANGEL_LOG_FLUSH_MIN_SIZE, AngelConf.DEFAULT_ANGEL_LOG_FLUSH_MIN_SIZE);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, flushLen);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, flushLen);

  String pathStr = conf.get(AngelConf.ANGEL_LOG_PATH);
  if (pathStr == null) {
    throw new IOException("log directory is null. you must set " + AngelConf.ANGEL_LOG_PATH);
  }

  LOG.info("algorithm log output directory=" + pathStr);

  Path path = new Path(pathStr + "/log");
  FileSystem fs = path.getFileSystem(conf);
  if (fs.exists(path)) {
    fs.delete(path, true);
  }
  outputStream = fs.create(path, true);
}
Project: hadoop    File: BaseTestHttpFSWith.java
private void testConcat() throws Exception {
  Configuration config = getProxiedFSConf();
  config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  if (!isLocalFS()) {
    FileSystem fs = FileSystem.get(config);
    fs.mkdirs(getProxiedFSTestDir());
    Path path1 = new Path("/test/foo.txt");
    Path path2 = new Path("/test/bar.txt");
    Path path3 = new Path("/test/derp.txt");
    DFSTestUtil.createFile(fs, path1, 1024, (short) 3, 0);
    DFSTestUtil.createFile(fs, path2, 1024, (short) 3, 0);
    DFSTestUtil.createFile(fs, path3, 1024, (short) 3, 0);
    fs.close();
    fs = getHttpFSFileSystem();
    fs.concat(path1, new Path[]{path2, path3});
    fs.close();
    fs = FileSystem.get(config);
    Assert.assertTrue(fs.exists(path1));
    Assert.assertFalse(fs.exists(path2));
    Assert.assertFalse(fs.exists(path3));
    fs.close();
  }
}
Project: hadoop    File: TestDelegationTokenForProxyUser.java
@BeforeClass
public static void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  config.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER),
      "group1");
  config.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  configureSuperUserIPAddresses(config, REAL_USER);
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).build();
  cluster.waitActive();
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  ugi = UserGroupInformation.createRemoteUser(REAL_USER);
  proxyUgi = UserGroupInformation.createProxyUserForTesting(PROXY_USER, ugi,
      GROUP_NAMES);
}
Project: hadoop    File: TestRetryCacheWithHA.java
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider = 
      NameNodeProxies.createFailoverProxyProvider(conf, 
          nnUri, ClientProtocol.class, true, null);
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);

  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
Project: hadoop    File: QuorumJournalManager.java
private static List<InetSocketAddress> getLoggerAddresses(URI uri)
    throws IOException {
  String authority = uri.getAuthority();
  Preconditions.checkArgument(authority != null && !authority.isEmpty(),
      "URI has no authority: " + uri);

  String[] parts = StringUtils.split(authority, ';');
  for (int i = 0; i < parts.length; i++) {
    parts[i] = parts[i].trim();
  }

  if (parts.length % 2 == 0) {
    LOG.warn("Quorum journal URI '" + uri + "' has an even number " +
        "of Journal Nodes specified. This is not recommended!");
  }

  List<InetSocketAddress> addrs = Lists.newArrayList();
  for (String addr : parts) {
    addrs.add(NetUtils.createSocketAddr(
        addr, DFSConfigKeys.DFS_JOURNALNODE_RPC_PORT_DEFAULT));
  }
  return addrs;
}
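
For context on the parsing above: the authority of a qjournal URI is a semicolon-separated list of JournalNode addresses, and entries without an explicit port fall back to DFSConfigKeys.DFS_JOURNALNODE_RPC_PORT_DEFAULT (8485). A small sketch with made-up host names, showing the raw authority string that getLoggerAddresses() splits and trims:

import java.net.URI;

public class QuorumUriSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical 3-node journal URI; java.net.URI keeps the ';'-separated
    // authority intact, which is exactly what the helper above works on.
    URI uri = new URI("qjournal://jn1.example.com:8485;jn2.example.com;jn3.example.com/myjournal");
    for (String part : uri.getAuthority().split(";")) {
      System.out.println(part.trim());
    }
  }
}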
Project: hadoop    File: IPFailoverProxyProvider.java
public IPFailoverProxyProvider(Configuration conf, URI uri,
    Class<T> xface) {
  Preconditions.checkArgument(
      xface.isAssignableFrom(NamenodeProtocols.class),
      "Interface class %s is not a valid NameNode protocol!", xface);
  this.xface = xface;
  this.nameNodeUri = uri;

  this.conf = new Configuration(conf);
  int maxRetries = this.conf.getInt(
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_KEY,
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_DEFAULT);
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      maxRetries);

  int maxRetriesOnSocketTimeouts = this.conf.getInt(
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      maxRetriesOnSocketTimeouts);
}
Project: hadoop    File: TestJournalNode.java
@Test(timeout=100000)
public void testFailToStartWithBadConfig() throws Exception {
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, "non-absolute-path");
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  assertJNFailsToStart(conf, "should be an absolute path");

  // Existing file which is not a directory 
  File existingFile = new File(TEST_BUILD_DATA, "testjournalnodefile");
  assertTrue(existingFile.createNewFile());
  try {
    conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
        existingFile.getAbsolutePath());
    assertJNFailsToStart(conf, "Not a directory");
  } finally {
    existingFile.delete();
  }

  // Directory which cannot be created
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
      Shell.WINDOWS ? "\\\\cannotBeCreated" : "/proc/does-not-exist");
  assertJNFailsToStart(conf, "Cannot create directory");
}
Project: hadoop    File: TestBootstrapStandbyWithBKJM.java
@Before
public void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
      .createJournalURI("/bootstrapStandby").toString());
  BKJMUtil.addJournalManagerDefinition(conf);
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      SlowCodec.class.getCanonicalName());
  CompressionCodecFactory.setCodecClasses(conf,
      ImmutableList.<Class> of(SlowCodec.class));
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(
          new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)).addNN(
          new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
  cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
      .numDataNodes(1).manageNameDfsSharedDirs(false).build();
  cluster.waitActive();
}
Project: hadoop    File: TestFsLimits.java
/**
 * This test verifies that the error string contains the
 * right parent directory name if the operation fails with
 * PathComponentTooLongException.
 */
@Test
public void testParentDirectoryNameIsCorrect() throws Exception {
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 20);
  mkdirs("/user", null);
  mkdirs("/user/testHome", null);
  mkdirs("/user/testHome/FileNameLength", null);

  mkdirCheckParentDirectory(
    "/user/testHome/FileNameLength/really_big_name_0003_fail",
    "/user/testHome/FileNameLength", PathComponentTooLongException.class);

  renameCheckParentDirectory("/user/testHome/FileNameLength",
    "/user/testHome/really_big_name_0003_fail", "/user/testHome/",
    PathComponentTooLongException.class);

}
Project: hadoop    File: TestDataNodeVolumeFailureReporting.java
/**
 * Initializes the cluster.
 *
 * @param numDataNodes number of datanodes
 * @param storagesPerDatanode number of storage locations on each datanode
 * @param failedVolumesTolerated number of acceptable volume failures
 * @throws Exception if there is any failure
 */
private void initCluster(int numDataNodes, int storagesPerDatanode,
    int failedVolumesTolerated) throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
      failedVolumesTolerated);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
      .storagesPerDatanode(storagesPerDatanode).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
  long dnCapacity = DFSTestUtil.getDatanodeCapacity(
      cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
  volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();
}
Project: hadoop    File: FSImage.java
/**
 * Construct the FSImage. Set the default checkpoint directories.
 *
 * Setup storage and initialize the edit log.
 *
 * @param conf Configuration
 * @param imageDirs Directories the image can be stored in.
 * @param editsDirs Directories the editlog can be stored in.
 * @throws IOException if directories are invalid.
 */
protected FSImage(Configuration conf,
                  Collection<URI> imageDirs,
                  List<URI> editsDirs)
    throws IOException {
  this.conf = conf;

  storage = new NNStorage(conf, imageDirs, editsDirs);
  if(conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,
                     DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT)) {
    storage.setRestoreFailedStorage(true);
  }

  this.editLog = new FSEditLog(conf, storage, editsDirs);

  archivalManager = new NNStorageRetentionManager(conf, storage, editLog);
}
Project: hadoop    File: TestMiniJournalCluster.java
@Test
public void testStartStop() throws IOException {
  Configuration conf = new Configuration();
  MiniJournalCluster c = new MiniJournalCluster.Builder(conf)
    .build();
  try {
    URI uri = c.getQuorumJournalURI("myjournal");
    String[] addrs = uri.getAuthority().split(";");
    assertEquals(3, addrs.length);

    JournalNode node = c.getJournalNode(0);
    String dir = node.getConf().get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY);
    assertEquals(
        new File(MiniDFSCluster.getBaseDirectory() + "journalnode-0")
          .getAbsolutePath(),
        dir);
  } finally {
    c.shutdown();
  }
}
Project: hadoop    File: TestWebHDFS.java
/**
 * Test for catching the "no datanode" IOException when creating a file
 * while no datanode is running for some reason.
 */
@Test(timeout=300000)
public void testCreateWithNoDN() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    cluster.waitActive();
    FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);
    fs.create(new Path("/testnodatanode"));
    Assert.fail("No exception was thrown");
  } catch (IOException ex) {
    GenericTestUtils.assertExceptionContains("Failed to find datanode", ex);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop    File: TestFileTruncate.java
@BeforeClass
public static void startUp() throws IOException {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf)
      .format(true)
      .numDataNodes(DATANODE_NUM)
      .nameNodePort(NameNode.DEFAULT_PORT)
      .waitSafeMode(true)
      .build();
  fs = cluster.getFileSystem();
}
Project: hadoop    File: TestBlockManager.java
@Before
public void setupMockCluster() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
      "need to set a dummy value here so it assumes a multi-rack cluster");
  fsn = Mockito.mock(FSNamesystem.class);
  Mockito.doReturn(true).when(fsn).hasWriteLock();
  bm = new BlockManager(fsn, conf);
  final String[] racks = {
      "/rackA",
      "/rackA",
      "/rackA",
      "/rackB",
      "/rackB",
      "/rackB"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  nodes = Arrays.asList(DFSTestUtil.toDatanodeDescriptor(storages));
  rackA = nodes.subList(0, 3);
  rackB = nodes.subList(3, 6);
}
Project: hadoop    File: TestXAttrsWithHA.java
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Project: hadoop    File: TestLossyRetryInvocationHandler.java
@Test
public void testStartNNWithTrashEmptier() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new HdfsConfiguration();

  // enable both trash emptier and dropping response
  conf.setLong("fs.trash.interval", 360);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);

  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
        .build();
    cluster.waitActive();
    cluster.transitionToActive(0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop    File: HeartbeatManager.java
HeartbeatManager(final Namesystem namesystem,
    final BlockManager blockManager, final Configuration conf) {
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  boolean avoidStaleDataNodesForWrite = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
  long recheckInterval = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 min
  long staleInterval = conf.getLong(
      DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
      DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);// 30s

  if (avoidStaleDataNodesForWrite && staleInterval < recheckInterval) {
    this.heartbeatRecheckInterval = staleInterval;
    LOG.info("Setting heartbeat recheck interval to " + staleInterval
        + " since " + DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY
        + " is less than "
        + DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY);
  } else {
    this.heartbeatRecheckInterval = recheckInterval;
  }
}
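
As a worked example of the branch above, using the defaults noted in the inline comments (a 5-minute recheck interval and a 30-second stale interval; the numbers below are illustrative, not read from a live configuration):

public class HeartbeatRecheckSketch {
  public static void main(String[] args) {
    long recheckInterval = 5 * 60 * 1000L; // dfs.namenode.heartbeat.recheck-interval default
    long staleInterval = 30 * 1000L;       // dfs.namenode.stale.datanode.interval default
    boolean avoidStaleDataNodesForWrite = true;
    // With stale < recheck and stale-node avoidance enabled, the effective
    // heartbeat recheck interval drops to the stale interval (30 000 ms).
    long effective = (avoidStaleDataNodesForWrite && staleInterval < recheckInterval)
        ? staleInterval : recheckInterval;
    System.out.println("effective heartbeat recheck interval = " + effective + " ms");
  }
}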
Project: hadoop    File: TestWebHdfsWithMultipleNameNodes.java
private static void setupCluster(final int nNameNodes, final int nDataNodes)
    throws Exception {
  LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);

  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
      .numDataNodes(nDataNodes)
      .build();
  cluster.waitActive();

  webhdfs = new WebHdfsFileSystem[nNameNodes];
  for(int i = 0; i < webhdfs.length; i++) {
    final InetSocketAddress addr = cluster.getNameNode(i).getHttpAddress();
    final String uri = WebHdfsFileSystem.SCHEME  + "://"
        + addr.getHostName() + ":" + addr.getPort() + "/";
    webhdfs[i] = (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
  }
}
Project: hadoop    File: JournalNodeHttpServer.java
void start() throws IOException {
  final InetSocketAddress httpAddr = getAddress(conf);

  final String httpsAddrString = conf.get(
      DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "journal",
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY);

  httpServer = builder.build();
  httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
  httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  httpServer.addInternalServlet("getJournal", "/getJournal",
      GetJournalEditServlet.class, true);
  httpServer.start();
}
Project: hadoop    File: TestHASafeMode.java
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
Project: hadoop    File: FsVolumeImpl.java
protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
  if (storageType.isTransient()) {
    return null;
  }
  if (dataset.datanode == null) {
    // FsVolumeImpl is used in test.
    return null;
  }

  final int maxNumThreads = dataset.datanode.getConf().getInt(
      DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY,
      DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT);

  ThreadFactory workerFactory = new ThreadFactoryBuilder()
      .setDaemon(true)
      .setNameFormat("FsVolumeImplWorker-" + parent.toString() + "-%d")
      .build();
  ThreadPoolExecutor executor = new ThreadPoolExecutor(
      1, maxNumThreads,
      60, TimeUnit.SECONDS,
      new LinkedBlockingQueue<Runnable>(),
      workerFactory);
  executor.allowCoreThreadTimeOut(true);
  return executor;
}
Project: hadoop    File: TestDataNodeVolumeFailureToleration.java
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  // Allow a single volume failure (there are two volumes)
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
}
Project: hadoop    File: TestEnhancedByteBufferAccess.java
public static HdfsConfiguration initZeroCopyTest() {
  Assume.assumeTrue(NativeIO.isAvailable());
  Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE, 3);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS, 100);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
        "TestRequestMmapAccess._PORT.sock").getAbsolutePath());
  conf.setBoolean(DFSConfigKeys.
      DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, true);
  conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setLong(DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1000);
  return conf;
}
Project: hadoop    File: TestParam.java
@Test
public void testReplicationParam() {
  final ReplicationParam p = new ReplicationParam(ReplicationParam.DEFAULT);
  Assert.assertEquals(null, p.getValue());
  Assert.assertEquals(
      (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
          DFSConfigKeys.DFS_REPLICATION_DEFAULT),
      p.getValue(conf));

  new ReplicationParam((short)1);

  try {
    new ReplicationParam((short)0);
    Assert.fail();
  } catch(IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}
Project: hadoop    File: TestHAConfiguration.java
@Test
public void testGetOtherNNHttpAddress() throws IOException {
  // Use non-local addresses to avoid host address matching
  Configuration conf = getHAConf("ns1", "1.2.3.1", "1.2.3.2");
  conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "ns1");

  // This is done by the NN before the StandbyCheckpointer is created
  NameNode.initializeGenericKeys(conf, "ns1", "nn1");

  // Since we didn't configure the HTTP address, and the default is
  // 0.0.0.0, it should substitute the address from the RPC configuration
  // above.
  StandbyCheckpointer checkpointer = new StandbyCheckpointer(conf, fsn);
  assertEquals(new URL("http", "1.2.3.2",
      DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, ""),
      checkpointer.getActiveNNAddress());
}
Project: hadoop    File: TestEditLogTailer.java
private static void testStandbyTriggersLogRolls(int activeIndex)
    throws Exception {
  Configuration conf = new Configuration();
  // Roll every 1s
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

  // Have to specify IPC ports so the NNs can talk to each other.
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032)));

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(0)
    .build();
  try {
    cluster.transitionToActive(activeIndex);
    waitForLogRollInSharedDir(cluster, 3);
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: TestQuotaByStorageType.java
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);

  // Setup a 3-node cluster and configure
  // each node with 1 SSD and 1 DISK without capacity limitation
  cluster = new MiniDFSCluster
      .Builder(conf)
      .numDataNodes(REPLICATION)
      .storageTypes(new StorageType[]{StorageType.SSD, StorageType.DEFAULT})
      .build();
  cluster.waitActive();

  fsdir = cluster.getNamesystem().getFSDirectory();
  dfs = cluster.getFileSystem();
  fsn = cluster.getNamesystem();
}
Project: elasticsearch_my    File: MiniHDFS.java
public static void main(String[] args) throws Exception {
    if (args.length != 1) {
       throw new IllegalArgumentException("MiniHDFS <baseDirectory>");
    }
    // configure Paths
    Path baseDir = Paths.get(args[0]);
    // hadoop-home/, so logs will not complain
    if (System.getenv("HADOOP_HOME") == null) {
        Path hadoopHome = baseDir.resolve("hadoop-home");
        Files.createDirectories(hadoopHome);
        System.setProperty("hadoop.home.dir", hadoopHome.toAbsolutePath().toString());
    }
    // hdfs-data/, where any data is going
    Path hdfsHome = baseDir.resolve("hdfs-data");

    // start cluster
    Configuration cfg = new Configuration();
    cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString());
    // lower default permission: TODO: needed?
    cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766");
    // TODO: remove hardcoded port!
    MiniDFSCluster dfs = new MiniDFSCluster.Builder(cfg).nameNodePort(9999).build();

    // write our PID file
    Path tmp = Files.createTempFile(baseDir, null, null);
    String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
    Files.write(tmp, pid.getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PID_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);

    // write our port file
    tmp = Files.createTempFile(baseDir, null, null);
    Files.write(tmp, Integer.toString(dfs.getNameNodePort()).getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PORT_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);
}
Project: hadoop    File: TestDataNodeMetrics.java
@Test
public void testReceivePacketMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int interval = 1;
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();

    Path testFile = new Path("/testFlushNanosMetric.txt");
    FSDataOutputStream fout = fs.create(testFile);
    fout.write(new byte[1]);
    fout.hsync();
    fout.close();
    List<DataNode> datanodes = cluster.getDataNodes();
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder dnMetrics = getMetrics(datanode.getMetrics().name());
    // Expect two flushes, 1 for the flush that occurs after writing, 
    // 1 that occurs on closing the data and metadata files.
    assertCounter("FlushNanosNumOps", 2L, dnMetrics);
    // Expect two syncs, one from the hsync, one on close.
    assertCounter("FsyncNanosNumOps", 2L, dnMetrics);
    // Wait for at least 1 rollover
    Thread.sleep((interval + 1) * 1000);
    // Check the receivePacket percentiles that should be non-zero
    String sec = interval + "s";
    assertQuantileGauges("FlushNanos" + sec, dnMetrics);
    assertQuantileGauges("FsyncNanos" + sec, dnMetrics);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
Project: hadoop    File: TestDFSIO.java
@BeforeClass
public static void beforeClass() throws Exception {
  bench = new TestDFSIO();
  bench.getConf().setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
  bench.getConf().setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  cluster = new MiniDFSCluster.Builder(bench.getConf())
                              .numDataNodes(2)
                              .format(true)
                              .build();
  FileSystem fs = cluster.getFileSystem();
  bench.createControlFile(fs, DEFAULT_NR_BYTES, DEFAULT_NR_FILES);

  // Check write here, as it is required for other tests.
  testWrite();
}
Project: hadoop    File: TestFsDatasetCache.java
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
  conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      CACHE_CAPACITY);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());

  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  cluster.waitActive();

  fs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  fsImage = nn.getFSImage();
  dn = cluster.getDataNodes().get(0);
  fsd = dn.getFSDataset();

  spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);

}
Project: hadoop    File: MiniJournalCluster.java
private Configuration createConfForNode(Builder b, int idx) {
  Configuration conf = new Configuration(b.conf);
  File logDir = getStorageDir(idx);
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, logDir.toString());
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY, "localhost:0");
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "localhost:0");
  return conf;
}
Project: hadoop    File: Nfs3Metrics.java
public static Nfs3Metrics create(Configuration conf, String gatewayName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);

  // Percentile measurement is [50th,75th,90th,95th,99th] currently 
  int[] intervals = conf
      .getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
  return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm));
}
Project: hadoop    File: TestFsLimits.java
@Before
public void setUp() throws IOException {
  conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
           fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
                              "namenode")).toString());
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  fs = null;
  fsIsReady = true;
}
Project: hadoop    File: TestFSMainOperationsWebHdfs.java
@BeforeClass
public static void setupCluster() {
  final Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();

    //change root permission to 777
    cluster.getFileSystem().setPermission(
        new Path("/"), new FsPermission((short)0777));

    final String uri = WebHdfsFileSystem.SCHEME  + "://"
        + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);

    //get file system as a non-superuser
    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
        current.getShortUserName() + "x", new String[]{"user"});
    fileSystem = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return FileSystem.get(new URI(uri), conf);
      }
    });

    defaultWorkingDirectory = fileSystem.getWorkingDirectory();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Project: hadoop    File: TestDecommissioningStatus.java
@BeforeClass
public static void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
      false);

  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  excludeFile = new Path(dir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  Path includeFile = new Path(dir, "include");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);

  writeConfigFile(localFileSys, excludeFile, null);
  writeConfigFile(localFileSys, includeFile, null);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .setHeartbeatExpireInterval(3000);
  Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
Project: hadoop    File: TestDFSHAAdmin.java
/**
 * Test that the fencing configuration can be overridden per-nameservice
 * or per-namenode.
 */
@Test
public void testFencingConfigPerNameNode() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();

  final String nsSpecificKey = DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY + "." + NSID;
  final String nnSpecificKey = nsSpecificKey + ".nn1";

  HdfsConfiguration conf = getHAConf();
  // Set the default fencer to succeed
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));

  // Set the NN-specific fencer to fail. Should fail to fence.
  conf.set(nnSpecificKey, getFencerFalseCommand());
  tool.setConf(conf);
  assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
  conf.unset(nnSpecificKey);

  // Set an NS-specific fencer to fail. Should fail.
  conf.set(nsSpecificKey, getFencerFalseCommand());
  tool.setConf(conf);
  assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));

  // Set the NS-specific fencer to succeed. Should succeed
  conf.set(nsSpecificKey, getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
}
Project: hadoop    File: TestDistCpWithAcls.java
/**
 * Initialize the cluster, wait for it to become active, and get FileSystem.
 *
 * @param format if true, format the NameNode and DataNodes before starting up
 * @param aclsEnabled if true, ACL support is enabled
 * @throws Exception if any step fails
 */
private static void initCluster(boolean format, boolean aclsEnabled)
    throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, aclsEnabled);
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
  conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
    .build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}