Example source code for the Java class org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption

Project: hadoop    File: NNStorage.java
void readProperties(StorageDirectory sd, StartupOption startupOption)
    throws IOException {
  Properties props = readPropertiesFile(sd.getVersionFile());
  if (HdfsServerConstants.RollingUpgradeStartupOption.ROLLBACK.matches
      (startupOption)) {
    int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
    if (lv > getServiceLayoutVersion()) {
      // we should not use a newer version for rollingUpgrade rollback
      throw new IncorrectVersionException(getServiceLayoutVersion(), lv,
          "storage directory " + sd.getRoot().getAbsolutePath());
    }
    props.setProperty("layoutVersion",
        Integer.toString(HdfsConstants.NAMENODE_LAYOUT_VERSION));
  }
  setFieldsFromProperties(props, sd);
}
Project: hadoop    File: TestHDFSServerPorts.java
/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  // Set up testing environment directories
  hdfsDir = new File(TEST_DATA_DIR, "backupNode");
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");

  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");

  // Start BackupNode
  String[] args = new String [] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);

  return bu;
}
Project: hadoop    File: DataNode.java
/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * @return false if the passed arguments are incorrect
 */
@VisibleForTesting
static boolean parseArguments(String args[], Configuration conf) {
  StartupOption startOpt = StartupOption.REGULAR;
  int i = 0;

  if (args != null && args.length != 0) {
    String cmd = args[i++];
    if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
          "resolution is handled by the NameNode.");
      return false;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else {
      return false;
    }
  }

  setStartupOption(conf, startOpt);
  return (args == null || i == args.length);    // Fail if more than one cmd specified!
}
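For context, a minimal sketch (not part of this listing) of how the parser above could be exercised from a test in the same package; it assumes DataNode.parseArguments and DataNode.getStartupOption are the package-private static helpers paired with setStartupOption, and that "-rollback" is the name of StartupOption.ROLLBACK:

@Test
public void testParseRollbackFlag() {
  Configuration conf = new HdfsConfiguration();
  // "-rollback" maps to StartupOption.ROLLBACK and is stored in conf.
  assertTrue(DataNode.parseArguments(new String[] { "-rollback" }, conf));
  assertEquals(StartupOption.ROLLBACK, DataNode.getStartupOption(conf));
  // Unknown flags are rejected.
  assertFalse(DataNode.parseArguments(new String[] { "-bogus" }, conf));
}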
Project: hadoop    File: BlockPoolSliceStorage.java
/**
 * Analyze and load storage directories. Recover from previous transitions if
 * required.
 *
 * The block pool storages are either all analyzed or none of them is loaded.
 * Therefore, a failure to load any block pool storage results in a faulty
 * data volume.
 *
 * @param datanode Datanode to which this storage belongs
 * @param nsInfo namespace information
 * @param dataDirs storage directories of block pool
 * @param startOpt startup option
 * @return an array of loaded block pool directories.
 * @throws IOException on error
 */
List<StorageDirectory> loadBpStorageDirectories(
    DataNode datanode, NamespaceInfo nsInfo,
    Collection<File> dataDirs, StartupOption startOpt) throws IOException {
  List<StorageDirectory> succeedDirs = Lists.newArrayList();
  try {
    for (File dataDir : dataDirs) {
      if (containsStorageDir(dataDir)) {
        throw new IOException(
            "BlockPoolSliceStorage.recoverTransitionRead: " +
                "attempt to load an used block storage: " + dataDir);
      }
      StorageDirectory sd =
          loadStorageDirectory(datanode, nsInfo, dataDir, startOpt);
      succeedDirs.add(sd);
    }
  } catch (IOException e) {
    LOG.warn("Failed to analyze storage directories for block pool "
        + nsInfo.getBlockPoolID(), e);
    throw e;
  }
  return succeedDirs;
}
Project: hadoop    File: Journal.java
Journal(Configuration conf, File logDir, String journalId,
    StartupOption startOpt, StorageErrorReporter errorReporter)
    throws IOException {
  storage = new JNStorage(conf, logDir, startOpt, errorReporter);
  this.journalId = journalId;

  refreshCachedData();

  this.fjm = storage.getJournalManager();

  this.metrics = JournalMetrics.create(this);

  EditLogFile latest = scanStorageForLatestEdits();
  if (latest != null) {
    highestWrittenTxId = latest.getLastTxId();
  }
}
Project: hadoop    File: TestDFSUpgrade.java
@Ignore
public void testUpgrade4() throws Exception {
  int numDirs = 4;
  conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);      
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
  conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
  String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);

  log("NameNode upgrade with one bad storage dir", numDirs);
  UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
  try {
    // assert("storage dir has been prepared for failure before reaching this point");
    startNameNodeShouldFail(StartupOption.UPGRADE, IOException.class,
        Pattern.compile("failed in 1 storage"));
  } finally {
    // assert("storage dir shall be returned to normal state before exiting");
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  }
}
Project: hadoop    File: TestHdfsServerConstants.java
/**
 * Test that we can parse a StartupOption string with a
 * RollingUpgradeStartupOption.
 */
@Test
public void testRollingUpgradeStartupOptionParsing() {
  verifyStartupOptionResult("ROLLINGUPGRADE(ROLLBACK)",
                            StartupOption.ROLLINGUPGRADE,
                            RollingUpgradeStartupOption.ROLLBACK);
  verifyStartupOptionResult("ROLLINGUPGRADE(DOWNGRADE)",
                            StartupOption.ROLLINGUPGRADE,
                            RollingUpgradeStartupOption.DOWNGRADE);
  verifyStartupOptionResult("ROLLINGUPGRADE(STARTED)",
      StartupOption.ROLLINGUPGRADE,
      RollingUpgradeStartupOption.STARTED);

  try {
    verifyStartupOptionResult("ROLLINGUPGRADE(UNKNOWNOPTION)", StartupOption.ROLLINGUPGRADE, null);
    fail("Failed to get expected IllegalArgumentException");
  } catch(IllegalArgumentException iae) {
    // Expected!
  }
}
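The verifyStartupOptionResult helper used above is not shown in this listing; a plausible sketch, assuming the composite strings are parsed by StartupOption.getEnum(String) and the sub-option is exposed via getRollingUpgradeStartupOption():

private static void verifyStartupOptionResult(String value,
    StartupOption expectedOption,
    RollingUpgradeStartupOption expectedRollupOption) {
  StartupOption option = StartupOption.getEnum(value);
  assertEquals(expectedOption, option);
  if (expectedRollupOption != null) {
    // e.g. "ROLLINGUPGRADE(ROLLBACK)" carries ROLLBACK as its sub-option.
    assertEquals(expectedRollupOption, option.getRollingUpgradeStartupOption());
  }
}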
Project: hadoop    File: MiniDFSCluster.java
/**
 * Restart the namenode at a given index. Optionally wait for the cluster
 * to become active.
 */
public synchronized void restartNameNode(int nnIndex, boolean waitActive,
    String... args) throws IOException {
  String nameserviceId = nameNodes[nnIndex].nameserviceId;
  String nnId = nameNodes[nnIndex].nnId;
  StartupOption startOpt = nameNodes[nnIndex].startOpt;
  Configuration conf = nameNodes[nnIndex].conf;
  shutdownNameNode(nnIndex);
  if (args.length != 0) {
    startOpt = null;
  } else {
    args = createArgs(startOpt);
  }
  NameNode nn = NameNode.createNameNode(args, conf);
  nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, startOpt,
      conf);
  if (waitActive) {
    waitClusterUp();
    LOG.info("Restarted the namenode");
    waitActive();
  }
}
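The createArgs helper referenced above is not included in this listing; a rough sketch of what it presumably does, converting the saved StartupOption back into NameNode command-line arguments (the special cases are assumptions):

private static String[] createArgs(StartupOption operation) {
  if (operation == StartupOption.ROLLINGUPGRADE) {
    // A rolling upgrade carries its sub-option as a second argument.
    return new String[] { operation.getName(),
        operation.getRollingUpgradeStartupOption().name() };
  }
  // REGULAR (or no option at all) means "no extra arguments".
  return (operation == null || operation == StartupOption.REGULAR)
      ? new String[] {}
      : new String[] { operation.getName() };
}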
Project: hadoop    File: TestBackupNode.java
BackupNode startBackupNode(Configuration conf,
                           StartupOption startupOpt,
                           int idx) throws IOException {
  Configuration c = new HdfsConfiguration(conf);
  String dirs = getBackupNodeDir(startupOpt, idx);
  c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
  c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
  c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
      "127.0.0.1:0");
  c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
          "127.0.0.1:0");

  BackupNode bn = (BackupNode)NameNode.createNameNode(
      new String[]{startupOpt.getName()}, c);
  assertTrue(bn.getRole() + " must be in SafeMode.", bn.isInSafeMode());
  assertTrue(bn.getRole() + " must be in StandbyState",
             bn.getNamesystem().getHAState()
               .equalsIgnoreCase(HAServiceState.STANDBY.name()));
  return bn;
}
Project: hadoop    File: TestClusterId.java
@Test
public void testFormatClusterIdOption() throws IOException {

  // 1. should format without cluster id
  //StartupOption.FORMAT.setClusterId("");
  NameNode.format(config);
  // see if the cluster id is not empty.
  String cid = getClusterId(config);
  assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")) );

  // 2. successful format with given clusterid
  StartupOption.FORMAT.setClusterId("mycluster");
  NameNode.format(config);
  // see if the cluster id matches the given clusterid.
  cid = getClusterId(config);
  assertTrue("ClusterId didn't match", cid.equals("mycluster"));

  // 3. format without any clusterid again. It should generate new
  //clusterid.
  StartupOption.FORMAT.setClusterId("");
  NameNode.format(config);
  String newCid = getClusterId(config);
  assertFalse("ClusterId should not be the same", newCid.equals(cid));
}
Project: hadoop    File: TestFileAppendRestart.java
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused
 * append(), when called exactly at a block boundary,
 * to not log an OP_ADD. This ensures that we can read from
 * such buggy versions correctly, by loading a namesystem image
 * created with 0.23.1-rc2 that exhibits the issue.
 */
@Test
public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
  final Configuration conf = new HdfsConfiguration();

  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-buggy-append");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .waitSafeMode(false)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/io_data/test_io_0");
    assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop    File: NNStorage.java
/** 
 * Processes the startup options for the clusterid and blockpoolid 
 * for the upgrade. 
 * @param startOpt Startup options 
 * @param layoutVersion Layout version for the upgrade 
 * @throws IOException
 */
void processStartupOptionsForUpgrade(StartupOption startOpt, int layoutVersion)
    throws IOException {
  if (startOpt == StartupOption.UPGRADE || startOpt == StartupOption.UPGRADEONLY) {
    // If upgrading from a release that does not support federation,
    // use the clusterId provided in the startup options if present.
    // Otherwise generate a new cluster ID.
    if (!NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.FEDERATION, layoutVersion)) {
      if (startOpt.getClusterId() == null) {
        startOpt.setClusterId(newClusterID());
      }
      setClusterID(startOpt.getClusterId());
      setBlockPoolID(newBlockPoolID());
    } else {
      // Upgrading from one version of federation to another supported
      // version of federation doesn't require a clusterID.
      // Warn the user if the current clusterid doesn't match the
      // given clusterid.
      if (startOpt.getClusterId() != null
          && !startOpt.getClusterId().equals(getClusterID())) {
        LOG.warn("Clusterid mismatch - current clusterid: " + getClusterID()
            + ", Ignoring given clusterid: " + startOpt.getClusterId());
      }
    }
    LOG.info("Using clusterid: " + getClusterID());
  }
}
Project: hadoop    File: TestDFSUpgradeFromImage.java
void upgradeAndVerify(MiniDFSCluster.Builder bld, ClusterVerifier verifier)
    throws IOException {
  MiniDFSCluster cluster = null;
  try {
    bld.format(false).startupOption(StartupOption.UPGRADE)
      .clusterId("testClusterId");
    cluster = bld.build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    DFSClient dfsClient = dfs.dfs;
    //Safemode will be off only after upgrade is complete. Wait for it.
    while ( dfsClient.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET) ) {
      LOG.info("Waiting for SafeMode to be OFF.");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {}
    }
    recoverAllLeases(dfsClient, new Path("/"));
    verifyFileSystem(dfs);

    if (verifier != null) {
      verifier.verifyClusterPostUpgrade(cluster);
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  } 
}
Project: hadoop    File: NameNode.java
protected HAState createHAState(StartupOption startOpt) {
  if (!haEnabled || startOpt == StartupOption.UPGRADE 
      || startOpt == StartupOption.UPGRADEONLY) {
    return ACTIVE_STATE;
  } else {
    return STANDBY_STATE;
  }
}
Project: hadoop    File: TestFSImage.java
/**
 * In this test case an image was created with a file having
 * preferredBlockSize = 0, which was allowed before the 2.1.0-beta release.
 * A NameNode of version 2.6 or later could previously not read such a
 * file; this test verifies that the image can still be loaded.
 * See HDFS-7788 for more information.
 * @throws Exception
 */
@Test
public void testZeroBlockSize() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-zero-block-size");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));
  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, 
      nameDir.getAbsolutePath());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .startupOption(StartupOption.UPGRADE)
      .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/zeroBlockFile");
    assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
    assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
  } finally {
    cluster.shutdown();
    //Clean up
    FileUtil.fullyDelete(dfsDir);
  }
}
Project: hadoop    File: NameNode.java
/**
 * Verify that configured directories exist, then print the metadata versions
 * of the software and the image.
 *
 * @param conf configuration to use
 * @throws IOException
 */
private static boolean printMetadataVersion(Configuration conf)
  throws IOException {
  final String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  final String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, namenodeId);
  final FSImage fsImage = new FSImage(conf);
  final FSNamesystem fs = new FSNamesystem(conf, fsImage, false);
  return fsImage.recoverTransitionRead(
    StartupOption.METADATAVERSION, fs, null);
}
Project: hadoop    File: FSNamesystem.java
/**
 * Instantiates an FSNamesystem loaded from the image and edits
 * directories specified in the passed Configuration.
 *
 * @param conf the Configuration which specifies the storage directories
 *             from which to load
 * @return an FSNamesystem which contains the loaded namespace
 * @throws IOException if loading fails
 */
static FSNamesystem loadFromDisk(Configuration conf) throws IOException {

  checkConfiguration(conf);
  FSImage fsImage = new FSImage(conf,
      FSNamesystem.getNamespaceDirs(conf),
      FSNamesystem.getNamespaceEditsDirs(conf));
  FSNamesystem namesystem = new FSNamesystem(conf, fsImage, false);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  if (startOpt == StartupOption.RECOVER) {
    namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  }

  long loadStart = monotonicNow();
  try {
    namesystem.loadFSImage(startOpt);
  } catch (IOException ioe) {
    LOG.warn("Encountered exception loading fsimage", ioe);
    fsImage.close();
    throw ioe;
  }
  long timeTakenToLoadFSImage = monotonicNow() - loadStart;
  LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
  NameNodeMetrics nnMetrics = NameNode.getNameNodeMetrics();
  if (nnMetrics != null) {
    nnMetrics.setFsImageLoadTime((int) timeTakenToLoadFSImage);
  }
  return namesystem;
}
Project: hadoop    File: FSNamesystem.java
private static Collection<URI> getStorageDirs(Configuration conf,
                                              String propertyName) {
  Collection<String> dirNames = conf.getTrimmedStringCollection(propertyName);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  if(startOpt == StartupOption.IMPORT) {
    // In case of IMPORT this will get rid of default directories 
    // but will retain directories specified in hdfs-site.xml
    // When importing image from a checkpoint, the name-node can
    // start with empty set of storage directories.
    Configuration cE = new HdfsConfiguration(false);
    cE.addResource("core-default.xml");
    cE.addResource("core-site.xml");
    cE.addResource("hdfs-default.xml");
    Collection<String> dirNames2 = cE.getTrimmedStringCollection(propertyName);
    dirNames.removeAll(dirNames2);
    if(dirNames.isEmpty())
      LOG.warn("!!! WARNING !!!" +
        "\n\tThe NameNode currently runs without persistent storage." +
        "\n\tAny changes to the file system meta-data may be lost." +
        "\n\tRecommended actions:" +
        "\n\t\t- shutdown and restart NameNode with configured \"" 
        + propertyName + "\" in hdfs-site.xml;" +
        "\n\t\t- use Backup Node as a persistent and up-to-date storage " +
        "of the file system meta-data.");
  } else if (dirNames.isEmpty()) {
    dirNames = Collections.singletonList(
        DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT);
  }
  return Util.stringCollectionAsURIs(dirNames);
}
Project: hadoop    File: FSImage.java
/**
 * Load image from a checkpoint directory and save it into the current one.
 * @param target the NameSystem to import into
 * @throws IOException
 */
void doImportCheckpoint(FSNamesystem target) throws IOException {
  Collection<URI> checkpointDirs =
    FSImage.getCheckpointDirs(conf, null);
  List<URI> checkpointEditsDirs =
    FSImage.getCheckpointEditsDirs(conf, null);

  if (checkpointDirs == null || checkpointDirs.isEmpty()) {
    throw new IOException("Cannot import image from a checkpoint. "
                          + "\"dfs.namenode.checkpoint.dir\" is not set." );
  }

  if (checkpointEditsDirs == null || checkpointEditsDirs.isEmpty()) {
    throw new IOException("Cannot import image from a checkpoint. "
                          + "\"dfs.namenode.checkpoint.dir\" is not set." );
  }

  FSImage realImage = target.getFSImage();
  FSImage ckptImage = new FSImage(conf, 
                                  checkpointDirs, checkpointEditsDirs);
  // load from the checkpoint dirs
  try {
    ckptImage.recoverTransitionRead(StartupOption.REGULAR, target, null);
  } finally {
    ckptImage.close();
  }
  // return back the real image
  realImage.getStorage().setStorageInfo(ckptImage.getStorage());
  realImage.getEditLog().setNextTxId(ckptImage.getEditLog().getLastWrittenTxId()+1);
  realImage.initEditLog(StartupOption.IMPORT);

  realImage.getStorage().setBlockPoolID(ckptImage.getBlockPoolID());

  // and save it but keep the same checkpointTime
  saveNamespace(target);
  getStorage().writeAll();
}
Project: hadoop    File: FSImage.java
void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery,
    FSImageFile imageFile, StartupOption startupOption) throws IOException {
  LOG.debug("Planning to load image :\n" + imageFile);
  StorageDirectory sdForProperties = imageFile.sd;
  storage.readProperties(sdForProperties, startupOption);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
    // For txid-based layout, we should have a .md5 file
    // next to the image file
    boolean isRollingRollback = RollingUpgradeStartupOption.ROLLBACK
        .matches(startupOption);
    loadFSImage(imageFile.getFile(), target, recovery, isRollingRollback);
  } else if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FSIMAGE_CHECKSUM, getLayoutVersion())) {
    // In 0.22, we have the checksum stored in the VERSION file.
    String md5 = storage.getDeprecatedProperty(
        NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY);
    if (md5 == null) {
      throw new InconsistentFSStateException(sdForProperties.getRoot(),
          "Message digest property " +
          NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY +
          " not set for storage directory " + sdForProperties.getRoot());
    }
    loadFSImage(imageFile.getFile(), new MD5Hash(md5), target, recovery,
        false);
  } else {
    // We don't have any record of the md5sum
    loadFSImage(imageFile.getFile(), null, target, recovery, false);
  }
}
Project: hadoop    File: FSImage.java
public void initEditLog(StartupOption startOpt) throws IOException {
  Preconditions.checkState(getNamespaceID() != 0,
      "Must know namespace ID before initting edit log");
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (!HAUtil.isHAEnabled(conf, nameserviceId)) {
    // If this NN is not HA
    editLog.initJournalsForWrite();
    editLog.recoverUnclosedStreams();
  } else if (HAUtil.isHAEnabled(conf, nameserviceId)
      && (startOpt == StartupOption.UPGRADE
          || startOpt == StartupOption.UPGRADEONLY
          || RollingUpgradeStartupOption.ROLLBACK.matches(startOpt))) {
    // This NN is HA, but we're doing an upgrade or a rollback of rolling
    // upgrade so init the edit log for write.
    editLog.initJournalsForWrite();
    if (startOpt == StartupOption.UPGRADE
        || startOpt == StartupOption.UPGRADEONLY) {
      long sharedLogCTime = editLog.getSharedLogCTime();
      if (this.storage.getCTime() < sharedLogCTime) {
        throw new IOException("It looks like the shared log is already " +
            "being upgraded but this NN has not been upgraded yet. You " +
            "should restart this NameNode with the '" +
            StartupOption.BOOTSTRAPSTANDBY.getName() + "' option to bring " +
            "this NN in sync with the other.");
      }
    }
    editLog.recoverUnclosedStreams();
  } else {
    // This NN is HA and we're not doing an upgrade.
    editLog.initSharedJournalsForRead();
  }
}
Project: hadoop    File: FSImage.java
private long loadEdits(Iterable<EditLogInputStream> editStreams,
    FSNamesystem target, StartupOption startOpt, MetaRecoveryContext recovery)
    throws IOException {
  LOG.debug("About to load edits:\n  " + Joiner.on("\n  ").join(editStreams));
  StartupProgress prog = NameNode.getStartupProgress();
  prog.beginPhase(Phase.LOADING_EDITS);

  long prevLastAppliedTxId = lastAppliedTxId;  
  try {    
    FSEditLogLoader loader = new FSEditLogLoader(target, lastAppliedTxId);

    // Load latest edits
    for (EditLogInputStream editIn : editStreams) {
      LOG.info("Reading " + editIn + " expecting start txid #" +
            (lastAppliedTxId + 1));
      try {
        loader.loadFSEdits(editIn, lastAppliedTxId + 1, startOpt, recovery);
      } finally {
        // Update lastAppliedTxId even in case of error, since some ops may
        // have been successfully applied before the error.
        lastAppliedTxId = loader.getLastAppliedTxId();
      }
      // If we are in recovery mode, we may have skipped over some txids.
      if (editIn.getLastTxId() != HdfsConstants.INVALID_TXID) {
        lastAppliedTxId = editIn.getLastTxId();
      }
    }
  } finally {
    FSEditLog.closeAllStreams(editStreams);
    // update the counts
    updateCountForQuota(target.getBlockManager().getStoragePolicySuite(),
        target.dir.rootDir);
  }
  prog.endPhase(Phase.LOADING_EDITS);
  return lastAppliedTxId - prevLastAppliedTxId;
}
Project: hadoop    File: DataNode.java
/**
 * Initializes the {@link #data}. The initialization is done only once, when
 * the handshake with the first namenode is completed.
 */
private void initStorage(final NamespaceInfo nsInfo) throws IOException {
  final FsDatasetSpi.Factory<? extends FsDatasetSpi<?>> factory
      = FsDatasetSpi.Factory.getFactory(conf);

  if (!factory.isSimulated()) {
    final StartupOption startOpt = getStartupOption(conf);
    if (startOpt == null) {
      throw new IOException("Startup option not set.");
    }
    final String bpid = nsInfo.getBlockPoolID();
    //read storage info, lock data dirs and transition fs state if necessary
    synchronized (this) {
      storage.recoverTransitionRead(this, nsInfo, dataDirs, startOpt);
    }
    final StorageInfo bpStorage = storage.getBPStorage(bpid);
    LOG.info("Setting up storage: nsid=" + bpStorage.getNamespaceID()
        + ";bpid=" + bpid + ";lv=" + storage.getLayoutVersion()
        + ";nsInfo=" + nsInfo + ";dnuuid=" + storage.getDatanodeUuid());
  }

  // If this is a newly formatted DataNode then assign a new DatanodeUuid.
  checkDatanodeUuid();

  synchronized(this)  {
    if (data == null) {
      data = factory.newInstance(this, storage, conf);
    }
  }
}
Project: hadoop    File: DataStorage.java
private StorageDirectory loadStorageDirectory(DataNode datanode,
    NamespaceInfo nsInfo, File dataDir, StartupOption startOpt)
    throws IOException {
  StorageDirectory sd = new StorageDirectory(dataDir, null, false);
  try {
    StorageState curState = sd.analyzeStorage(startOpt, this);
    // sd is locked but not opened
    switch (curState) {
    case NORMAL:
      break;
    case NON_EXISTENT:
      LOG.info("Storage directory " + dataDir + " does not exist");
      throw new IOException("Storage directory " + dataDir
          + " does not exist");
    case NOT_FORMATTED: // format
      LOG.info("Storage directory " + dataDir + " is not formatted for "
          + nsInfo.getBlockPoolID());
      LOG.info("Formatting ...");
      format(sd, nsInfo, datanode.getDatanodeUuid());
      break;
    default:  // recovery part is common
      sd.doRecover(curState);
    }

    // 2. Do transitions
    // Each storage directory is treated individually.
    // During startup some of them can upgrade or roll back
    // while others could be up-to-date for the regular startup.
    doTransition(datanode, sd, nsInfo, startOpt);

    // 3. Update successfully loaded storage.
    setServiceLayoutVersion(getServiceLayoutVersion());
    writeProperties(sd);

    return sd;
  } catch (IOException ioe) {
    sd.unlock();
    throw ioe;
  }
}
Project: hadoop    File: DataStorage.java
/**
 * Prepare a storage directory. It creates a builder which can later be used
 * to add the volume to DataStorage. If the volume cannot be added, it is OK
 * to discard the builder.
 *
 * @param datanode DataNode object.
 * @param volume the root path of a storage directory.
 * @param nsInfos an array of namespace infos.
 * @return a VolumeBuilder that holds the metadata of this storage directory
 * and can be added to DataStorage later.
 * @throws IOException if it encounters I/O errors.
 *
 * Note that if an IOException is thrown, the state of DataStorage is not modified.
 */
public VolumeBuilder prepareVolume(DataNode datanode, File volume,
    List<NamespaceInfo> nsInfos) throws IOException {
  if (containsStorageDir(volume)) {
    final String errorMessage = "Storage directory is in use";
    LOG.warn(errorMessage + ".");
    throw new IOException(errorMessage);
  }

  StorageDirectory sd = loadStorageDirectory(
      datanode, nsInfos.get(0), volume, StartupOption.HOTSWAP);
  VolumeBuilder builder =
      new VolumeBuilder(this, sd);
  for (NamespaceInfo nsInfo : nsInfos) {
    List<File> bpDataDirs = Lists.newArrayList();
    bpDataDirs.add(BlockPoolSliceStorage.getBpRoot(
        nsInfo.getBlockPoolID(), new File(volume, STORAGE_DIR_CURRENT)));
    makeBlockPoolDataDir(bpDataDirs, null);

    BlockPoolSliceStorage bpStorage;
    final String bpid = nsInfo.getBlockPoolID();
    synchronized (this) {
      bpStorage = this.bpStorageMap.get(bpid);
      if (bpStorage == null) {
        bpStorage = new BlockPoolSliceStorage(
            nsInfo.getNamespaceID(), bpid, nsInfo.getCTime(),
            nsInfo.getClusterID());
        addBlockPoolStorage(bpid, bpStorage);
      }
    }
    builder.addBpStorageDirectories(
        bpid, bpStorage.loadBpStorageDirectories(
            datanode, nsInfo, bpDataDirs, StartupOption.HOTSWAP));
  }
  return builder;
}
Project: hadoop    File: JNStorage.java
/**
 * @param conf Configuration object
 * @param logDir the path to the directory in which data will be stored
 * @param startOpt startup option used to analyze and recover the storage
 * @param errorReporter a callback to report errors
 * @throws IOException 
 */
protected JNStorage(Configuration conf, File logDir, StartupOption startOpt,
    StorageErrorReporter errorReporter) throws IOException {
  super(NodeType.JOURNAL_NODE);

  sd = new StorageDirectory(logDir);
  this.addStorageDir(sd);
  this.fjm = new FileJournalManager(conf, sd, errorReporter);

  analyzeAndRecoverStorage(startOpt);
}
Project: hadoop    File: JNStorage.java
void analyzeAndRecoverStorage(StartupOption startOpt) throws IOException {
  this.state = sd.analyzeStorage(startOpt, this);
  final boolean needRecover = state != StorageState.NORMAL
      && state != StorageState.NON_EXISTENT
      && state != StorageState.NOT_FORMATTED;
  if (state == StorageState.NORMAL && startOpt != StartupOption.ROLLBACK) {
    readProperties(sd);
  } else if (needRecover) {
    sd.doRecover(state);
  }
}
Project: hadoop    File: JournalNode.java
synchronized Journal getOrCreateJournal(String jid, StartupOption startOpt)
    throws IOException {
  QuorumJournalManager.checkJournalId(jid);

  Journal journal = journalsById.get(jid);
  if (journal == null) {
    File logDir = getLogDir(jid);
    LOG.info("Initializing journal in directory " + logDir);      
    journal = new Journal(conf, logDir, jid, startOpt, new ErrorReporter());
    journalsById.put(jid, journal);
  }

  return journal;
}
Project: hadoop    File: TestDatanodeConfig.java
/**
 * Test that a data-node does not start if the configuration specifies an
 * incorrect URI scheme for the data directory.
 * Test that a data-node starts if the data directory is specified as a
 * URI = "file:///path" or as a non-URI path.
 */
@Test
public void testDataDirectories() throws IOException {
  File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
  Configuration conf = cluster.getConfiguration(0);
  // 1. Test an unsupported scheme. Only "file:" is supported.
  String dnDir = makeURI("shv", null, fileAsURI(dataDir).getPath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dnDir);
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[]{}, conf);
    fail();
  } catch(Exception e) {
    // expecting exception here
  } finally {
    if (dn != null) {
      dn.shutdown();
    }
  }
  assertNull("Data-node startup should have failed.", dn);

  // 2. Test "file:" schema and no schema (path-only). Both should work.
  String dnDir1 = fileAsURI(dataDir).toString() + "1";
  String dnDir2 = makeURI("file", "localhost",
                  fileAsURI(dataDir).getPath() + "2");
  String dnDir3 = dataDir.getAbsolutePath() + "3";
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
              dnDir1 + "," + dnDir2 + "," + dnDir3);
  try {
    cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
    assertTrue("Data-node should startup.", cluster.isDataNodeUp());
  } finally {
    if (cluster != null) {
      cluster.shutdownDataNodes();
    }
  }
}
Project: hadoop    File: DFSTestUtil.java
/**
 * When formatting a namenode, a cluster id must be provided.
 * @param conf configuration to format with
 * @throws IOException
 */
public static void formatNameNode(Configuration conf) throws IOException {
  String clusterId = StartupOption.FORMAT.getClusterId();
  if(clusterId == null || clusterId.isEmpty())
    StartupOption.FORMAT.setClusterId("testClusterID");
  // Use a copy of conf as it can be altered by namenode during format.
  NameNode.format(new Configuration(conf));
}
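A short usage sketch (assumed, not taken from the listing): pre-seeding an explicit cluster id before formatting; when no id is set, formatNameNode() falls back to "testClusterID" as shown above.

Configuration conf = new HdfsConfiguration();
// Optional: otherwise formatNameNode() supplies "testClusterID".
StartupOption.FORMAT.setClusterId("myTestCluster");
DFSTestUtil.formatNameNode(conf);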
Project: hadoop    File: TestDFSUpgrade.java
/**
 * Attempts to start a NameNode with the given operation.  Starting
 * the NameNode should throw an exception.
 * @param operation - NameNode startup operation
 * @param exceptionClass - if non-null, will check that the caught exception
 *     is assignment-compatible with exceptionClass
 * @param messagePattern - if non-null, will check that a substring of the 
 *     message from the caught exception matches this pattern, via the
 *     {@link Matcher#find()} method.
 */
void startNameNodeShouldFail(StartupOption operation,
    Class<? extends Exception> exceptionClass, Pattern messagePattern) {
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                              .startupOption(operation)
                                              .format(false)
                                              .manageDataDfsDirs(false)
                                              .manageNameDfsDirs(false)
                                              .build(); // should fail
    fail("NameNode should have failed to start");

  } catch (Exception e) {
    // expect exception
    if (exceptionClass != null) {
      assertTrue("Caught exception is not of expected class "
          + exceptionClass.getSimpleName() + ": "
          + StringUtils.stringifyException(e), 
          exceptionClass.isInstance(e));
    }
    if (messagePattern != null) {
      assertTrue("Caught exception message string does not match expected pattern \""
          + messagePattern.pattern() + "\" : "
          + StringUtils.stringifyException(e), 
          messagePattern.matcher(e.getMessage()).find());
    }
    LOG.info("Successfully detected expected NameNode startup failure.");
  }
}
Project: hadoop    File: TestDFSUpgrade.java
/**
 * Create an instance of a newly configured cluster for testing that does
 * not manage its own directories or files
 */
private MiniDFSCluster createCluster() throws IOException {
  return new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                         .format(false)
                                         .manageDataDfsDirs(false)
                                         .manageNameDfsDirs(false)
                                         .startupOption(StartupOption.UPGRADE)
                                         .build();
}
Project: hadoop    File: MiniDFSClusterWithNodeGroup.java
public synchronized void startDataNodes(Configuration conf, int numDataNodes, 
    boolean manageDfsDirs, StartupOption operation, 
    String[] racks, String[] nodeGroups, String[] hosts,
    long[] simulatedCapacities,
    boolean setupHostsFile) throws IOException {
  startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, nodeGroups,
      hosts, null, simulatedCapacities, setupHostsFile, false, false);
}
Project: hadoop    File: MiniDFSClusterWithNodeGroup.java
public void startDataNodes(Configuration conf, int numDataNodes, 
    boolean manageDfsDirs, StartupOption operation, 
    String[] racks, long[] simulatedCapacities,
    String[] nodeGroups) throws IOException {
  startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, nodeGroups,
      null, simulatedCapacities, false);
}
Project: hadoop    File: MiniDFSClusterWithNodeGroup.java
@Override
public synchronized void startDataNodes(Configuration conf, int numDataNodes, 
    StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
    String[] racks, String[] hosts,
    long[][] storageCapacities,
    long[] simulatedCapacities,
    boolean setupHostsFile,
    boolean checkDataNodeAddrConfig,
    boolean checkDataNodeHostConfig,
    Configuration[] dnConfOverlays) throws IOException {
  startDataNodes(conf, numDataNodes, storageTypes, manageDfsDirs, operation, racks,
      NODE_GROUPS, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
      checkDataNodeAddrConfig, checkDataNodeHostConfig);
}
Project: hadoop    File: MiniDFSCluster.java
NameNodeInfo(NameNode nn, String nameserviceId, String nnId,
    StartupOption startOpt, Configuration conf) {
  this.nameNode = nn;
  this.nameserviceId = nameserviceId;
  this.nnId = nnId;
  this.startOpt = startOpt;
  this.conf = conf;
}
Project: hadoop    File: TestDatanodeStartupOptions.java
/**
 * A few options that should all parse successfully.
 */
@Test (timeout=60000)
public void testStartupSuccess() {
  checkExpected(true, StartupOption.REGULAR, conf);
  checkExpected(true, StartupOption.REGULAR, conf, "-regular");
  checkExpected(true, StartupOption.REGULAR, conf, "-REGULAR");
  checkExpected(true, StartupOption.ROLLBACK, conf, "-rollback");
}
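checkExpected is not shown in this listing; a plausible sketch, assuming it simply feeds the varargs to DataNode.parseArguments (see the DataNode snippet earlier) and compares the outcome:

private static void checkExpected(boolean expectSuccess,
    StartupOption expectedOption, Configuration conf, String... args) {
  boolean ok = DataNode.parseArguments(args, conf);
  assertEquals(expectSuccess, ok);
  if (expectSuccess) {
    // On success the parsed option is stored in conf for later retrieval.
    assertEquals(expectedOption, DataNode.getStartupOption(conf));
  }
}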
Project: hadoop    File: TestJournal.java
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
  Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
  StorageDirectory sd = journal.getStorage().getStorageDir(0);
  File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);

  // Journal should be locked, since the format() call locks it.
  GenericTestUtils.assertExists(lockFile);

  journal.newEpoch(FAKE_NSINFO,  1);
  try {
    new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
        mockErrorReporter);
    fail("Did not fail to create another journal in same dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Cannot lock storage", ioe);
  }

  journal.close();

  // Journal should no longer be locked after the close() call.
  // Hence, should be able to create a new Journal in the same dir.
  Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID,
      StartupOption.REGULAR, mockErrorReporter);
  journal2.newEpoch(FAKE_NSINFO, 2);
  journal2.close();
}
Project: hadoop    File: TestJournal.java
@Before
public void setup() throws Exception {
  FileUtil.fullyDelete(TEST_LOG_DIR);
  conf = new Configuration();
  journal = new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
    mockErrorReporter);
  journal.format(FAKE_NSINFO);
}
Project: hadoop    File: MiniDFSCluster.java
public synchronized void startDataNodes(Configuration conf, int numDataNodes,
    boolean manageDfsDirs, StartupOption operation, 
    String[] racks, String[] hosts,
    long[] simulatedCapacities,
    boolean setupHostsFile,
    boolean checkDataNodeAddrConfig) throws IOException {
  startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
      null, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false, null);
}