Example source code for the Java class org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile

Project: hadoop    File: TransferFsImage.java
/**
 * Requests that the NameNode download an image from this node.  Allows for
 * optional external cancellation.
 *
 * @param fsName the http address for the remote NN
 * @param conf Configuration
 * @param storage the storage directory to transfer the image from
 * @param nnf the NameNodeFile type of the image
 * @param txid the transaction ID of the image to be uploaded
 * @param canceler optional canceler to check for abort of upload
 * @throws IOException if there is an I/O error or cancellation
 */
public static void uploadImageFromStorage(URL fsName, Configuration conf,
    NNStorage storage, NameNodeFile nnf, long txid, Canceler canceler)
    throws IOException {
  URL url = new URL(fsName, ImageServlet.PATH_SPEC);
  long startTime = Time.monotonicNow();
  try {
    uploadImage(url, conf, storage, nnf, txid, canceler);
  } catch (HttpPutFailedException e) {
    if (e.getResponseCode() == HttpServletResponse.SC_CONFLICT) {
      // this is OK - this means that a previous attempt to upload
      // this checkpoint succeeded even though we thought it failed.
      LOG.info("Image upload with txid " + txid + 
          " conflicted with a previous image upload to the " +
          "same NameNode. Continuing...", e);
      return;
    } else {
      throw e;
    }
  }
  double xferSec = Math.max(
      ((float) (Time.monotonicNow() - startTime)) / 1000.0, 0.001);
  LOG.info("Uploaded image with txid " + txid + " to namenode at " + fsName
      + " in " + xferSec + " seconds");
}
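A minimal caller sketch for uploadImageFromStorage (the wrapper method, the NN address, and the use of NNStorage.getMostRecentCheckpointTxId to pick the txid are illustrative assumptions, not part of the Hadoop snippet above):

import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
import org.apache.hadoop.hdfs.util.Canceler;

// Hypothetical caller: push the most recent checkpoint image to the active NN.
void pushLatestImage(Configuration conf, NNStorage storage) throws Exception {
  URL activeNn = new URL("http://active-nn.example.com:50070"); // placeholder
  long txid = storage.getMostRecentCheckpointTxId();
  TransferFsImage.uploadImageFromStorage(
      activeNn, conf, storage, NameNodeFile.IMAGE, txid, new Canceler());
}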
Project: hadoop    File: FSNamesystem.java
/**
 * Update internal state to indicate that a rolling upgrade is in progress for
 * a non-HA setup. This requires that the namesystem be in safe mode; after
 * the rollback checkpoint is saved, the namesystem leaves safe mode
 * automatically.
 */
private void startRollingUpgradeInternalForNonHA(long startTime)
    throws IOException {
  Preconditions.checkState(!haEnabled);
  if (!isInSafeMode()) {
    throw new IOException("Safe mode should be turned ON "
        + "in order to create namespace image.");
  }
  checkRollingUpgrade("start rolling upgrade");
  getFSImage().checkUpgrade();
  // in non-HA setup, we do an extra checkpoint to generate a rollback image
  getFSImage().saveNamespace(this, NameNodeFile.IMAGE_ROLLBACK, null);
  LOG.info("Successfully saved namespace for preparing rolling upgrade.");

  // leave SafeMode automatically
  setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  setRollingUpgradeInfo(true, startTime);
}
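A sketch of the admin-side sequence that reaches this method on a non-HA cluster (the helper method is hypothetical; safe mode must be entered first, or the server throws the IOException above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

// Hypothetical client sequence mirroring `hdfs dfsadmin -safemode enter`
// followed by `hdfs dfsadmin -rollingUpgrade prepare`.
void prepareRollingUpgrade(Configuration conf) throws Exception {
  DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
  dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  dfs.rollingUpgrade(RollingUpgradeAction.PREPARE); // saves the rollback image
  // the namesystem then leaves safe mode automatically
}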
Project: hadoop    File: FSImage.java
/** rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
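For reference, the rename in the second step maps the rollback image onto the regular image name. A sketch assuming ckptId is 42, the standard fsimage naming scheme, and code living in the org.apache.hadoop.hdfs.server.namenode package (where the package-private getStorageFile is visible):

import java.io.File;

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;

// Illustration only: resolve the two file names involved in the rename.
void showRollbackRename(StorageDirectory sd) {
  File from = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_ROLLBACK, 42);
  File to = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 42);
  // from: <storage-dir>/current/fsimage_rollback_0000000000000000042
  // to:   <storage-dir>/current/fsimage_0000000000000000042
}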
Project: hadoop    File: FSImage.java
/**
 * Rename the fsimage file with the specified txid.
 */
private void renameCheckpoint(long txid, NameNodeFile fromNnf,
    NameNodeFile toNnf, boolean renameMD5) throws IOException {
  ArrayList<StorageDirectory> al = null;

  for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
    try {
      renameImageFileInDir(sd, fromNnf, toNnf, txid, renameMD5);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + sd, ioe);
      if (al == null) {
        al = Lists.newArrayList();
      }
      al.add(sd);
    }
  }
  if(al != null) storage.reportErrorsOnDirectories(al);
}
Project: hadoop    File: FSImage.java
/**
 * Rename all the fsimage files with the specified NameNodeFile type. The
 * associated checksum files will also be renamed.
 */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf)
    throws IOException {
  ArrayList<StorageDirectory> al = null;
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
  storage.inspectStorageDirs(inspector);
  for (FSImageFile image : inspector.getFoundImages()) {
    try {
      renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
      if (al == null) {
        al = Lists.newArrayList();
      }
      al.add(image.sd);
    }
  }
  if(al != null) {
    storage.reportErrorsOnDirectories(al);
  }
}
Project: hadoop    File: FSImage.java
private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf,
    NameNodeFile toNnf, long txid, boolean renameMD5) throws IOException {
  final File fromFile = NNStorage.getStorageFile(sd, fromNnf, txid);
  final File toFile = NNStorage.getStorageFile(sd, toNnf, txid);
  // renameTo fails on Windows if the destination file already exists.
  if(LOG.isDebugEnabled()) {
    LOG.debug("renaming  " + fromFile.getAbsolutePath() 
              + " to " + toFile.getAbsolutePath());
  }
  if (!fromFile.renameTo(toFile)) {
    if (!toFile.delete() || !fromFile.renameTo(toFile)) {
      throw new IOException("renaming  " + fromFile.getAbsolutePath() + " to "  + 
          toFile.getAbsolutePath() + " FAILED");
    }
  }
  if (renameMD5) {
    MD5FileUtils.renameMD5File(fromFile, toFile);
  }
}
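The delete-then-retry fallback is worth seeing in isolation; a standalone sketch of the same pattern (not Hadoop code):

import java.io.File;
import java.io.IOException;

// File.renameTo cannot overwrite an existing destination on Windows, so
// delete the stale target and retry the rename once before giving up.
static void renameOverwriting(File from, File to) throws IOException {
  if (!from.renameTo(to)) {
    if (!to.delete() || !from.renameTo(to)) {
      throw new IOException("renaming " + from.getAbsolutePath() + " to "
          + to.getAbsolutePath() + " FAILED");
    }
  }
}

On Java 7+, java.nio.file.Files.move(from.toPath(), to.toPath(), StandardCopyOption.REPLACE_EXISTING) expresses the same overwrite intent directly.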
Project: hadoop    File: FSImagePreTransactionalStorageInspector.java
/**
 * Determine the checkpoint time of the specified StorageDirectory
 *
 * @param sd StorageDirectory to check
 * @return the last checkpoint time if the file exists and can be read; otherwise 0L
 * @throws IOException On errors processing file pointed to by sd
 */
static long readCheckpointTime(StorageDirectory sd) throws IOException {
  File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
  long timeStamp = 0L;
  if (timeFile.exists() && FileUtil.canRead(timeFile)) {
    DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
    try {
      timeStamp = in.readLong();
      in.close();
      in = null;
    } finally {
      IOUtils.cleanup(LOG, in);
    }
  }
  return timeStamp;
}
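On Java 7+ the close-then-cleanup dance above can be written with try-with-resources; an equivalent sketch (using File.canRead in place of Hadoop's FileUtil.canRead):

import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

// Equivalent read with automatic resource management; returns 0L, like
// readCheckpointTime, when the file is missing or unreadable.
static long readTimestamp(File timeFile) throws IOException {
  if (!timeFile.exists() || !timeFile.canRead()) {
    return 0L;
  }
  try (DataInputStream in = new DataInputStream(new FileInputStream(timeFile))) {
    return in.readLong();
  }
}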
Project: hadoop    File: TestStandbyCheckpoints.java
/**
 * Test for the case when the SBN is configured to checkpoint based
 * on a time period, but no transactions are happening on the
 * active. Thus, it would want to save a second checkpoint at the
 * same txid, which is a no-op. This test makes sure this doesn't
 * cause any problem.
 */
@Test(timeout = 300000)
public void testCheckpointWhenNoNewTransactionsHappened()
    throws Exception {
  // Checkpoint as fast as we can, in a tight loop.
  cluster.getConfiguration(1).setInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
  cluster.restartNameNode(1);
  nn1 = cluster.getNameNode(1);

  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1);

  // We shouldn't save any checkpoints at txid=0
  Thread.sleep(1000);
  Mockito.verify(spyImage1, Mockito.never())
    .saveNamespace((FSNamesystem) Mockito.anyObject());

  // Roll the primary and wait for the standby to catch up
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  Thread.sleep(2000);

  // We should make exactly one checkpoint at this new txid. 
  Mockito.verify(spyImage1, Mockito.times(1)).saveNamespace(
      (FSNamesystem) Mockito.anyObject(), Mockito.eq(NameNodeFile.IMAGE),
      (Canceler) Mockito.anyObject());
}
Project: hadoop    File: TestStartup.java
/**
 * Verify that the edits log and fsimage are in different directories and have the expected sizes.
 */
private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
  StorageDirectory sd = null;
  for (Iterator<StorageDirectory> it = img.getStorage().dirIterator(); it.hasNext();) {
    sd = it.next();

    if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
      File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
      LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
      assertEquals(expectedImgSize, imf.length());  
    } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
      File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
      LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length()  + "; expected = " + expectedEditsSize);
      assertEquals(expectedEditsSize, edf.length());    
    } else {
      fail("Image/Edits directories are not different");
    }
  }

}
Project: aliyun-oss-hadoop-fs    File: TransferFsImage.java
/**
 * Requests that the NameNode download an image from this node.  Allows for
 * optional external cancellation.
 *
 * @param fsName the http address for the remote NN
 * @param conf Configuration
 * @param storage the storage directory to transfer the image from
 * @param nnf the NameNodeFile type of the image
 * @param txid the transaction ID of the image to be uploaded
 * @param canceler optional canceler to check for abort of upload
 * @return the result of the upload
 * @throws IOException if there is an I/O error or cancellation
 */
public static TransferResult uploadImageFromStorage(URL fsName, Configuration conf,
    NNStorage storage, NameNodeFile nnf, long txid, Canceler canceler)
    throws IOException {
  URL url = new URL(fsName, ImageServlet.PATH_SPEC);
  long startTime = Time.monotonicNow();
  try {
    uploadImage(url, conf, storage, nnf, txid, canceler);
  } catch (HttpPutFailedException e) {
    // translate the error code to a result, which is a bit more obvious in usage
    TransferResult result = TransferResult.getResultForCode(e.getResponseCode());
    if (result.shouldReThrowException) {
      throw e;
    }
    return result;
  }
  double xferSec = Math.max(
      ((float) (Time.monotonicNow() - startTime)) / 1000.0, 0.001);
  LOG.info("Uploaded image with txid " + txid + " to namenode at " + fsName
      + " in " + xferSec + " seconds");
  return TransferResult.SUCCESS;
}
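This variant relies on TransferResult to map HTTP status codes to outcomes. A minimal sketch of the shape such an enum takes (illustrative only, not the actual Hadoop definition):

import javax.servlet.http.HttpServletResponse;

// Each known HTTP status maps to a result; unknown statuses fall back to a
// value whose original exception the caller should rethrow.
enum TransferResult {
  SUCCESS(HttpServletResponse.SC_OK, false),
  UNEXPECTED_FAILURE(-1, true);

  final int response;
  final boolean shouldReThrowException;

  TransferResult(int response, boolean shouldReThrowException) {
    this.response = response;
    this.shouldReThrowException = shouldReThrowException;
  }

  static TransferResult getResultForCode(int code) {
    for (TransferResult r : values()) {
      if (r.response == code) {
        return r;
      }
    }
    return UNEXPECTED_FAILURE;
  }
}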
Project: aliyun-oss-hadoop-fs    File: FSNamesystem.java
/**
 * Update internal state to indicate that a rolling upgrade is in progress for
 * a non-HA setup. This requires that the namesystem be in safe mode; after
 * the rollback checkpoint is saved, the namesystem leaves safe mode
 * automatically.
 */
private void startRollingUpgradeInternalForNonHA(long startTime)
    throws IOException {
  Preconditions.checkState(!haEnabled);
  if (!isInSafeMode()) {
    throw new IOException("Safe mode should be turned ON "
        + "in order to create namespace image.");
  }
  checkRollingUpgrade("start rolling upgrade");
  getFSImage().checkUpgrade();
  // in non-HA setup, we do an extra checkpoint to generate a rollback image
  getFSImage().saveNamespace(this, NameNodeFile.IMAGE_ROLLBACK, null);
  LOG.info("Successfully saved namespace for preparing rolling upgrade.");

  // leave SafeMode automatically
  setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  setRollingUpgradeInfo(true, startTime);
}
Project: aliyun-oss-hadoop-fs    File: FSImage.java
/** rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);
  // HDFS-7939: purge all old fsimage_rollback_*
  archivalManager.purgeCheckpoints(NameNodeFile.IMAGE_ROLLBACK);
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
Project: aliyun-oss-hadoop-fs    File: FSImage.java
/**
 * @param timeWindow a checkpoint is done if the latest checkpoint
 *                   was done more than this number of seconds ago.
 * @param txGap a checkpoint is done also if the gap between the latest tx id
 *              and the latest checkpoint is greater than this number.
 * @return true if a checkpoint has been made
 * @see #saveNamespace(FSNamesystem, NameNodeFile, Canceler)
 */
public synchronized boolean saveNamespace(long timeWindow, long txGap,
    FSNamesystem source) throws IOException {
  if (timeWindow > 0 || txGap > 0) {
    final FSImageStorageInspector inspector = storage.readAndInspectDirs(
        EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK),
        StartupOption.REGULAR);
    FSImageFile image = inspector.getLatestImages().get(0);
    File imageFile = image.getFile();

    final long checkpointTxId = image.getCheckpointTxId();
    final long checkpointAge = Time.now() - imageFile.lastModified();
    if (checkpointAge <= timeWindow * 1000 &&
        checkpointTxId >= this.getLastAppliedOrWrittenTxId() - txGap) {
      return false;
    }
  }
  saveNamespace(source, NameNodeFile.IMAGE, null);
  return true;
}
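A hypothetical invocation (fsImage and namesystem are assumed to be in scope; both thresholds are examples, not defaults): checkpoint only when the newest image is over an hour old or more than 1,000,000 transactions behind the last applied txid.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

// Hypothetical caller of the time/transaction-gated saveNamespace above.
boolean maybeCheckpoint(FSImage fsImage, FSNamesystem namesystem)
    throws IOException {
  final long timeWindow = 3600L;   // seconds
  final long txGap = 1_000_000L;   // transactions
  return fsImage.saveNamespace(timeWindow, txGap, namesystem);
  // passing 0 for both parameters skips the checks and always checkpoints
}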
Project: aliyun-oss-hadoop-fs    File: FSImage.java
/**
 * Rename the fsimage file with the specified txid.
 */
private void renameCheckpoint(long txid, NameNodeFile fromNnf,
    NameNodeFile toNnf, boolean renameMD5) throws IOException {
  ArrayList<StorageDirectory> al = null;

  for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
    try {
      renameImageFileInDir(sd, fromNnf, toNnf, txid, renameMD5);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + sd, ioe);
      if (al == null) {
        al = Lists.newArrayList();
      }
      al.add(sd);
    }
  }
  if(al != null) storage.reportErrorsOnDirectories(al);
}
Project: aliyun-oss-hadoop-fs    File: FSImage.java
/**
 * Rename all the fsimage files with the specified NameNodeFile type. The
 * associated checksum files will also be renamed.
 */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf)
    throws IOException {
  ArrayList<StorageDirectory> al = null;
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
  storage.inspectStorageDirs(inspector);
  for (FSImageFile image : inspector.getFoundImages()) {
    try {
      renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
      if (al == null) {
        al = Lists.newArrayList();
      }
      al.add(image.sd);
    }
  }
  if(al != null) {
    storage.reportErrorsOnDirectories(al);
  }
}
Project: aliyun-oss-hadoop-fs    File: FSImage.java
private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf,
    NameNodeFile toNnf, long txid, boolean renameMD5) throws IOException {
  final File fromFile = NNStorage.getStorageFile(sd, fromNnf, txid);
  final File toFile = NNStorage.getStorageFile(sd, toNnf, txid);
  // renameTo fails on Windows if the destination file already exists.
  if(LOG.isDebugEnabled()) {
    LOG.debug("renaming  " + fromFile.getAbsolutePath() 
              + " to " + toFile.getAbsolutePath());
  }
  if (!fromFile.renameTo(toFile)) {
    if (!toFile.delete() || !fromFile.renameTo(toFile)) {
      throw new IOException("renaming  " + fromFile.getAbsolutePath() + " to "  + 
          toFile.getAbsolutePath() + " FAILED");
    }
  }
  if (renameMD5) {
    MD5FileUtils.renameMD5File(fromFile, toFile);
  }
}
Project: aliyun-oss-hadoop-fs    File: FSImagePreTransactionalStorageInspector.java
/**
 * Determine the checkpoint time of the specified StorageDirectory
 *
 * @param sd StorageDirectory to check
 * @return the last checkpoint time if the file exists and can be read; otherwise 0L
 * @throws IOException On errors processing file pointed to by sd
 */
static long readCheckpointTime(StorageDirectory sd) throws IOException {
  File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
  long timeStamp = 0L;
  if (timeFile.exists() && FileUtil.canRead(timeFile)) {
    DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
    try {
      timeStamp = in.readLong();
      in.close();
      in = null;
    } finally {
      IOUtils.cleanup(LOG, in);
    }
  }
  return timeStamp;
}
Project: aliyun-oss-hadoop-fs    File: TestStandbyCheckpoints.java
/**
 * Test for the case when the SBN is configured to checkpoint based
 * on a time period, but no transactions are happening on the
 * active. Thus, it would want to save a second checkpoint at the
 * same txid, which is a no-op. This test makes sure this doesn't
 * cause any problem.
 */
@Test(timeout = 300000)
public void testCheckpointWhenNoNewTransactionsHappened()
    throws Exception {
  // Checkpoint as fast as we can, in a tight loop.
  cluster.getConfiguration(1).setInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
  cluster.restartNameNode(1);
  nns[1] = cluster.getNameNode(1);

  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nns[1]);

  // We shouldn't save any checkpoints at txid=0
  Thread.sleep(1000);
  Mockito.verify(spyImage1, Mockito.never())
    .saveNamespace((FSNamesystem) Mockito.anyObject());

  // Roll the primary and wait for the standby to catch up
  HATestUtil.waitForStandbyToCatchUp(nns[0], nns[1]);
  Thread.sleep(2000);

  // We should make exactly one checkpoint at this new txid. 
  Mockito.verify(spyImage1, Mockito.times(1)).saveNamespace(
      (FSNamesystem) Mockito.anyObject(), Mockito.eq(NameNodeFile.IMAGE),
      (Canceler) Mockito.anyObject());
}
Project: aliyun-oss-hadoop-fs    File: TestStartup.java
/**
 * Verify that the edits log and fsimage are in different directories and have the expected sizes.
 */
private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
  StorageDirectory sd = null;
  for (Iterator<StorageDirectory> it = img.getStorage().dirIterator(); it.hasNext();) {
    sd = it.next();

    if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
      File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
      LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
      assertEquals(expectedImgSize, imf.length());  
    } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
      File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
      LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length()  + "; expected = " + expectedEditsSize);
      assertEquals(expectedEditsSize, edf.length());    
    } else {
      fail("Image/Edits directories are not different");
    }
  }

}
Project: big-c    File: TransferFsImage.java
/**
 * Requests that the NameNode download an image from this node.  Allows for
 * optional external cancellation.
 *
 * @param fsName the http address for the remote NN
 * @param conf Configuration
 * @param storage the storage directory to transfer the image from
 * @param nnf the NameNodeFile type of the image
 * @param txid the transaction ID of the image to be uploaded
 * @param canceler optional canceler to check for abort of upload
 * @throws IOException if there is an I/O error or cancellation
 */
public static void uploadImageFromStorage(URL fsName, Configuration conf,
    NNStorage storage, NameNodeFile nnf, long txid, Canceler canceler)
    throws IOException {
  URL url = new URL(fsName, ImageServlet.PATH_SPEC);
  long startTime = Time.monotonicNow();
  try {
    uploadImage(url, conf, storage, nnf, txid, canceler);
  } catch (HttpPutFailedException e) {
    if (e.getResponseCode() == HttpServletResponse.SC_CONFLICT) {
      // this is OK - this means that a previous attempt to upload
      // this checkpoint succeeded even though we thought it failed.
      LOG.info("Image upload with txid " + txid + 
          " conflicted with a previous image upload to the " +
          "same NameNode. Continuing...", e);
      return;
    } else {
      throw e;
    }
  }
  double xferSec = Math.max(
      ((float) (Time.monotonicNow() - startTime)) / 1000.0, 0.001);
  LOG.info("Uploaded image with txid " + txid + " to namenode at " + fsName
      + " in " + xferSec + " seconds");
}
Project: big-c    File: FSNamesystem.java
/**
 * Update internal state to indicate that a rolling upgrade is in progress for
 * a non-HA setup. This requires that the namesystem be in safe mode; after
 * the rollback checkpoint is saved, the namesystem leaves safe mode
 * automatically.
 */
private void startRollingUpgradeInternalForNonHA(long startTime)
    throws IOException {
  Preconditions.checkState(!haEnabled);
  if (!isInSafeMode()) {
    throw new IOException("Safe mode should be turned ON "
        + "in order to create namespace image.");
  }
  checkRollingUpgrade("start rolling upgrade");
  getFSImage().checkUpgrade();
  // in non-HA setup, we do an extra checkpoint to generate a rollback image
  getFSImage().saveNamespace(this, NameNodeFile.IMAGE_ROLLBACK, null);
  LOG.info("Successfully saved namespace for preparing rolling upgrade.");

  // leave SafeMode automatically
  setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  setRollingUpgradeInfo(true, startTime);
}
Project: big-c    File: FSImage.java
/** rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
Project: big-c    File: FSImage.java
/**
 * Rename the fsimage file with the specified txid.
 */
private void renameCheckpoint(long txid, NameNodeFile fromNnf,
    NameNodeFile toNnf, boolean renameMD5) throws IOException {
  ArrayList<StorageDirectory> al = null;

  for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
    try {
      renameImageFileInDir(sd, fromNnf, toNnf, txid, renameMD5);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + sd, ioe);
      if (al == null) {
        al = Lists.newArrayList();
      }
      al.add(sd);
    }
  }
  if(al != null) storage.reportErrorsOnDirectories(al);
}
Project: big-c    File: FSImage.java
/**
 * Rename all the fsimage files with the specified NameNodeFile type. The
 * associated checksum files will also be renamed.
 */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf)
    throws IOException {
  ArrayList<StorageDirectory> al = null;
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
  storage.inspectStorageDirs(inspector);
  for (FSImageFile image : inspector.getFoundImages()) {
    try {
      renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
      if (al == null) {
        al = Lists.newArrayList();
      }
      al.add(image.sd);
    }
  }
  if(al != null) {
    storage.reportErrorsOnDirectories(al);
  }
}
Project: big-c    File: FSImage.java
private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf,
    NameNodeFile toNnf, long txid, boolean renameMD5) throws IOException {
  final File fromFile = NNStorage.getStorageFile(sd, fromNnf, txid);
  final File toFile = NNStorage.getStorageFile(sd, toNnf, txid);
  // renameTo fails on Windows if the destination file already exists.
  if(LOG.isDebugEnabled()) {
    LOG.debug("renaming  " + fromFile.getAbsolutePath() 
              + " to " + toFile.getAbsolutePath());
  }
  if (!fromFile.renameTo(toFile)) {
    if (!toFile.delete() || !fromFile.renameTo(toFile)) {
      throw new IOException("renaming  " + fromFile.getAbsolutePath() + " to "  + 
          toFile.getAbsolutePath() + " FAILED");
    }
  }
  if (renameMD5) {
    MD5FileUtils.renameMD5File(fromFile, toFile);
  }
}
Project: big-c    File: FSImagePreTransactionalStorageInspector.java
/**
 * Determine the checkpoint time of the specified StorageDirectory
 *
 * @param sd StorageDirectory to check
 * @return the last checkpoint time if the file exists and can be read; otherwise 0L
 * @throws IOException On errors processing file pointed to by sd
 */
static long readCheckpointTime(StorageDirectory sd) throws IOException {
  File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
  long timeStamp = 0L;
  if (timeFile.exists() && FileUtil.canRead(timeFile)) {
    DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
    try {
      timeStamp = in.readLong();
      in.close();
      in = null;
    } finally {
      IOUtils.cleanup(LOG, in);
    }
  }
  return timeStamp;
}
Project: big-c    File: TestStandbyCheckpoints.java
/**
 * Test for the case when the SBN is configured to checkpoint based
 * on a time period, but no transactions are happening on the
 * active. Thus, it would want to save a second checkpoint at the
 * same txid, which is a no-op. This test makes sure this doesn't
 * cause any problem.
 */
@Test(timeout = 300000)
public void testCheckpointWhenNoNewTransactionsHappened()
    throws Exception {
  // Checkpoint as fast as we can, in a tight loop.
  cluster.getConfiguration(1).setInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
  cluster.restartNameNode(1);
  nn1 = cluster.getNameNode(1);

  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1);

  // We shouldn't save any checkpoints at txid=0
  Thread.sleep(1000);
  Mockito.verify(spyImage1, Mockito.never())
    .saveNamespace((FSNamesystem) Mockito.anyObject());

  // Roll the primary and wait for the standby to catch up
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  Thread.sleep(2000);

  // We should make exactly one checkpoint at this new txid. 
  Mockito.verify(spyImage1, Mockito.times(1)).saveNamespace(
      (FSNamesystem) Mockito.anyObject(), Mockito.eq(NameNodeFile.IMAGE),
      (Canceler) Mockito.anyObject());
}
Project: big-c    File: TestStartup.java
/**
 * Verify that the edits log and fsimage are in different directories and have the expected sizes.
 */
private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
  StorageDirectory sd = null;
  for (Iterator<StorageDirectory> it = img.getStorage().dirIterator(); it.hasNext();) {
    sd = it.next();

    if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
      File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
      LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
      assertEquals(expectedImgSize, imf.length());  
    } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
      File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
      LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length()  + "; expected = " + expectedEditsSize);
      assertEquals(expectedEditsSize, edf.length());    
    } else {
      fail("Image/Edits directories are not different");
    }
  }

}
Project: hadoop-2.6.0-cdh5.4.3    File: TransferFsImage.java
/**
 * Requests that the NameNode download an image from this node.  Allows for
 * optional external cancellation.
 *
 * @param fsName the http address for the remote NN
 * @param conf Configuration
 * @param storage the storage directory to transfer the image from
 * @param nnf the NameNodeFile type of the image
 * @param txid the transaction ID of the image to be uploaded
 * @param canceler optional canceler to check for abort of upload
 * @throws IOException if there is an I/O error or cancellation
 */
public static void uploadImageFromStorage(URL fsName, Configuration conf,
    NNStorage storage, NameNodeFile nnf, long txid, Canceler canceler)
    throws IOException {
  URL url = new URL(fsName, ImageServlet.PATH_SPEC);
  long startTime = Time.monotonicNow();
  try {
    uploadImage(url, conf, storage, nnf, txid, canceler);
  } catch (HttpPutFailedException e) {
    if (e.getResponseCode() == HttpServletResponse.SC_CONFLICT) {
      // this is OK - this means that a previous attempt to upload
      // this checkpoint succeeded even though we thought it failed.
      LOG.info("Image upload with txid " + txid + 
          " conflicted with a previous image upload to the " +
          "same NameNode. Continuing...", e);
      return;
    } else {
      throw e;
    }
  }
  double xferSec = Math.max(
      ((float) (Time.monotonicNow() - startTime)) / 1000.0, 0.001);
  LOG.info("Uploaded image with txid " + txid + " to namenode at " + fsName
      + " in " + xferSec + " seconds");
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSNamesystem.java
/**
 * Update internal state to indicate that a rolling upgrade is in progress for
 * a non-HA setup. This requires that the namesystem be in safe mode; after
 * the rollback checkpoint is saved, the namesystem leaves safe mode
 * automatically.
 */
private void startRollingUpgradeInternalForNonHA(long startTime)
    throws IOException {
  Preconditions.checkState(!haEnabled);
  if (!isInSafeMode()) {
    throw new IOException("Safe mode should be turned ON "
        + "in order to create namespace image.");
  }
  checkRollingUpgrade("start rolling upgrade");
  getFSImage().checkUpgrade();
  // in non-HA setup, we do an extra checkpoint to generate a rollback image
  getFSImage().saveNamespace(this, NameNodeFile.IMAGE_ROLLBACK, null);
  LOG.info("Successfully saved namespace for preparing rolling upgrade.");

  // leave SafeMode automatically
  setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  setRollingUpgradeInfo(true, startTime);
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSImage.java
/** rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSImage.java
/**
 * Rename the fsimage file with the specified txid.
 */
private void renameCheckpoint(long txid, NameNodeFile fromNnf,
    NameNodeFile toNnf, boolean renameMD5) throws IOException {
  ArrayList<StorageDirectory> al = null;

  for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
    try {
      renameImageFileInDir(sd, fromNnf, toNnf, txid, renameMD5);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + sd, ioe);
      if (al == null) {
        al = Lists.newArrayList();
      }
      al.add(sd);
    }
  }
  if(al != null) storage.reportErrorsOnDirectories(al);
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSImage.java
/**
 * Rename all the fsimage files with the specified NameNodeFile type. The
 * associated checksum files will also be renamed.
 */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf)
    throws IOException {
  ArrayList<StorageDirectory> al = null;
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
  storage.inspectStorageDirs(inspector);
  for (FSImageFile image : inspector.getFoundImages()) {
    try {
      renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
      if (al == null) {
        al = Lists.newArrayList();
      }
      al.add(image.sd);
    }
  }
  if(al != null) {
    storage.reportErrorsOnDirectories(al);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSImage.java
private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf,
    NameNodeFile toNnf, long txid, boolean renameMD5) throws IOException {
  final File fromFile = NNStorage.getStorageFile(sd, fromNnf, txid);
  final File toFile = NNStorage.getStorageFile(sd, toNnf, txid);
  // renameTo fails on Windows if the destination file already exists.
  if(LOG.isDebugEnabled()) {
    LOG.debug("renaming  " + fromFile.getAbsolutePath() 
              + " to " + toFile.getAbsolutePath());
  }
  if (!fromFile.renameTo(toFile)) {
    if (!toFile.delete() || !fromFile.renameTo(toFile)) {
      throw new IOException("renaming  " + fromFile.getAbsolutePath() + " to "  + 
          toFile.getAbsolutePath() + " FAILED");
    }
  }
  if (renameMD5) {
    MD5FileUtils.renameMD5File(fromFile, toFile);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSImagePreTransactionalStorageInspector.java
/**
 * Determine the checkpoint time of the specified StorageDirectory
 *
 * @param sd StorageDirectory to check
 * @return the last checkpoint time if the file exists and can be read; otherwise 0L
 * @throws IOException On errors processing file pointed to by sd
 */
static long readCheckpointTime(StorageDirectory sd) throws IOException {
  File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
  long timeStamp = 0L;
  if (timeFile.exists() && FileUtil.canRead(timeFile)) {
    DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
    try {
      timeStamp = in.readLong();
      in.close();
      in = null;
    } finally {
      IOUtils.cleanup(LOG, in);
    }
  }
  return timeStamp;
}
Project: FlexMap    File: TestStandbyCheckpoints.java
/**
 * Test for the case when the SBN is configured to checkpoint based
 * on a time period, but no transactions are happening on the
 * active. Thus, it would want to save a second checkpoint at the
 * same txid, which is a no-op. This test makes sure this doesn't
 * cause any problem.
 */
@Test(timeout = 300000)
public void testCheckpointWhenNoNewTransactionsHappened()
    throws Exception {
  // Checkpoint as fast as we can, in a tight loop.
  cluster.getConfiguration(1).setInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
  cluster.restartNameNode(1);
  nn1 = cluster.getNameNode(1);

  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1);

  // We shouldn't save any checkpoints at txid=0
  Thread.sleep(1000);
  Mockito.verify(spyImage1, Mockito.never())
    .saveNamespace((FSNamesystem) Mockito.anyObject());

  // Roll the primary and wait for the standby to catch up
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  Thread.sleep(2000);

  // We should make exactly one checkpoint at this new txid. 
  Mockito.verify(spyImage1, Mockito.times(1)).saveNamespace(
      (FSNamesystem) Mockito.anyObject(), Mockito.eq(NameNodeFile.IMAGE),
      (Canceler) Mockito.anyObject());
}
Project: hadoop-EAR    File: TestStartup.java
/**
 * Verify that the edits log and fsimage are in different directories and have the expected sizes.
 */
private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
  StorageDirectory sd = null;
  for (Iterator<StorageDirectory> it = img.dirIterator(); it.hasNext();) {
    sd = it.next();

    if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
      File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE);
      LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
      assertEquals(expectedImgSize, imf.length());  
    } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
      File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS);
      LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length()  + "; expected = " + expectedEditsSize);
      assertEquals(expectedEditsSize, edf.length());    
    } else {
      fail("Image/Edits directories are not different");
    }
  }

}
Project: hadoop-EAR    File: SnapshotNode.java
/**
 * Merge the image and edit log in memory.
 * Files to merge include fsimage, edits, and, if present, edits.new.
 * @throws IOException
 */
void doMerge() throws IOException {
  StorageDirectory sdTemp = null;
  Iterator<StorageDirectory> it = dirIterator(NameNodeDirType.IMAGE_AND_EDITS);
  if (it.hasNext()) {
    sdTemp = it.next();
  } else {
    throw new IOException("Could not locate snapshot temp directory.");
  }

  loadFSImage(NNStorage.getStorageFile(sdTemp, NameNodeFile.IMAGE));
  Collection<EditLogInputStream> editStreams = new ArrayList<EditLogInputStream>(); 
  EditLogInputStream is = new EditLogFileInputStream(NNStorage.getStorageFile(sdTemp, NameNodeFile.EDITS));
  editStreams.add(is);
  File editsNew = NNStorage.getStorageFile(sdTemp, NameNodeFile.EDITS_NEW);
  if (editsNew.exists()) {
    is = new EditLogFileInputStream(editsNew);
    editStreams.add(is);
  }
  loadEdits(editStreams);
}
Project: FlexMap    File: TestStartup.java
/**
 * Verify that the edits log and fsimage are in different directories and have the expected sizes.
 */
private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
  StorageDirectory sd = null;
  for (Iterator<StorageDirectory> it = img.getStorage().dirIterator(); it.hasNext();) {
    sd = it.next();

    if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
      File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
      LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
      assertEquals(expectedImgSize, imf.length());  
    } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
      File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
      LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length()  + "; expected = " + expectedEditsSize);
      assertEquals(expectedEditsSize, edf.length());    
    } else {
      fail("Image/Edits directories are not different");
    }
  }

}
Project: hadoop-EAR    File: FileImageManager.java
/**
 * Rolls the checkpointed image into place as the current fsimage.
 */
private static void renameCheckpointInDir(StorageDirectory sd, long txid)
    throws IOException {
  File ckpt = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
  File curFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, txid);
  // renameTo fails on Windows if the destination file 
  // already exists.
  LOG.info("Renaming  " + ckpt.getAbsolutePath() + " to "
      + curFile.getAbsolutePath());
  if (!ckpt.renameTo(curFile)) {
    if (!curFile.delete() || !ckpt.renameTo(curFile)) {
      throw new IOException("renaming  " + ckpt.getAbsolutePath() + " to "  + 
          curFile.getAbsolutePath() + " FAILED");
    }
  }    
}