Java 类org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile 实例源码

项目:hadoop    文件:NNStorageRetentionManager.java   
/**
 * @param inspector inspector that has already inspected all storage dirs
 * @return the transaction ID corresponding to the oldest checkpoint
 * that should be retained.
 */
private long getImageTxIdToRetain(FSImageTransactionalStorageInspector inspector) {
  // Collect the distinct checkpoint txids of every image found; the
  // TreeSet keeps them sorted ascending.
  TreeSet<Long> imageTxIds = Sets.newTreeSet();
  for (FSImageFile image : inspector.getFoundImages()) {
    imageTxIds.add(image.getCheckpointTxId());
  }

  List<Long> sortedTxIds = Lists.newArrayList(imageTxIds);
  if (sortedTxIds.isEmpty()) {
    // No checkpoint found at all: retain everything from txid 0 onwards.
    return 0;
  }

  // Keep at most numCheckpointsToRetain of the newest checkpoints; the
  // cutoff is therefore the toRetain-th txid counted from the end of
  // the ascending list.
  int toRetain = Math.min(numCheckpointsToRetain, sortedTxIds.size());
  long minTxId = sortedTxIds.get(sortedTxIds.size() - toRetain);
  LOG.info("Going to retain " + toRetain + " images with txid >= " +
      minTxId);
  return minTxId;
}
项目:hadoop    文件:FSImage.java   
/**
 * Rename all the fsimage files with the specific NameNodeFile type. The
 * associated checksum files will also be renamed.
 */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf)
    throws IOException {
  // Locate every image of the source type across all storage dirs.
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
  storage.inspectStorageDirs(inspector);

  // Directories whose rename failed; allocated lazily since failure is
  // the exceptional path.
  ArrayList<StorageDirectory> badDirs = null;
  for (FSImageFile image : inspector.getFoundImages()) {
    try {
      renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
      if (badDirs == null) {
        badDirs = Lists.newArrayList();
      }
      badDirs.add(image.sd);
    }
  }

  // Report any failed directories so they are marked as errored.
  if (badDirs != null) {
    storage.reportErrorsOnDirectories(badDirs);
  }
}
项目:hadoop    文件:TestFSImageStorageInspector.java   
/**
 * Simple test with image, edits, and inprogress edits
 */
@Test
public void testCurrentStorageInspector() throws IOException {
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector();

  // Mock a storage dir holding two images (txids 123 and 456), one
  // finalized edits segment and one in-progress segment.
  final String base = "/foo/current/";
  StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.IMAGE_AND_EDITS,
      false,
      base + getImageFileName(123),
      base + getFinalizedEditsFileName(123, 456),
      base + getImageFileName(456),
      base + getInProgressEditsFileName(457));

  inspector.inspectDirectory(mockDir);
  assertEquals(2, inspector.foundImages.size());

  // The image with the highest txid (456) must be reported as latest.
  FSImageFile latest = inspector.getLatestImages().get(0);
  assertEquals(456, latest.txId);
  assertSame(mockDir, latest.sd);
  assertTrue(inspector.isUpgradeFinalized());
  assertEquals(new File(base + getImageFileName(456)),
      latest.getFile());
}
项目:hadoop    文件:FSImageTestUtil.java   
/**
 * Assert that all of the given directories hold the same newest fsimage
 * (same checkpoint txid) and that those images contain the same data.
 */
public static void assertSameNewestImage(List<File> dirs) throws Exception {
  // Nothing to compare with fewer than two directories.
  if (dirs.size() < 2) return;

  long expectedTxId = -1;

  List<File> newestImages = new ArrayList<File>();
  for (File dir : dirs) {
    FSImageTransactionalStorageInspector inspector =
      inspectStorageDirectory(dir, NameNodeDirType.IMAGE);
    List<FSImageFile> latestImages = inspector.getLatestImages();
    assert(!latestImages.isEmpty());
    FSImageFile newest = latestImages.get(0);
    long thisTxId = newest.getCheckpointTxId();
    // Every directory must agree on the newest checkpoint txid.
    if (expectedTxId != -1 && thisTxId != expectedTxId) {
      fail("Storage directory " + dir + " does not have the same " +
          "last image index " + expectedTxId + " as another");
    }
    expectedTxId = thisTxId;
    newestImages.add(newest.getFile());
  }

  // Finally verify the image files are byte-for-byte identical.
  assertFileContentsSame(newestImages.toArray(new File[0]));
}
项目:aliyun-oss-hadoop-fs    文件:NNStorageRetentionManager.java   
/**
 * @param inspector inspector that has already inspected all storage dirs
 * @return the transaction ID corresponding to the oldest checkpoint
 * that should be retained.
 */
private long getImageTxIdToRetain(FSImageTransactionalStorageInspector inspector) {
  // Collect the distinct checkpoint txids of every image found; the
  // TreeSet keeps them sorted ascending.
  TreeSet<Long> imageTxIds = Sets.newTreeSet();
  for (FSImageFile image : inspector.getFoundImages()) {
    imageTxIds.add(image.getCheckpointTxId());
  }

  List<Long> sortedTxIds = Lists.newArrayList(imageTxIds);
  if (sortedTxIds.isEmpty()) {
    // No checkpoint found at all: retain everything from txid 0 onwards.
    return 0;
  }

  // Keep at most numCheckpointsToRetain of the newest checkpoints; the
  // cutoff is therefore the toRetain-th txid counted from the end of
  // the ascending list.
  int toRetain = Math.min(numCheckpointsToRetain, sortedTxIds.size());
  long minTxId = sortedTxIds.get(sortedTxIds.size() - toRetain);
  LOG.info("Going to retain " + toRetain + " images with txid >= " +
      minTxId);
  return minTxId;
}
项目:aliyun-oss-hadoop-fs    文件:FSImage.java   
/**
 * @param timeWindow a checkpoint is done if the latest checkpoint
 *                   was done more than this number of seconds ago.
 * @param txGap a checkpoint is done also if the gap between the latest tx id
 *              and the latest checkpoint is greater than this number.
 * @return true if a checkpoint has been made
 * @see #saveNamespace(FSNamesystem, NameNodeFile, Canceler)
 */
public synchronized boolean saveNamespace(long timeWindow, long txGap,
    FSNamesystem source) throws IOException {
  if (timeWindow > 0 || txGap > 0) {
    // Inspect the current (and rollback) images to find the newest one.
    final FSImageStorageInspector inspector = storage.readAndInspectDirs(
        EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK),
        StartupOption.REGULAR);
    FSImageFile latest = inspector.getLatestImages().get(0);

    final long checkpointTxId = latest.getCheckpointTxId();
    final long checkpointAge = Time.now() - latest.getFile().lastModified();
    // Skip checkpointing when the last checkpoint is both recent enough
    // (within timeWindow seconds) and close enough to the current txid.
    final boolean recentEnough = checkpointAge <= timeWindow * 1000;
    final boolean fewNewTxns =
        checkpointTxId >= this.getLastAppliedOrWrittenTxId() - txGap;
    if (recentEnough && fewNewTxns) {
      return false;
    }
  }
  saveNamespace(source, NameNodeFile.IMAGE, null);
  return true;
}
项目:aliyun-oss-hadoop-fs    文件:FSImage.java   
/**
 * Rename all the fsimage files with the specific NameNodeFile type. The
 * associated checksum files will also be renamed.
 */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf)
    throws IOException {
  // Locate every image of the source type across all storage dirs.
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
  storage.inspectStorageDirs(inspector);

  // Directories whose rename failed; allocated lazily since failure is
  // the exceptional path.
  ArrayList<StorageDirectory> badDirs = null;
  for (FSImageFile image : inspector.getFoundImages()) {
    try {
      renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
      if (badDirs == null) {
        badDirs = Lists.newArrayList();
      }
      badDirs.add(image.sd);
    }
  }

  // Report any failed directories so they are marked as errored.
  if (badDirs != null) {
    storage.reportErrorsOnDirectories(badDirs);
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestFSImageStorageInspector.java   
/**
 * Simple test with image, edits, and inprogress edits
 */
@Test
public void testCurrentStorageInspector() throws IOException {
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector();

  // Mock a storage dir holding two images (txids 123 and 456), one
  // finalized edits segment and one in-progress segment.
  final String base = "/foo/current/";
  StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.IMAGE_AND_EDITS,
      false,
      base + getImageFileName(123),
      base + getFinalizedEditsFileName(123, 456),
      base + getImageFileName(456),
      base + getInProgressEditsFileName(457));

  inspector.inspectDirectory(mockDir);
  assertEquals(2, inspector.foundImages.size());

  // The image with the highest txid (456) must be reported as latest.
  FSImageFile latest = inspector.getLatestImages().get(0);
  assertEquals(456, latest.txId);
  assertSame(mockDir, latest.sd);
  assertTrue(inspector.isUpgradeFinalized());
  assertEquals(new File(base + getImageFileName(456)),
      latest.getFile());
}
项目:aliyun-oss-hadoop-fs    文件:FSImageTestUtil.java   
/**
 * Assert that all of the given directories hold the same newest fsimage
 * (same checkpoint txid) and that those images contain the same data.
 */
public static void assertSameNewestImage(List<File> dirs) throws Exception {
  // Nothing to compare with fewer than two directories.
  if (dirs.size() < 2) return;

  long expectedTxId = -1;

  List<File> newestImages = new ArrayList<File>();
  for (File dir : dirs) {
    FSImageTransactionalStorageInspector inspector =
      inspectStorageDirectory(dir, NameNodeDirType.IMAGE);
    List<FSImageFile> latestImages = inspector.getLatestImages();
    assert(!latestImages.isEmpty());
    FSImageFile newest = latestImages.get(0);
    long thisTxId = newest.getCheckpointTxId();
    // Every directory must agree on the newest checkpoint txid.
    if (expectedTxId != -1 && thisTxId != expectedTxId) {
      fail("Storage directory " + dir + " does not have the same " +
          "last image index " + expectedTxId + " as another");
    }
    expectedTxId = thisTxId;
    newestImages.add(newest.getFile());
  }

  // Finally verify the image files are byte-for-byte identical.
  assertFileContentsSame(newestImages.toArray(new File[0]));
}
项目:big-c    文件:NNStorageRetentionManager.java   
/**
 * @param inspector inspector that has already inspected all storage dirs
 * @return the transaction ID corresponding to the oldest checkpoint
 * that should be retained.
 */
private long getImageTxIdToRetain(FSImageTransactionalStorageInspector inspector) {
  // Collect the distinct checkpoint txids of every image found; the
  // TreeSet keeps them sorted ascending.
  TreeSet<Long> imageTxIds = Sets.newTreeSet();
  for (FSImageFile image : inspector.getFoundImages()) {
    imageTxIds.add(image.getCheckpointTxId());
  }

  List<Long> sortedTxIds = Lists.newArrayList(imageTxIds);
  if (sortedTxIds.isEmpty()) {
    // No checkpoint found at all: retain everything from txid 0 onwards.
    return 0;
  }

  // Keep at most numCheckpointsToRetain of the newest checkpoints; the
  // cutoff is therefore the toRetain-th txid counted from the end of
  // the ascending list.
  int toRetain = Math.min(numCheckpointsToRetain, sortedTxIds.size());
  long minTxId = sortedTxIds.get(sortedTxIds.size() - toRetain);
  LOG.info("Going to retain " + toRetain + " images with txid >= " +
      minTxId);
  return minTxId;
}
项目:big-c    文件:FSImage.java   
/**
 * Rename all the fsimage files with the specific NameNodeFile type. The
 * associated checksum files will also be renamed.
 */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf)
    throws IOException {
  // Locate every image of the source type across all storage dirs.
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
  storage.inspectStorageDirs(inspector);

  // Directories whose rename failed; allocated lazily since failure is
  // the exceptional path.
  ArrayList<StorageDirectory> badDirs = null;
  for (FSImageFile image : inspector.getFoundImages()) {
    try {
      renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
      if (badDirs == null) {
        badDirs = Lists.newArrayList();
      }
      badDirs.add(image.sd);
    }
  }

  // Report any failed directories so they are marked as errored.
  if (badDirs != null) {
    storage.reportErrorsOnDirectories(badDirs);
  }
}
项目:big-c    文件:TestFSImageStorageInspector.java   
/**
 * Simple test with image, edits, and inprogress edits
 */
@Test
public void testCurrentStorageInspector() throws IOException {
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector();

  // Mock a storage dir holding two images (txids 123 and 456), one
  // finalized edits segment and one in-progress segment.
  final String base = "/foo/current/";
  StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.IMAGE_AND_EDITS,
      false,
      base + getImageFileName(123),
      base + getFinalizedEditsFileName(123, 456),
      base + getImageFileName(456),
      base + getInProgressEditsFileName(457));

  inspector.inspectDirectory(mockDir);
  assertEquals(2, inspector.foundImages.size());

  // The image with the highest txid (456) must be reported as latest.
  FSImageFile latest = inspector.getLatestImages().get(0);
  assertEquals(456, latest.txId);
  assertSame(mockDir, latest.sd);
  assertTrue(inspector.isUpgradeFinalized());
  assertEquals(new File(base + getImageFileName(456)),
      latest.getFile());
}
项目:big-c    文件:FSImageTestUtil.java   
/**
 * Assert that all of the given directories hold the same newest fsimage
 * (same checkpoint txid) and that those images contain the same data.
 */
public static void assertSameNewestImage(List<File> dirs) throws Exception {
  // Nothing to compare with fewer than two directories.
  if (dirs.size() < 2) return;

  long expectedTxId = -1;

  List<File> newestImages = new ArrayList<File>();
  for (File dir : dirs) {
    FSImageTransactionalStorageInspector inspector =
      inspectStorageDirectory(dir, NameNodeDirType.IMAGE);
    List<FSImageFile> latestImages = inspector.getLatestImages();
    assert(!latestImages.isEmpty());
    FSImageFile newest = latestImages.get(0);
    long thisTxId = newest.getCheckpointTxId();
    // Every directory must agree on the newest checkpoint txid.
    if (expectedTxId != -1 && thisTxId != expectedTxId) {
      fail("Storage directory " + dir + " does not have the same " +
          "last image index " + expectedTxId + " as another");
    }
    expectedTxId = thisTxId;
    newestImages.add(newest.getFile());
  }

  // Finally verify the image files are byte-for-byte identical.
  assertFileContentsSame(newestImages.toArray(new File[0]));
}
项目:hadoop-2.6.0-cdh5.4.3    文件:NNStorageRetentionManager.java   
/**
 * @param inspector inspector that has already inspected all storage dirs
 * @return the transaction ID corresponding to the oldest checkpoint
 * that should be retained.
 */
private long getImageTxIdToRetain(FSImageTransactionalStorageInspector inspector) {
  // Collect the distinct checkpoint txids of every image found; the
  // TreeSet keeps them sorted ascending.
  TreeSet<Long> imageTxIds = Sets.newTreeSet();
  for (FSImageFile image : inspector.getFoundImages()) {
    imageTxIds.add(image.getCheckpointTxId());
  }

  List<Long> sortedTxIds = Lists.newArrayList(imageTxIds);
  if (sortedTxIds.isEmpty()) {
    // No checkpoint found at all: retain everything from txid 0 onwards.
    return 0;
  }

  // Keep at most numCheckpointsToRetain of the newest checkpoints; the
  // cutoff is therefore the toRetain-th txid counted from the end of
  // the ascending list.
  int toRetain = Math.min(numCheckpointsToRetain, sortedTxIds.size());
  long minTxId = sortedTxIds.get(sortedTxIds.size() - toRetain);
  LOG.info("Going to retain " + toRetain + " images with txid >= " +
      minTxId);
  return minTxId;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSImage.java   
/**
 * Rename all the fsimage files with the specific NameNodeFile type. The
 * associated checksum files will also be renamed.
 */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf)
    throws IOException {
  // Locate every image of the source type across all storage dirs.
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
  storage.inspectStorageDirs(inspector);

  // Directories whose rename failed; allocated lazily since failure is
  // the exceptional path.
  ArrayList<StorageDirectory> badDirs = null;
  for (FSImageFile image : inspector.getFoundImages()) {
    try {
      renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
      if (badDirs == null) {
        badDirs = Lists.newArrayList();
      }
      badDirs.add(image.sd);
    }
  }

  // Report any failed directories so they are marked as errored.
  if (badDirs != null) {
    storage.reportErrorsOnDirectories(badDirs);
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestFSImageStorageInspector.java   
/**
 * Simple test with image, edits, and inprogress edits
 */
@Test
public void testCurrentStorageInspector() throws IOException {
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector();

  // Mock a storage dir holding two images (txids 123 and 456), one
  // finalized edits segment and one in-progress segment.
  final String base = "/foo/current/";
  StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.IMAGE_AND_EDITS,
      false,
      base + getImageFileName(123),
      base + getFinalizedEditsFileName(123, 456),
      base + getImageFileName(456),
      base + getInProgressEditsFileName(457));

  inspector.inspectDirectory(mockDir);
  assertEquals(2, inspector.foundImages.size());

  // The image with the highest txid (456) must be reported as latest.
  FSImageFile latest = inspector.getLatestImages().get(0);
  assertEquals(456, latest.txId);
  assertSame(mockDir, latest.sd);
  assertTrue(inspector.isUpgradeFinalized());
  assertEquals(new File(base + getImageFileName(456)),
      latest.getFile());
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSImageTestUtil.java   
/**
 * Assert that all of the given directories hold the same newest fsimage
 * (same checkpoint txid) and that those images contain the same data.
 */
public static void assertSameNewestImage(List<File> dirs) throws Exception {
  // Nothing to compare with fewer than two directories.
  if (dirs.size() < 2) return;

  long expectedTxId = -1;

  List<File> newestImages = new ArrayList<File>();
  for (File dir : dirs) {
    FSImageTransactionalStorageInspector inspector =
      inspectStorageDirectory(dir, NameNodeDirType.IMAGE);
    List<FSImageFile> latestImages = inspector.getLatestImages();
    assert(!latestImages.isEmpty());
    FSImageFile newest = latestImages.get(0);
    long thisTxId = newest.getCheckpointTxId();
    // Every directory must agree on the newest checkpoint txid.
    if (expectedTxId != -1 && thisTxId != expectedTxId) {
      fail("Storage directory " + dir + " does not have the same " +
          "last image index " + expectedTxId + " as another");
    }
    expectedTxId = thisTxId;
    newestImages.add(newest.getFile());
  }

  // Finally verify the image files are byte-for-byte identical.
  assertFileContentsSame(newestImages.toArray(new File[0]));
}
项目:hadoop-EAR    文件:TestFSImageStorageInspector.java   
/**
 * Simple test with image, edits, and inprogress edits
 */
@Test
public void testCurrentStorageInspector() throws IOException {
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector();

  // Mock a storage dir holding two images (txids 123 and 456), one
  // finalized edits segment and one in-progress segment.
  final String base = "/foo/current/";
  StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.IMAGE_AND_EDITS,
      false,
      base + getImageFileName(123),
      base + getFinalizedEditsFileName(123, 456),
      base + getImageFileName(456),
      base + getInProgressEditsFileName(457));

  inspector.inspectDirectory(mockDir);
  assertEquals(2, inspector.foundImages.size());

  // The image with the highest txid (456) must be reported as latest.
  FSImageFile latest = inspector.getLatestImage();
  assertEquals(456, latest.txId);
  assertSame(mockDir, latest.sd);
  assertTrue(inspector.isUpgradeFinalized());
  assertEquals(new File(base + getImageFileName(456)),
      latest.getFile());
}
项目:hadoop-EAR    文件:FSImageTestUtil.java   
/**
 * Assert that all of the given directories hold the same newest fsimage
 * (same checkpoint txid) and that those images contain the same data.
 */
public static void assertSameNewestImage(List<File> dirs) throws Exception {
  // Nothing to compare with fewer than two directories.
  if (dirs.size() < 2) return;

  long expectedTxId = -1;

  List<File> newestImages = new ArrayList<File>();
  for (File dir : dirs) {
    FSImageTransactionalStorageInspector inspector =
      inspectStorageDirectory(dir, NameNodeDirType.IMAGE);
    FSImageFile newest = inspector.getLatestImage();
    assertNotNull("No image in " + dir, newest);
    long thisTxId = newest.getCheckpointTxId();
    // Every directory must agree on the newest checkpoint txid.
    if (expectedTxId != -1 && thisTxId != expectedTxId) {
      fail("Storage directory " + dir + " does not have the same " +
          "last image index " + expectedTxId + " as another");
    }
    expectedTxId = thisTxId;
    newestImages.add(newest.getFile());
  }

  // Finally verify the image files are byte-for-byte identical.
  assertFileContentsSame(newestImages.toArray(new File[0]));
}
项目:hadoop-EAR    文件:NNStorageRetentionManager.java   
/**
 * @param inspector inspector that has already inspected all storage dirs
 * @return the transaction ID corresponding to the oldest checkpoint
 * that should be retained, or -1 if no checkpoint images were found.
 */
private long getImageTxIdToRetain(FSImageTransactionalStorageInspector inspector) {
  // Collect the distinct checkpoint txids of every image found; the
  // TreeSet keeps them sorted ascending.
  TreeSet<Long> imageTxIds = new TreeSet<Long>();
  for (FSImageFile image : inspector.getFoundImages()) {
    imageTxIds.add(image.getCheckpointTxId());
  }

  List<Long> sortedTxIds = new ArrayList<Long>(imageTxIds);
  if (sortedTxIds.isEmpty()) {
    // No checkpoint found at all.
    return -1;
  }

  // Keep at most numCheckpointsToRetain of the newest checkpoints; the
  // cutoff is therefore the toRetain-th txid counted from the end of
  // the ascending list.
  int toRetain = Math.min(numCheckpointsToRetain, sortedTxIds.size());
  long minTxId = sortedTxIds.get(sortedTxIds.size() - toRetain);
  LOG.info("Going to retain " + toRetain + " images with txid >= " +
      minTxId);
  return minTxId;
}
项目:hadoop-plus    文件:NNStorageRetentionManager.java   
/**
 * @param inspector inspector that has already inspected all storage dirs
 * @return the transaction ID corresponding to the oldest checkpoint
 * that should be retained.
 */
private long getImageTxIdToRetain(FSImageTransactionalStorageInspector inspector) {
  // Collect the distinct checkpoint txids of every image found; the
  // TreeSet keeps them sorted ascending.
  TreeSet<Long> imageTxIds = Sets.newTreeSet();
  for (FSImageFile image : inspector.getFoundImages()) {
    imageTxIds.add(image.getCheckpointTxId());
  }

  List<Long> sortedTxIds = Lists.newArrayList(imageTxIds);
  if (sortedTxIds.isEmpty()) {
    // No checkpoint found at all: retain everything from txid 0 onwards.
    return 0;
  }

  // Keep at most numCheckpointsToRetain of the newest checkpoints; the
  // cutoff is therefore the toRetain-th txid counted from the end of
  // the ascending list.
  int toRetain = Math.min(numCheckpointsToRetain, sortedTxIds.size());
  long minTxId = sortedTxIds.get(sortedTxIds.size() - toRetain);
  LOG.info("Going to retain " + toRetain + " images with txid >= " +
      minTxId);
  return minTxId;
}
项目:hadoop-plus    文件:TestFSImageStorageInspector.java   
/**
 * Simple test with image, edits, and inprogress edits
 */
@Test
public void testCurrentStorageInspector() throws IOException {
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector();

  // Mock a storage dir holding two images (txids 123 and 456), one
  // finalized edits segment and one in-progress segment.
  final String base = "/foo/current/";
  StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.IMAGE_AND_EDITS,
      false,
      base + getImageFileName(123),
      base + getFinalizedEditsFileName(123, 456),
      base + getImageFileName(456),
      base + getInProgressEditsFileName(457));

  inspector.inspectDirectory(mockDir);
  assertEquals(2, inspector.foundImages.size());

  // The image with the highest txid (456) must be reported as latest.
  FSImageFile latest = inspector.getLatestImages().get(0);
  assertEquals(456, latest.txId);
  assertSame(mockDir, latest.sd);
  assertTrue(inspector.isUpgradeFinalized());
  assertEquals(new File(base + getImageFileName(456)),
      latest.getFile());
}
项目:hadoop-plus    文件:FSImageTestUtil.java   
/**
 * Assert that all of the given directories hold the same newest fsimage
 * (same checkpoint txid) and that those images contain the same data.
 */
public static void assertSameNewestImage(List<File> dirs) throws Exception {
  // Nothing to compare with fewer than two directories.
  if (dirs.size() < 2) return;

  long expectedTxId = -1;

  List<File> newestImages = new ArrayList<File>();
  for (File dir : dirs) {
    FSImageTransactionalStorageInspector inspector =
      inspectStorageDirectory(dir, NameNodeDirType.IMAGE);
    List<FSImageFile> latestImages = inspector.getLatestImages();
    assert(!latestImages.isEmpty());
    FSImageFile newest = latestImages.get(0);
    long thisTxId = newest.getCheckpointTxId();
    // Every directory must agree on the newest checkpoint txid.
    if (expectedTxId != -1 && thisTxId != expectedTxId) {
      fail("Storage directory " + dir + " does not have the same " +
          "last image index " + expectedTxId + " as another");
    }
    expectedTxId = thisTxId;
    newestImages.add(newest.getFile());
  }

  // Finally verify the image files are byte-for-byte identical.
  assertFileContentsSame(newestImages.toArray(new File[0]));
}
项目:FlexMap    文件:NNStorageRetentionManager.java   
/**
 * @param inspector inspector that has already inspected all storage dirs
 * @return the transaction ID corresponding to the oldest checkpoint
 * that should be retained.
 */
private long getImageTxIdToRetain(FSImageTransactionalStorageInspector inspector) {
  // Collect the distinct checkpoint txids of every image found; the
  // TreeSet keeps them sorted ascending.
  TreeSet<Long> imageTxIds = Sets.newTreeSet();
  for (FSImageFile image : inspector.getFoundImages()) {
    imageTxIds.add(image.getCheckpointTxId());
  }

  List<Long> sortedTxIds = Lists.newArrayList(imageTxIds);
  if (sortedTxIds.isEmpty()) {
    // No checkpoint found at all: retain everything from txid 0 onwards.
    return 0;
  }

  // Keep at most numCheckpointsToRetain of the newest checkpoints; the
  // cutoff is therefore the toRetain-th txid counted from the end of
  // the ascending list.
  int toRetain = Math.min(numCheckpointsToRetain, sortedTxIds.size());
  long minTxId = sortedTxIds.get(sortedTxIds.size() - toRetain);
  LOG.info("Going to retain " + toRetain + " images with txid >= " +
      minTxId);
  return minTxId;
}
项目:FlexMap    文件:FSImage.java   
/**
 * Rename all the fsimage files with the specific NameNodeFile type. The
 * associated checksum files will also be renamed.
 */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf)
    throws IOException {
  // Locate every image of the source type across all storage dirs.
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
  storage.inspectStorageDirs(inspector);

  // Directories whose rename failed; allocated lazily since failure is
  // the exceptional path.
  ArrayList<StorageDirectory> badDirs = null;
  for (FSImageFile image : inspector.getFoundImages()) {
    try {
      renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
      if (badDirs == null) {
        badDirs = Lists.newArrayList();
      }
      badDirs.add(image.sd);
    }
  }

  // Report any failed directories so they are marked as errored.
  if (badDirs != null) {
    storage.reportErrorsOnDirectories(badDirs);
  }
}
项目:FlexMap    文件:TestFSImageStorageInspector.java   
/**
 * Simple test with image, edits, and inprogress edits
 */
@Test
public void testCurrentStorageInspector() throws IOException {
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector();

  // Mock a storage dir holding two images (txids 123 and 456), one
  // finalized edits segment and one in-progress segment.
  final String base = "/foo/current/";
  StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.IMAGE_AND_EDITS,
      false,
      base + getImageFileName(123),
      base + getFinalizedEditsFileName(123, 456),
      base + getImageFileName(456),
      base + getInProgressEditsFileName(457));

  inspector.inspectDirectory(mockDir);
  assertEquals(2, inspector.foundImages.size());

  // The image with the highest txid (456) must be reported as latest.
  FSImageFile latest = inspector.getLatestImages().get(0);
  assertEquals(456, latest.txId);
  assertSame(mockDir, latest.sd);
  assertTrue(inspector.isUpgradeFinalized());
  assertEquals(new File(base + getImageFileName(456)),
      latest.getFile());
}
项目:FlexMap    文件:FSImageTestUtil.java   
/**
 * Assert that all of the given directories hold the same newest fsimage
 * (same checkpoint txid) and that those images contain the same data.
 */
public static void assertSameNewestImage(List<File> dirs) throws Exception {
  // Nothing to compare with fewer than two directories.
  if (dirs.size() < 2) return;

  long expectedTxId = -1;

  List<File> newestImages = new ArrayList<File>();
  for (File dir : dirs) {
    FSImageTransactionalStorageInspector inspector =
      inspectStorageDirectory(dir, NameNodeDirType.IMAGE);
    List<FSImageFile> latestImages = inspector.getLatestImages();
    assert(!latestImages.isEmpty());
    FSImageFile newest = latestImages.get(0);
    long thisTxId = newest.getCheckpointTxId();
    // Every directory must agree on the newest checkpoint txid.
    if (expectedTxId != -1 && thisTxId != expectedTxId) {
      fail("Storage directory " + dir + " does not have the same " +
          "last image index " + expectedTxId + " as another");
    }
    expectedTxId = thisTxId;
    newestImages.add(newest.getFile());
  }

  // Finally verify the image files are byte-for-byte identical.
  assertFileContentsSame(newestImages.toArray(new File[0]));
}
项目:hadoop-TCP    文件:NNStorageRetentionManager.java   
/**
 * @param inspector inspector that has already inspected all storage dirs
 * @return the transaction ID corresponding to the oldest checkpoint
 * that should be retained.
 */
private long getImageTxIdToRetain(FSImageTransactionalStorageInspector inspector) {
  // Collect the distinct checkpoint txids of every image found; the
  // TreeSet keeps them sorted ascending.
  TreeSet<Long> imageTxIds = Sets.newTreeSet();
  for (FSImageFile image : inspector.getFoundImages()) {
    imageTxIds.add(image.getCheckpointTxId());
  }

  List<Long> sortedTxIds = Lists.newArrayList(imageTxIds);
  if (sortedTxIds.isEmpty()) {
    // No checkpoint found at all: retain everything from txid 0 onwards.
    return 0;
  }

  // Keep at most numCheckpointsToRetain of the newest checkpoints; the
  // cutoff is therefore the toRetain-th txid counted from the end of
  // the ascending list.
  int toRetain = Math.min(numCheckpointsToRetain, sortedTxIds.size());
  long minTxId = sortedTxIds.get(sortedTxIds.size() - toRetain);
  LOG.info("Going to retain " + toRetain + " images with txid >= " +
      minTxId);
  return minTxId;
}
项目:hadoop-TCP    文件:TestFSImageStorageInspector.java   
/**
 * Simple test with image, edits, and inprogress edits
 */
@Test
public void testCurrentStorageInspector() throws IOException {
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector();

  // Mock a storage dir holding two images (txids 123 and 456), one
  // finalized edits segment and one in-progress segment.
  final String base = "/foo/current/";
  StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.IMAGE_AND_EDITS,
      false,
      base + getImageFileName(123),
      base + getFinalizedEditsFileName(123, 456),
      base + getImageFileName(456),
      base + getInProgressEditsFileName(457));

  inspector.inspectDirectory(mockDir);
  assertEquals(2, inspector.foundImages.size());

  // The image with the highest txid (456) must be reported as latest.
  FSImageFile latest = inspector.getLatestImages().get(0);
  assertEquals(456, latest.txId);
  assertSame(mockDir, latest.sd);
  assertTrue(inspector.isUpgradeFinalized());
  assertEquals(new File(base + getImageFileName(456)),
      latest.getFile());
}
项目:hadoop-TCP    文件:FSImageTestUtil.java   
/**
 * Assert that all of the given directories hold the same newest fsimage
 * (same checkpoint txid) and that those images contain the same data.
 */
public static void assertSameNewestImage(List<File> dirs) throws Exception {
  // Nothing to compare with fewer than two directories.
  if (dirs.size() < 2) return;

  long expectedTxId = -1;

  List<File> newestImages = new ArrayList<File>();
  for (File dir : dirs) {
    FSImageTransactionalStorageInspector inspector =
      inspectStorageDirectory(dir, NameNodeDirType.IMAGE);
    List<FSImageFile> latestImages = inspector.getLatestImages();
    assert(!latestImages.isEmpty());
    FSImageFile newest = latestImages.get(0);
    long thisTxId = newest.getCheckpointTxId();
    // Every directory must agree on the newest checkpoint txid.
    if (expectedTxId != -1 && thisTxId != expectedTxId) {
      fail("Storage directory " + dir + " does not have the same " +
          "last image index " + expectedTxId + " as another");
    }
    expectedTxId = thisTxId;
    newestImages.add(newest.getFile());
  }

  // Finally verify the image files are byte-for-byte identical.
  assertFileContentsSame(newestImages.toArray(new File[0]));
}
项目:hardfs    文件:NNStorageRetentionManager.java   
/**
 * @param inspector inspector that has already inspected all storage dirs
 * @return the transaction ID corresponding to the oldest checkpoint
 * that should be retained.
 */
private long getImageTxIdToRetain(FSImageTransactionalStorageInspector inspector) {
  // Collect the distinct checkpoint txids of every image found; the
  // TreeSet keeps them sorted ascending.
  TreeSet<Long> imageTxIds = Sets.newTreeSet();
  for (FSImageFile image : inspector.getFoundImages()) {
    imageTxIds.add(image.getCheckpointTxId());
  }

  List<Long> sortedTxIds = Lists.newArrayList(imageTxIds);
  if (sortedTxIds.isEmpty()) {
    // No checkpoint found at all: retain everything from txid 0 onwards.
    return 0;
  }

  // Keep at most numCheckpointsToRetain of the newest checkpoints; the
  // cutoff is therefore the toRetain-th txid counted from the end of
  // the ascending list.
  int toRetain = Math.min(numCheckpointsToRetain, sortedTxIds.size());
  long minTxId = sortedTxIds.get(sortedTxIds.size() - toRetain);
  LOG.info("Going to retain " + toRetain + " images with txid >= " +
      minTxId);
  return minTxId;
}
项目:hardfs    文件:TestFSImageStorageInspector.java   
/**
 * Simple test with image, edits, and inprogress edits
 */
@Test
public void testCurrentStorageInspector() throws IOException {
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector();

  // Mock a storage dir holding two images (txids 123 and 456), one
  // finalized edits segment and one in-progress segment.
  final String base = "/foo/current/";
  StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.IMAGE_AND_EDITS,
      false,
      base + getImageFileName(123),
      base + getFinalizedEditsFileName(123, 456),
      base + getImageFileName(456),
      base + getInProgressEditsFileName(457));

  inspector.inspectDirectory(mockDir);
  assertEquals(2, inspector.foundImages.size());

  // The image with the highest txid (456) must be reported as latest.
  FSImageFile latest = inspector.getLatestImages().get(0);
  assertEquals(456, latest.txId);
  assertSame(mockDir, latest.sd);
  assertTrue(inspector.isUpgradeFinalized());
  assertEquals(new File(base + getImageFileName(456)),
      latest.getFile());
}
项目:hardfs    文件:FSImageTestUtil.java   
/**
 * Assert that all of the given directories hold the same newest fsimage
 * (same checkpoint txid) and that those images contain the same data.
 */
public static void assertSameNewestImage(List<File> dirs) throws Exception {
  // Nothing to compare with fewer than two directories.
  if (dirs.size() < 2) return;

  long expectedTxId = -1;

  List<File> newestImages = new ArrayList<File>();
  for (File dir : dirs) {
    FSImageTransactionalStorageInspector inspector =
      inspectStorageDirectory(dir, NameNodeDirType.IMAGE);
    List<FSImageFile> latestImages = inspector.getLatestImages();
    assert(!latestImages.isEmpty());
    FSImageFile newest = latestImages.get(0);
    long thisTxId = newest.getCheckpointTxId();
    // Every directory must agree on the newest checkpoint txid.
    if (expectedTxId != -1 && thisTxId != expectedTxId) {
      fail("Storage directory " + dir + " does not have the same " +
          "last image index " + expectedTxId + " as another");
    }
    expectedTxId = thisTxId;
    newestImages.add(newest.getFile());
  }

  // Finally verify the image files are byte-for-byte identical.
  assertFileContentsSame(newestImages.toArray(new File[0]));
}
项目:hadoop-on-lustre2    文件:NNStorageRetentionManager.java   
/**
 * @param inspector inspector that has already inspected all storage dirs
 * @return the transaction ID corresponding to the oldest checkpoint
 * that should be retained.
 */
private long getImageTxIdToRetain(FSImageTransactionalStorageInspector inspector) {
  // Collect the distinct checkpoint txids of every image found; the
  // TreeSet keeps them sorted ascending.
  TreeSet<Long> imageTxIds = Sets.newTreeSet();
  for (FSImageFile image : inspector.getFoundImages()) {
    imageTxIds.add(image.getCheckpointTxId());
  }

  List<Long> sortedTxIds = Lists.newArrayList(imageTxIds);
  if (sortedTxIds.isEmpty()) {
    // No checkpoint found at all: retain everything from txid 0 onwards.
    return 0;
  }

  // Keep at most numCheckpointsToRetain of the newest checkpoints; the
  // cutoff is therefore the toRetain-th txid counted from the end of
  // the ascending list.
  int toRetain = Math.min(numCheckpointsToRetain, sortedTxIds.size());
  long minTxId = sortedTxIds.get(sortedTxIds.size() - toRetain);
  LOG.info("Going to retain " + toRetain + " images with txid >= " +
      minTxId);
  return minTxId;
}
Project: hadoop-on-lustre2    File: FSImage.java
/**
 * Rename all the fsimage files with the specific NameNodeFile type. The
 * associated checksum files will also be renamed.
 *
 * @param fromNnf current NameNodeFile type of the images to rename
 * @param toNnf   NameNodeFile type to rename them to
 * @throws IOException if reporting failed directories to storage fails
 */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf)
    throws IOException {
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
  storage.inspectStorageDirs(inspector);

  // Directories where the rename failed; allocated lazily and reported
  // as storage errors once all directories have been attempted.
  ArrayList<StorageDirectory> badDirs = null;
  for (FSImageFile image : inspector.getFoundImages()) {
    try {
      renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
    } catch (IOException ioe) {
      // Remember the failing directory but keep renaming in the others.
      LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
      if (badDirs == null) {
        badDirs = new ArrayList<StorageDirectory>();
      }
      badDirs.add(image.sd);
    }
  }
  if (badDirs != null) {
    storage.reportErrorsOnDirectories(badDirs);
  }
}
Project: hadoop-on-lustre2    File: TestFSImageStorageInspector.java
/**
 * Simple test with image, edits, and inprogress edits
 */
@Test
public void testCurrentStorageInspector() throws IOException {
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector();

  // Mock directory holding two images (txid 123 and 456), the finalized
  // edits between them, and an in-progress edit log starting at 457.
  StorageDirectory dir = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.IMAGE_AND_EDITS,
      false,
      "/foo/current/" + getImageFileName(123),
      "/foo/current/" + getFinalizedEditsFileName(123, 456),
      "/foo/current/" + getImageFileName(456),
      "/foo/current/" + getInProgressEditsFileName(457));

  inspector.inspectDirectory(dir);

  // Both images should be discovered, with txid 456 as the newest.
  assertEquals(2, inspector.foundImages.size());
  FSImageFile newest = inspector.getLatestImages().get(0);
  assertEquals(456, newest.txId);
  assertSame(dir, newest.sd);
  assertTrue(inspector.isUpgradeFinalized());
  assertEquals(new File("/foo/current/"+getImageFileName(456)),
      newest.getFile());
}
Project: hadoop-on-lustre2    File: FSImageTestUtil.java
/**
 * Assert that all of the given storage directories agree on the newest
 * fsimage: they must share the same newest checkpoint txid, and the image
 * files themselves must be byte-for-byte identical.
 *
 * @param dirs storage directories to compare; fewer than two is a no-op
 * @throws Exception if an image cannot be inspected or compared
 */
public static void assertSameNewestImage(List<File> dirs) throws Exception {
  if (dirs.size() < 2) return;

  // Checkpoint txid of the newest image seen so far; -1 until the first
  // directory has been inspected.
  long imageTxId = -1;

  List<File> imageFiles = new ArrayList<File>();
  for (File dir : dirs) {
    FSImageTransactionalStorageInspector inspector =
      inspectStorageDirectory(dir, NameNodeDirType.IMAGE);
    List<FSImageFile> latestImages = inspector.getLatestImages();
    assert(!latestImages.isEmpty());
    long thisTxId = latestImages.get(0).getCheckpointTxId();
    if (imageTxId != -1 && thisTxId != imageTxId) {
      fail("Storage directory " + dir + " does not have the same " +
          "last image index " + imageTxId + " as another");
    }
    imageTxId = thisTxId;
    imageFiles.add(inspector.getLatestImages().get(0).getFile());
  }

  // All txids match; now verify the file contents are identical too.
  assertFileContentsSame(imageFiles.toArray(new File[0]));
}
Project: hadoop    File: NNStorageRetentionManager.java
/**
 * Purge all checkpoint images of the given type whose checkpoint txid is
 * strictly greater than {@code fromTxId}.
 *
 * NOTE(review): the method name is missing a 't' ("Checkpoins"); it is
 * kept as-is because callers elsewhere depend on this exact name.
 *
 * @param nnf the image file type to look for
 * @param fromTxId images with a checkpoint txid above this value are purged
 * @throws IOException if the storage directories cannot be inspected
 */
void purgeCheckpoinsAfter(NameNodeFile nnf, long fromTxId)
    throws IOException {
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(nnf));
  storage.inspectStorageDirs(inspector);
  for (FSImageFile image : inspector.getFoundImages()) {
    if (image.getCheckpointTxId() > fromTxId) {
      purger.purgeImage(image);
    }
  }
}
Project: hadoop    File: NNStorageRetentionManager.java
/**
 * Purge every found image whose checkpoint txid falls below the cutoff.
 *
 * @param inspector inspector that has already scanned the storage dirs
 * @param minTxId images with a checkpoint txid strictly below this value
 *                are handed to the purger
 */
private void purgeCheckpointsOlderThan(
    FSImageTransactionalStorageInspector inspector,
    long minTxId) {
  for (FSImageFile candidate : inspector.getFoundImages()) {
    boolean tooOld = candidate.getCheckpointTxId() < minTxId;
    if (tooOld) {
      purger.purgeImage(candidate);
    }
  }
}
Project: hadoop    File: FSImage.java
/**
 * Check whether a rollback fsimage (for rolling upgrade) exists in the
 * NameNode storage directories.
 *
 * @return true if at least one rollback fsimage was found
 * @throws IOException if the storage directories cannot be inspected
 */
public boolean hasRollbackFSImage() throws IOException {
  FSImageStorageInspector inspector = new FSImageTransactionalStorageInspector(
      EnumSet.of(NameNodeFile.IMAGE_ROLLBACK));
  storage.inspectStorageDirs(inspector);
  try {
    // A rollback image exists iff the inspector found at least one.
    List<FSImageFile> found = inspector.getLatestImages();
    return found != null && !found.isEmpty();
  } catch (FileNotFoundException e) {
    // No rollback image present at all.
    return false;
  }
}