Usage examples of the Java class org.apache.hadoop.hdfs.util.MD5FileUtils

Project: hadoop    File: FSImage.java
private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf,
    NameNodeFile toNnf, long txid, boolean renameMD5) throws IOException {
  final File fromFile = NNStorage.getStorageFile(sd, fromNnf, txid);
  final File toFile = NNStorage.getStorageFile(sd, toNnf, txid);
  // renameTo fails on Windows if the destination file already exists.
  if(LOG.isDebugEnabled()) {
    LOG.debug("renaming  " + fromFile.getAbsolutePath() 
              + " to " + toFile.getAbsolutePath());
  }
  if (!fromFile.renameTo(toFile)) {
    if (!toFile.delete() || !fromFile.renameTo(toFile)) {
      throw new IOException("renaming  " + fromFile.getAbsolutePath() + " to "  + 
          toFile.getAbsolutePath() + " FAILED");
    }
  }
  if (renameMD5) {
    MD5FileUtils.renameMD5File(fromFile, toFile);
  }
}
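The renameMD5File call above keeps the .md5 sidecar in step with the renamed image. A minimal sketch of that convention, assuming (as MD5FileUtils.getDigestFileForFile resolves it) that the sidecar is simply the data file's name with a ".md5" suffix in the same directory; the paths below are hypothetical.

// Sketch only: move a data file and its .md5 sidecar together,
// assuming the sidecar naming convention "<data file name>.md5".
File from = new File("/tmp/checkpoints/fsimage_0000000000000000042");   // hypothetical
File to   = new File("/tmp/checkpoints/fsimage.backup");                // hypothetical
File fromMd5 = MD5FileUtils.getDigestFileForFile(from);                 // ".../fsimage_0000000000000000042.md5"
if (from.renameTo(to) && fromMd5.exists()) {
  MD5FileUtils.renameMD5File(from, to);   // renames the sidecar to match the new data file name
}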
Project: hadoop    File: TestStartup.java
/**
 * Corrupts the MD5 sum of the fsimage.
 * 
 * @param corruptAll
 *          whether to corrupt one or all of the MD5 sums in the configured
 *          namedirs
 * @throws IOException
 */
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
  List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
  // Corrupt the md5 files in all the namedirs
  for (URI uri: nameDirs) {
    // Directory layout looks like:
    // test/data/dfs/nameN/current/{fsimage,edits,...}
    File nameDir = new File(uri.getPath());
    File dfsDir = nameDir.getParentFile();
    assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
    // Set the md5 file to all zeros
    File imageFile = new File(nameDir,
        Storage.STORAGE_DIR_CURRENT + "/"
        + NNStorage.getImageFileName(0));
    MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
    // Only need to corrupt one if !corruptAll
    if (!corruptAll) {
      break;
    }
  }
}
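Overwriting the sidecar with an all-zero digest is enough to make later verification fail. A short sketch of what that corruption trips, assuming verifySavedMD5 throws IOException when the stored and supplied digests differ (imageFile is the file corrupted above).

// Sketch: after the all-zero digest is saved, the stored value no longer
// matches the image's real digest, so verification fails.
MD5Hash realDigest = MD5FileUtils.computeMd5ForFile(imageFile);
try {
  MD5FileUtils.verifySavedMD5(imageFile, realDigest);
  // not reached: the sidecar now holds sixteen zero bytes
} catch (IOException expectedMismatch) {
  // this is the failure the startup tests deliberately provoke
}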
Project: hadoop    File: TestFSImage.java
/**
 * Ensure that the digest written by the saver equals the digest of the
 * file.
 */
@Test
public void testDigest() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(
        0);
    File fsimage = FSImageTestUtil.findNewestImageFile(currentDir
        .getAbsolutePath());
    assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage),
        MD5FileUtils.computeMd5ForFile(fsimage));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
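The same check can be expressed with verifySavedMD5, which reads the sidecar itself and throws on a missing, malformed, or mismatched digest; a sketch with a hypothetical path:

// Sketch: verify an fsimage against its .md5 sidecar outside of a test.
File image = new File("/data/dfs/name/current/fsimage_0000000000000000042"); // hypothetical
MD5Hash computed = MD5FileUtils.computeMd5ForFile(image);
MD5FileUtils.verifySavedMD5(image, computed);   // throws IOException if the sidecar disagrees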
Project: aliyun-oss-hadoop-fs    File: FSImage.java
private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf,
    NameNodeFile toNnf, long txid, boolean renameMD5) throws IOException {
  final File fromFile = NNStorage.getStorageFile(sd, fromNnf, txid);
  final File toFile = NNStorage.getStorageFile(sd, toNnf, txid);
  // renameTo fails on Windows if the destination file already exists.
  if(LOG.isDebugEnabled()) {
    LOG.debug("renaming  " + fromFile.getAbsolutePath() 
              + " to " + toFile.getAbsolutePath());
  }
  if (!fromFile.renameTo(toFile)) {
    if (!toFile.delete() || !fromFile.renameTo(toFile)) {
      throw new IOException("renaming  " + fromFile.getAbsolutePath() + " to "  + 
          toFile.getAbsolutePath() + " FAILED");
    }
  }
  if (renameMD5) {
    MD5FileUtils.renameMD5File(fromFile, toFile);
  }
}
Project: aliyun-oss-hadoop-fs    File: TestStartup.java
/**
 * Corrupts the MD5 sum of the fsimage.
 * 
 * @param corruptAll
 *          whether to corrupt one or all of the MD5 sums in the configured
 *          namedirs
 * @throws IOException
 */
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
  List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
  // Corrupt the md5 files in all the namedirs
  for (URI uri: nameDirs) {
    // Directory layout looks like:
    // test/data/dfs/nameN/current/{fsimage,edits,...}
    File nameDir = new File(uri.getPath());
    File dfsDir = nameDir.getParentFile();
    assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
    // Set the md5 file to all zeros
    File imageFile = new File(nameDir,
        Storage.STORAGE_DIR_CURRENT + "/"
        + NNStorage.getImageFileName(0));
    MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
    // Only need to corrupt one if !corruptAll
    if (!corruptAll) {
      break;
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestFSImage.java
/**
 * Ensure that the digest written by the saver equals the digest of the
 * file.
 */
@Test
public void testDigest() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(
        0);
    File fsimage = FSImageTestUtil.findNewestImageFile(currentDir
        .getAbsolutePath());
    assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage),
        MD5FileUtils.computeMd5ForFile(fsimage));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: big-c    File: FSImage.java
private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf,
    NameNodeFile toNnf, long txid, boolean renameMD5) throws IOException {
  final File fromFile = NNStorage.getStorageFile(sd, fromNnf, txid);
  final File toFile = NNStorage.getStorageFile(sd, toNnf, txid);
  // renameTo fails on Windows if the destination file already exists.
  if(LOG.isDebugEnabled()) {
    LOG.debug("renaming  " + fromFile.getAbsolutePath() 
              + " to " + toFile.getAbsolutePath());
  }
  if (!fromFile.renameTo(toFile)) {
    if (!toFile.delete() || !fromFile.renameTo(toFile)) {
      throw new IOException("renaming  " + fromFile.getAbsolutePath() + " to "  + 
          toFile.getAbsolutePath() + " FAILED");
    }
  }
  if (renameMD5) {
    MD5FileUtils.renameMD5File(fromFile, toFile);
  }
}
Project: big-c    File: TestStartup.java
/**
 * Corrupts the MD5 sum of the fsimage.
 * 
 * @param corruptAll
 *          whether to corrupt one or all of the MD5 sums in the configured
 *          namedirs
 * @throws IOException
 */
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
  List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
  // Corrupt the md5 files in all the namedirs
  for (URI uri: nameDirs) {
    // Directory layout looks like:
    // test/data/dfs/nameN/current/{fsimage,edits,...}
    File nameDir = new File(uri.getPath());
    File dfsDir = nameDir.getParentFile();
    assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
    // Set the md5 file to all zeros
    File imageFile = new File(nameDir,
        Storage.STORAGE_DIR_CURRENT + "/"
        + NNStorage.getImageFileName(0));
    MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
    // Only need to corrupt one if !corruptAll
    if (!corruptAll) {
      break;
    }
  }
}
Project: big-c    File: TestFSImage.java
/**
 * Ensure that the digest written by the saver equals the digest of the
 * file.
 */
@Test
public void testDigest() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(
        0);
    File fsimage = FSImageTestUtil.findNewestImageFile(currentDir
        .getAbsolutePath());
    assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage),
        MD5FileUtils.computeMd5ForFile(fsimage));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSImage.java
private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf,
    NameNodeFile toNnf, long txid, boolean renameMD5) throws IOException {
  final File fromFile = NNStorage.getStorageFile(sd, fromNnf, txid);
  final File toFile = NNStorage.getStorageFile(sd, toNnf, txid);
  // renameTo fails on Windows if the destination file already exists.
  if(LOG.isDebugEnabled()) {
    LOG.debug("renaming  " + fromFile.getAbsolutePath() 
              + " to " + toFile.getAbsolutePath());
  }
  if (!fromFile.renameTo(toFile)) {
    if (!toFile.delete() || !fromFile.renameTo(toFile)) {
      throw new IOException("renaming  " + fromFile.getAbsolutePath() + " to "  + 
          toFile.getAbsolutePath() + " FAILED");
    }
  }
  if (renameMD5) {
    MD5FileUtils.renameMD5File(fromFile, toFile);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestStartup.java
/**
 * Corrupts the MD5 sum of the fsimage.
 * 
 * @param corruptAll
 *          whether to corrupt one or all of the MD5 sums in the configured
 *          namedirs
 * @throws IOException
 */
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
  List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
  // Corrupt the md5 files in all the namedirs
  for (URI uri: nameDirs) {
    // Directory layout looks like:
    // test/data/dfs/nameN/current/{fsimage,edits,...}
    File nameDir = new File(uri.getPath());
    File dfsDir = nameDir.getParentFile();
    assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
    // Set the md5 file to all zeros
    File imageFile = new File(nameDir,
        Storage.STORAGE_DIR_CURRENT + "/"
        + NNStorage.getImageFileName(0));
    MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
    // Only need to corrupt one if !corruptAll
    if (!corruptAll) {
      break;
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestFSImage.java
/**
 * Ensure that the digest written by the saver equals the digest of the
 * file.
 */
@Test
public void testDigest() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(
        0);
    File fsimage = FSImageTestUtil.findNewestImageFile(currentDir
        .getAbsolutePath());
    assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage),
        MD5FileUtils.computeMd5ForFile(fsimage));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop-EAR    File: TestMD5FileUtils.java
/**
 * Test when .md5 file exists but has a bad format
 */
@Test
public void testVerifyMD5FileBadFormat() throws Exception {
  FileWriter writer = new FileWriter(MD5FileUtils.getDigestFileForFile(TEST_FILE));
  try {
    writer.write("this is not an md5 file");
  } finally {
    writer.close();
  }

  try {
    MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
    fail("Did not throw");
  } catch (IOException ioe) {
    // expected
  }
}
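The valid counterpart, sketched against the same TEST_FILE and TEST_MD5 fixtures: a sidecar written by saveMD5File is a single md5sum-style line that readStoredMd5ForFile and verifySavedMD5 both accept (the "<hex digest> *<file name>" layout is an assumption here, not something the test asserts).

// Sketch: write a well-formed sidecar and confirm it verifies.
MD5FileUtils.saveMD5File(TEST_FILE, TEST_MD5);
MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);          // passes: stored and expected match
MD5Hash stored = MD5FileUtils.readStoredMd5ForFile(TEST_FILE);
assertEquals(TEST_MD5, stored);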
Project: hadoop-EAR    File: TestJournalNodeImageManifest.java
/**
 * Create image with given txid. Finalize and/or delete md5 file if specified.
 */
private void createImage(long txid, boolean finalize, boolean removeMd5)
    throws IOException {
  // create image
  ContentBody cb = genContent(txid);
  HttpClient httpClient = new DefaultHttpClient();
  HttpPost postRequest = TestJournalNodeImageUpload.createRequest(
      httpAddress, cb);
  UploadImageParam.setHeaders(postRequest, journalId,
      FAKE_NSINFO.toColonSeparatedString(), 0, txid, 0, 0, true);
  httpClient.execute(postRequest);

  if (finalize) {
    // roll the image
    journal.saveDigestAndRenameCheckpointImage(txid, digests.get(txid));

    // remove md5 file if requested which leaves only the image file
    if (removeMd5) {
      MD5FileUtils.getDigestFileForFile(
          journal.getImageStorage().getImageFile(txid)).delete();
    }
  }
}
Project: hadoop-EAR    File: FSImage.java
/**
 * Deletes the checkpoint file in every storage directory,
 * since the checkpoint was cancelled. Attempts to remove
 * image/md5/ckptimage files.
 */
void deleteCheckpoint(long txId) throws IOException {
  for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    // image file
    File imageFile = NNStorage.getImageFile(sd, txId);
    if (imageFile.delete())
      LOG.info("Delete checkpoint: deleted: " + imageFile);

    // md5 file
    File imageFileMD5 = MD5FileUtils.getDigestFileForFile(imageFile);
    if (imageFileMD5.delete())
      LOG.info("Delete checkpoint: deleted: " + imageFileMD5);

    // image ckpt file
    File imageCkptFile = NNStorage.getCheckpointImageFile(sd, txId);
    if (imageCkptFile.delete())
      LOG.info("Delete checkpoint: deleted: " + imageCkptFile);
  }
}
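A hypothetical helper in the same spirit, for callers that want to drop any data file together with its sidecar; deleteWithDigest and its log messages are illustrative, not part of the project.

// Hypothetical helper: delete a data file and its .md5 sidecar together,
// ignoring files that are already gone.
static void deleteWithDigest(File dataFile) {
  File md5File = MD5FileUtils.getDigestFileForFile(dataFile);
  if (dataFile.exists() && !dataFile.delete()) {
    LOG.warn("Could not delete " + dataFile);
  }
  if (md5File.exists() && !md5File.delete()) {
    LOG.warn("Could not delete " + md5File);
  }
}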
Project: hadoop-plus    File: TestStartup.java
/**
 * Corrupts the MD5 sum of the fsimage.
 * 
 * @param corruptAll
 *          whether to corrupt one or all of the MD5 sums in the configured
 *          namedirs
 * @throws IOException
 */
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
  List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
  // Corrupt the md5 files in all the namedirs
  for (URI uri: nameDirs) {
    // Directory layout looks like:
    // test/data/dfs/nameN/current/{fsimage,edits,...}
    File nameDir = new File(uri.getPath());
    File dfsDir = nameDir.getParentFile();
    assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
    // Set the md5 file to all zeros
    File imageFile = new File(nameDir,
        Storage.STORAGE_DIR_CURRENT + "/"
        + NNStorage.getImageFileName(0));
    MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
    // Only need to corrupt one if !corruptAll
    if (!corruptAll) {
      break;
    }
  }
}
Project: FlexMap    File: FSImage.java
private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf,
    NameNodeFile toNnf, long txid, boolean renameMD5) throws IOException {
  final File fromFile = NNStorage.getStorageFile(sd, fromNnf, txid);
  final File toFile = NNStorage.getStorageFile(sd, toNnf, txid);
  // renameTo fails on Windows if the destination file already exists.
  if(LOG.isDebugEnabled()) {
    LOG.debug("renaming  " + fromFile.getAbsolutePath() 
              + " to " + toFile.getAbsolutePath());
  }
  if (!fromFile.renameTo(toFile)) {
    if (!toFile.delete() || !fromFile.renameTo(toFile)) {
      throw new IOException("renaming  " + fromFile.getAbsolutePath() + " to "  + 
          toFile.getAbsolutePath() + " FAILED");
    }
  }
  if (renameMD5) {
    MD5FileUtils.renameMD5File(fromFile, toFile);
  }
}
Project: FlexMap    File: TestStartup.java
/**
 * Corrupts the MD5 sum of the fsimage.
 * 
 * @param corruptAll
 *          whether to corrupt one or all of the MD5 sums in the configured
 *          namedirs
 * @throws IOException
 */
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
  List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
  // Corrupt the md5 files in all the namedirs
  for (URI uri: nameDirs) {
    // Directory layout looks like:
    // test/data/dfs/nameN/current/{fsimage,edits,...}
    File nameDir = new File(uri.getPath());
    File dfsDir = nameDir.getParentFile();
    assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
    // Set the md5 file to all zeros
    File imageFile = new File(nameDir,
        Storage.STORAGE_DIR_CURRENT + "/"
        + NNStorage.getImageFileName(0));
    MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
    // Only need to corrupt one if !corruptAll
    if (!corruptAll) {
      break;
    }
  }
}
Project: FlexMap    File: TestFSImage.java
/**
 * Ensure that the digest written by the saver equals the digest of the
 * file.
 */
@Test
public void testDigest() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(
        0);
    File fsimage = FSImageTestUtil.findNewestImageFile(currentDir
        .getAbsolutePath());
    assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage),
        MD5FileUtils.computeMd5ForFile(fsimage));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop-TCP    File: TestStartup.java
/**
 * Corrupts the MD5 sum of the fsimage.
 * 
 * @param corruptAll
 *          whether to corrupt one or all of the MD5 sums in the configured
 *          namedirs
 * @throws IOException
 */
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
  List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
  // Corrupt the md5 files in all the namedirs
  for (URI uri: nameDirs) {
    // Directory layout looks like:
    // test/data/dfs/nameN/current/{fsimage,edits,...}
    File nameDir = new File(uri.getPath());
    File dfsDir = nameDir.getParentFile();
    assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
    // Set the md5 file to all zeros
    File imageFile = new File(nameDir,
        Storage.STORAGE_DIR_CURRENT + "/"
        + NNStorage.getImageFileName(0));
    MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
    // Only need to corrupt one if !corruptAll
    if (!corruptAll) {
      break;
    }
  }
}
Project: hardfs    File: TestStartup.java
/**
 * Corrupts the MD5 sum of the fsimage.
 * 
 * @param corruptAll
 *          whether to corrupt one or all of the MD5 sums in the configured
 *          namedirs
 * @throws IOException
 */
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
  List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
  // Corrupt the md5 files in all the namedirs
  for (URI uri: nameDirs) {
    // Directory layout looks like:
    // test/data/dfs/nameN/current/{fsimage,edits,...}
    File nameDir = new File(uri.getPath());
    File dfsDir = nameDir.getParentFile();
    assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
    // Set the md5 file to all zeros
    File imageFile = new File(nameDir,
        Storage.STORAGE_DIR_CURRENT + "/"
        + NNStorage.getImageFileName(0));
    MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
    // Only need to corrupt one if !corruptAll
    if (!corruptAll) {
      break;
    }
  }
}
Project: hadoop-on-lustre2    File: FSImage.java
private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf,
    NameNodeFile toNnf, long txid, boolean renameMD5) throws IOException {
  final File fromFile = NNStorage.getStorageFile(sd, fromNnf, txid);
  final File toFile = NNStorage.getStorageFile(sd, toNnf, txid);
  // renameTo fails on Windows if the destination file already exists.
  if(LOG.isDebugEnabled()) {
    LOG.debug("renaming  " + fromFile.getAbsolutePath() 
              + " to " + toFile.getAbsolutePath());
  }
  if (!fromFile.renameTo(toFile)) {
    if (!toFile.delete() || !fromFile.renameTo(toFile)) {
      throw new IOException("renaming  " + fromFile.getAbsolutePath() + " to "  + 
          toFile.getAbsolutePath() + " FAILED");
    }
  }
  if (renameMD5) {
    MD5FileUtils.renameMD5File(fromFile, toFile);
  }
}
Project: hadoop-on-lustre2    File: TestStartup.java
/**
 * Corrupts the MD5 sum of the fsimage.
 * 
 * @param corruptAll
 *          whether to corrupt one or all of the MD5 sums in the configured
 *          namedirs
 * @throws IOException
 */
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
  List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
  // Corrupt the md5 files in all the namedirs
  for (URI uri: nameDirs) {
    // Directory layout looks like:
    // test/data/dfs/nameN/current/{fsimage,edits,...}
    File nameDir = new File(uri.getPath());
    File dfsDir = nameDir.getParentFile();
    assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
    // Set the md5 file to all zeros
    File imageFile = new File(nameDir,
        Storage.STORAGE_DIR_CURRENT + "/"
        + NNStorage.getImageFileName(0));
    MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
    // Only need to corrupt one if !corruptAll
    if (!corruptAll) {
      break;
    }
  }
}
Project: hadoop-on-lustre2    File: TestFSImage.java
/**
 * Ensure that the digest written by the saver equals the digest of the
 * file.
 */
@Test
public void testDigest() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(
        0);
    File fsimage = FSImageTestUtil.findNewestImageFile(currentDir
        .getAbsolutePath());
    assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage),
        MD5FileUtils.computeMd5ForFile(fsimage));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop    File: FSImageFormatProtobuf.java
void load(File file) throws IOException {
  long start = Time.monotonicNow();
  imgDigest = MD5FileUtils.computeMd5ForFile(file);
  RandomAccessFile raFile = new RandomAccessFile(file, "r");
  FileInputStream fin = new FileInputStream(file);
  try {
    loadInternal(raFile, fin);
    long end = Time.monotonicNow();
    LOG.info("Loaded FSImage in " + (end - start) / 1000 + " seconds.");
  } finally {
    fin.close();
    raFile.close();
  }
}
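For reference, computeMd5ForFile can be approximated with the JDK's MessageDigest over a buffered stream; the helper below is an illustration of that idea, not the project's implementation (imports from java.io and java.security are assumed).

// Illustration: compute a file's MD5 roughly the way MD5FileUtils does,
// by streaming the file through a DigestInputStream.
static MD5Hash md5Of(File file) throws IOException {
  final MessageDigest md;
  try {
    md = MessageDigest.getInstance("MD5");
  } catch (NoSuchAlgorithmException e) {
    throw new IOException("MD5 algorithm not available", e);
  }
  try (InputStream in = new DigestInputStream(
      new BufferedInputStream(new FileInputStream(file)), md)) {
    byte[] buf = new byte[64 * 1024];
    while (in.read(buf) >= 0) {
      // reading drives the digest; nothing else to do per chunk
    }
  }
  return new MD5Hash(md.digest());
}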
Project: hadoop    File: FSImage.java
/**
 * Load the image namespace from the given image file, verifying
 * it against the MD5 sum stored in its associated .md5 file.
 */
private void loadFSImage(File imageFile, FSNamesystem target,
    MetaRecoveryContext recovery, boolean requireSameLayoutVersion)
    throws IOException {
  MD5Hash expectedMD5 = MD5FileUtils.readStoredMd5ForFile(imageFile);
  if (expectedMD5 == null) {
    throw new IOException("No MD5 file found corresponding to image file "
        + imageFile);
  }
  loadFSImage(imageFile, expectedMD5, target, recovery,
      requireSameLayoutVersion);
}
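The two-argument overload called at the end is where the digest is actually enforced; the check it builds on amounts to the comparison sketched here (the exception text is illustrative).

// Sketch of the digest check behind the overload: recompute and compare.
MD5Hash expected = MD5FileUtils.readStoredMd5ForFile(imageFile);
MD5Hash actual = MD5FileUtils.computeMd5ForFile(imageFile);
if (expected == null || !expected.equals(actual)) {
  throw new IOException("Image file " + imageFile
      + " is corrupt: stored MD5 " + expected + " does not match computed " + actual);
}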
Project: hadoop    File: FSImage.java
/**
 * Save the contents of the FS image to the file.
 */
void saveFSImage(SaveNamespaceContext context, StorageDirectory sd,
    NameNodeFile dstType) throws IOException {
  long txid = context.getTxId();
  File newFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
  File dstFile = NNStorage.getStorageFile(sd, dstType, txid);

  FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context);
  FSImageCompression compression = FSImageCompression.createCompression(conf);
  saver.save(newFile, compression);

  MD5FileUtils.saveMD5File(dstFile, saver.getSavedDigest());
  storage.setMostRecentCheckpointInfo(txid, Time.now());
}
Project: hadoop    File: FSImage.java
/**
 * This is called by the 2NN after having downloaded an image, and by
 * the NN after having received a new image from the 2NN. It
 * renames the image from fsimage_N.ckpt to fsimage_N and also
 * saves the related .md5 file into place.
 */
public synchronized void saveDigestAndRenameCheckpointImage(NameNodeFile nnf,
    long txid, MD5Hash digest) throws IOException {
  // Write and rename MD5 file
  List<StorageDirectory> badSds = Lists.newArrayList();

  for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
    File imageFile = NNStorage.getImageFile(sd, nnf, txid);
    try {
      MD5FileUtils.saveMD5File(imageFile, digest);
    } catch (IOException ioe) {
      badSds.add(sd);
    }
  }
  storage.reportErrorsOnDirectories(badSds);

  CheckpointFaultInjector.getInstance().afterMD5Rename();

  // Rename image from tmp file
  renameCheckpoint(txid, NameNodeFile.IMAGE_NEW, nnf, false);
  // So long as this is the newest image available,
  // advertise it as such to other checkpointers
  // from now on
  if (txid > storage.getMostRecentCheckpointTxId()) {
    storage.setMostRecentCheckpointInfo(txid, Time.now());
  }
}
Project: hadoop    File: ImageServlet.java
/**
 * Set headers for content length, and, if available, md5.
 * @throws IOException 
 */
public static void setVerificationHeadersForGet(HttpServletResponse response,
    File file) throws IOException {
  response.setHeader(TransferFsImage.CONTENT_LENGTH,
      String.valueOf(file.length()));
  MD5Hash hash = MD5FileUtils.readStoredMd5ForFile(file);
  if (hash != null) {
    response.setHeader(TransferFsImage.MD5_HEADER, hash.toString());
  }
}
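A hypothetical client-side counterpart: after downloading, read the same header back and compare it with a locally computed digest (imageUrl and localFile are placeholders; TransferFsImage.MD5_HEADER is the header name the servlet sets above).

// Sketch: verify a downloaded image against the MD5 header advertised by the servlet.
HttpURLConnection conn = (HttpURLConnection) imageUrl.openConnection();   // imageUrl: hypothetical java.net.URL
String advertised = conn.getHeaderField(TransferFsImage.MD5_HEADER);
// ... stream the response body into localFile (hypothetical java.io.File) ...
if (advertised != null) {
  MD5Hash expected = new MD5Hash(advertised);                    // parse the hex digest from the header
  MD5Hash actual = MD5FileUtils.computeMd5ForFile(localFile);
  if (!expected.equals(actual)) {
    throw new IOException("Downloaded image MD5 does not match the advertised digest");
  }
}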
Project: hadoop    File: ImageServlet.java
/**
 * Set headers for image length and if available, md5.
 * 
 * @throws IOException
 */
static void setVerificationHeadersForPut(HttpURLConnection connection,
    File file) throws IOException {
  connection.setRequestProperty(TransferFsImage.CONTENT_LENGTH,
      String.valueOf(file.length()));
  MD5Hash hash = MD5FileUtils.readStoredMd5ForFile(file);
  if (hash != null) {
    connection
        .setRequestProperty(TransferFsImage.MD5_HEADER, hash.toString());
  }
}
Project: hadoop    File: TestFetchImage.java
/**
 * Run `hdfs dfsadmin -fetchImage ...' and verify that the downloaded image is
 * correct.
 */
private static void runFetchImage(DFSAdmin dfsAdmin, MiniDFSCluster cluster)
    throws Exception {
  int retVal = dfsAdmin.run(new String[]{"-fetchImage",
      FETCHED_IMAGE_FILE.getPath() });

  assertEquals(0, retVal);

  File highestImageOnNn = getHighestFsImageOnCluster(cluster);
  MD5Hash expected = MD5FileUtils.computeMd5ForFile(highestImageOnNn);
  MD5Hash actual = MD5FileUtils.computeMd5ForFile(
      new File(FETCHED_IMAGE_FILE, highestImageOnNn.getName()));

  assertEquals(expected, actual);
}
Project: aliyun-oss-hadoop-fs    File: FSImageFormatProtobuf.java
void load(File file) throws IOException {
  long start = Time.monotonicNow();
  imgDigest = MD5FileUtils.computeMd5ForFile(file);
  RandomAccessFile raFile = new RandomAccessFile(file, "r");
  FileInputStream fin = new FileInputStream(file);
  try {
    loadInternal(raFile, fin);
    long end = Time.monotonicNow();
    LOG.info("Loaded FSImage in " + (end - start) / 1000 + " seconds.");
  } finally {
    fin.close();
    raFile.close();
  }
}
Project: aliyun-oss-hadoop-fs    File: FSImage.java
/**
 * Load the image namespace from the given image file, verifying
 * it against the MD5 sum stored in its associated .md5 file.
 */
private void loadFSImage(File imageFile, FSNamesystem target,
    MetaRecoveryContext recovery, boolean requireSameLayoutVersion)
    throws IOException {
  MD5Hash expectedMD5 = MD5FileUtils.readStoredMd5ForFile(imageFile);
  if (expectedMD5 == null) {
    throw new IOException("No MD5 file found corresponding to image file "
        + imageFile);
  }
  loadFSImage(imageFile, expectedMD5, target, recovery,
      requireSameLayoutVersion);
}
Project: aliyun-oss-hadoop-fs    File: FSImage.java
/**
 * Save the contents of the FS image to the file.
 */
void saveFSImage(SaveNamespaceContext context, StorageDirectory sd,
    NameNodeFile dstType) throws IOException {
  long txid = context.getTxId();
  File newFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
  File dstFile = NNStorage.getStorageFile(sd, dstType, txid);

  FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context);
  FSImageCompression compression = FSImageCompression.createCompression(conf);
  saver.save(newFile, compression);

  MD5FileUtils.saveMD5File(dstFile, saver.getSavedDigest());
  storage.setMostRecentCheckpointInfo(txid, Time.now());
}
Project: aliyun-oss-hadoop-fs    File: FSImage.java
/**
 * This is called by the 2NN after having downloaded an image, and by
 * the NN after having received a new image from the 2NN. It
 * renames the image from fsimage_N.ckpt to fsimage_N and also
 * saves the related .md5 file into place.
 */
public synchronized void saveDigestAndRenameCheckpointImage(NameNodeFile nnf,
    long txid, MD5Hash digest) throws IOException {
  // Write and rename MD5 file
  List<StorageDirectory> badSds = Lists.newArrayList();

  for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
    File imageFile = NNStorage.getImageFile(sd, nnf, txid);
    try {
      MD5FileUtils.saveMD5File(imageFile, digest);
    } catch (IOException ioe) {
      badSds.add(sd);
    }
  }
  storage.reportErrorsOnDirectories(badSds);

  CheckpointFaultInjector.getInstance().afterMD5Rename();

  // Rename image from tmp file
  renameCheckpoint(txid, NameNodeFile.IMAGE_NEW, nnf, false);
  // So long as this is the newest image available,
  // advertise it as such to other checkpointers
  // from now on
  if (txid > storage.getMostRecentCheckpointTxId()) {
    storage.setMostRecentCheckpointInfo(txid, Time.now());
  }
}
Project: aliyun-oss-hadoop-fs    File: ImageServlet.java
/**
 * Set headers for content length, and, if available, md5.
 * @throws IOException 
 */
public static void setVerificationHeadersForGet(HttpServletResponse response,
    File file) throws IOException {
  response.setHeader(TransferFsImage.CONTENT_LENGTH,
      String.valueOf(file.length()));
  MD5Hash hash = MD5FileUtils.readStoredMd5ForFile(file);
  if (hash != null) {
    response.setHeader(TransferFsImage.MD5_HEADER, hash.toString());
  }
}
Project: aliyun-oss-hadoop-fs    File: ImageServlet.java
/**
 * Set headers for image length and if available, md5.
 * 
 * @throws IOException
 */
static void setVerificationHeadersForPut(HttpURLConnection connection,
    File file) throws IOException {
  connection.setRequestProperty(TransferFsImage.CONTENT_LENGTH,
      String.valueOf(file.length()));
  MD5Hash hash = MD5FileUtils.readStoredMd5ForFile(file);
  if (hash != null) {
    connection
        .setRequestProperty(TransferFsImage.MD5_HEADER, hash.toString());
  }
}
Project: aliyun-oss-hadoop-fs    File: TestFetchImage.java
/**
 * Run `hdfs dfsadmin -fetchImage ...' and verify that the downloaded image is
 * correct.
 */
private static void runFetchImage(DFSAdmin dfsAdmin, MiniDFSCluster cluster)
    throws Exception {
  int retVal = dfsAdmin.run(new String[]{"-fetchImage",
      FETCHED_IMAGE_FILE.getPath() });

  assertEquals(0, retVal);

  File highestImageOnNn = getHighestFsImageOnCluster(cluster);
  MD5Hash expected = MD5FileUtils.computeMd5ForFile(highestImageOnNn);
  MD5Hash actual = MD5FileUtils.computeMd5ForFile(
      new File(FETCHED_IMAGE_FILE, highestImageOnNn.getName()));

  assertEquals(expected, actual);
}
Project: big-c    File: FSImageFormatProtobuf.java
void load(File file) throws IOException {
  long start = Time.monotonicNow();
  imgDigest = MD5FileUtils.computeMd5ForFile(file);
  RandomAccessFile raFile = new RandomAccessFile(file, "r");
  FileInputStream fin = new FileInputStream(file);
  try {
    loadInternal(raFile, fin);
    long end = Time.monotonicNow();
    LOG.info("Loaded FSImage in " + (end - start) / 1000 + " seconds.");
  } finally {
    fin.close();
    raFile.close();
  }
}
Project: big-c    File: FSImage.java
/**
 * Load the image namespace from the given image file, verifying
 * it against the MD5 sum stored in its associated .md5 file.
 */
private void loadFSImage(File imageFile, FSNamesystem target,
    MetaRecoveryContext recovery, boolean requireSameLayoutVersion)
    throws IOException {
  MD5Hash expectedMD5 = MD5FileUtils.readStoredMd5ForFile(imageFile);
  if (expectedMD5 == null) {
    throw new IOException("No MD5 file found corresponding to image file "
        + imageFile);
  }
  loadFSImage(imageFile, expectedMD5, target, recovery,
      requireSameLayoutVersion);
}