Java class org.apache.hadoop.io.MD5Hash: example source code
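
The snippets below come from the HDFS NameNode image-transfer and checksum-verification code paths. As orientation, here is a minimal, self-contained sketch of the core MD5Hash calls they rely on (digest, hex round-trip, equality); the class name and sample bytes are illustrative only.

import org.apache.hadoop.io.MD5Hash;

public class MD5HashDemo {
  public static void main(String[] args) {
    // Digest raw bytes; MD5Hash.digest also accepts a String or an InputStream.
    MD5Hash fromBytes = MD5Hash.digest(new byte[] { 1, 2, 3 });

    // Round-trip through the 32-character hex form used in .md5 files and HTTP headers.
    String hex = fromBytes.toString();
    MD5Hash parsed = new MD5Hash(hex);

    // Value equality is what loadFSImage uses to detect a corrupt image.
    System.out.println(fromBytes.equals(parsed)); // prints: true
  }
}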

Project: hadoop    File: TransferFsImage.java
public static MD5Hash downloadImageToStorage(URL fsName, long imageTxId,
    Storage dstStorage, boolean needDigest) throws IOException {
  String fileid = ImageServlet.getParamStringForImage(null,
      imageTxId, dstStorage);
  String fileName = NNStorage.getCheckpointImageFileName(imageTxId);

  List<File> dstFiles = dstStorage.getFiles(
      NameNodeDirType.IMAGE, fileName);
  if (dstFiles.isEmpty()) {
    throw new IOException("No targets in destination storage!");
  }

  MD5Hash hash = getFileClient(fsName, fileid, dstFiles, dstStorage, needDigest);
  LOG.info("Downloaded file " + dstFiles.get(0).getName() + " size " +
      dstFiles.get(0).length() + " bytes.");
  return hash;
}
Project: hadoop    File: TransferFsImage.java
static MD5Hash handleUploadImageRequest(HttpServletRequest request,
    long imageTxId, Storage dstStorage, InputStream stream,
    long advertisedSize, DataTransferThrottler throttler) throws IOException {

  String fileName = NNStorage.getCheckpointImageFileName(imageTxId);

  List<File> dstFiles = dstStorage.getFiles(NameNodeDirType.IMAGE, fileName);
  if (dstFiles.isEmpty()) {
    throw new IOException("No targets in destination storage!");
  }

  MD5Hash advertisedDigest = parseMD5Header(request);
  MD5Hash hash = receiveFile(fileName, dstFiles, dstStorage, true,
      advertisedSize, advertisedDigest, fileName, stream, throttler);
  LOG.info("Downloaded file " + dstFiles.get(0).getName() + " size "
      + dstFiles.get(0).length() + " bytes.");
  return hash;
}
Project: hadoop    File: FSImage.java
/**
 * Load in the filesystem image from file. It's a big list of
 * filenames and blocks.
 */
private void loadFSImage(File curFile, MD5Hash expectedMd5,
    FSNamesystem target, MetaRecoveryContext recovery,
    boolean requireSameLayoutVersion) throws IOException {
  // BlockPoolId is required when the FsImageLoader loads the rolling upgrade
  // information. Make sure the ID is properly set.
  target.setBlockPoolId(this.getBlockPoolID());

  FSImageFormat.LoaderDelegator loader = FSImageFormat.newLoader(conf, target);
  loader.load(curFile, requireSameLayoutVersion);

  // Check that the image digest we loaded matches up with what
  // we expected
  MD5Hash readImageMd5 = loader.getLoadedImageMd5();
  if (expectedMd5 != null &&
      !expectedMd5.equals(readImageMd5)) {
    throw new IOException("Image file " + curFile +
        " is corrupt with MD5 checksum of " + readImageMd5 +
        " but expecting " + expectedMd5);
  }

  long txId = loader.getLoadedImageTxId();
  LOG.info("Loaded image for txid " + txId + " from " + curFile);
  lastAppliedTxId = txId;
  storage.setMostRecentCheckpointInfo(txId, curFile.lastModified());
}
Project: hadoop    File: MD5FileUtils.java
/**
 * Read the md5 checksum stored alongside the given data file.
 * @param dataFile the file containing data
 * @return the checksum stored in dataFile.md5
 */
public static MD5Hash readStoredMd5ForFile(File dataFile) throws IOException {
  final File md5File = getDigestFileForFile(dataFile);
  if (!md5File.exists()) {
    return null;
  }

  final Matcher matcher = readStoredMd5(md5File);
  String storedHash = matcher.group(1);
  File referencedFile = new File(matcher.group(2));

  // Sanity check: Make sure that the file referenced in the .md5 file at
  // least has the same name as the file we expect
  if (!referencedFile.getName().equals(dataFile.getName())) {
    throw new IOException(
        "MD5 file at " + md5File + " references file named " +
        referencedFile.getName() + " but we expected it to reference " +
        dataFile);
  }
  return new MD5Hash(storedHash);
}
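
readStoredMd5ForFile above delegates the actual parsing to readStoredMd5, whose source is not shown here. The following is a hedged sketch of what such parsing can look like, assuming the sidecar follows the common md5sum(1) line format of a 32-digit hex hash followed by the referenced file name; the regex and helper name are illustrative, not the Hadoop implementation.

import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.io.MD5Hash;

public class Md5SidecarSketch {
  // md5sum(1)-style line: 32 hex digits, a separator, then the file name.
  private static final Pattern LINE = Pattern.compile("([0-9a-f]{32}) \\*?(.+)");

  /** Parse one sidecar line; returns null if the line is malformed
      or references a different file name than expected. */
  public static MD5Hash parseLine(String line, String expectedFileName) {
    Matcher m = LINE.matcher(line.trim());
    if (!m.matches() || !m.group(2).equals(expectedFileName)) {
      return null;
    }
    return new MD5Hash(m.group(1));
  }
}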
Project: hadoop    File: TestStartup.java
/**
 * Corrupts the MD5 sum of the fsimage.
 * 
 * @param corruptAll
 *          whether to corrupt one or all of the MD5 sums in the configured
 *          namedirs
 * @throws IOException
 */
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
  List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
  // Corrupt the md5 files in all the namedirs
  for (URI uri: nameDirs) {
    // Directory layout looks like:
    // test/data/dfs/nameN/current/{fsimage,edits,...}
    File nameDir = new File(uri.getPath());
    File dfsDir = nameDir.getParentFile();
    assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
    // Set the md5 file to all zeros
    File imageFile = new File(nameDir,
        Storage.STORAGE_DIR_CURRENT + "/"
        + NNStorage.getImageFileName(0));
    MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
    // Only need to corrupt one if !corruptAll
    if (!corruptAll) {
      break;
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TransferFsImage.java
public static MD5Hash downloadImageToStorage(URL fsName, long imageTxId,
    Storage dstStorage, boolean needDigest, boolean isBootstrapStandby)
    throws IOException {
  String fileid = ImageServlet.getParamStringForImage(null,
      imageTxId, dstStorage, isBootstrapStandby);
  String fileName = NNStorage.getCheckpointImageFileName(imageTxId);

  List<File> dstFiles = dstStorage.getFiles(
      NameNodeDirType.IMAGE, fileName);
  if (dstFiles.isEmpty()) {
    throw new IOException("No targets in destination storage!");
  }

  MD5Hash hash = getFileClient(fsName, fileid, dstFiles, dstStorage, needDigest);
  LOG.info("Downloaded file " + dstFiles.get(0).getName() + " size " +
      dstFiles.get(0).length() + " bytes.");
  return hash;
}
Project: aliyun-oss-hadoop-fs    File: TransferFsImage.java / FSImage.java / MD5FileUtils.java / TestStartup.java
The remaining snippets from this project (handleUploadImageRequest, loadFSImage, readStoredMd5ForFile, and corruptFSImageMD5) are identical to the hadoop versions shown above.
Project: big-c    File: TransferFsImage.java / FSImage.java / MD5FileUtils.java / TestStartup.java
All five snippets from this project (downloadImageToStorage, handleUploadImageRequest, loadFSImage, readStoredMd5ForFile, and corruptFSImageMD5) are identical to the hadoop versions shown above.
Project: FlexMap    File: TransferFsImage.java, FSImage.java
Project: hadoop-2.6.0-cdh5.4.3    File: FSImage.java, TestStartup.java
Project: hadoop-plus    File: TestStartup.java
The downloadImageToStorage, loadFSImage, and corruptFSImageMD5 snippets from these projects are identical to the hadoop versions shown above.
Project: hadoop-2.6.0-cdh5.4.3    File: AbstractDelegationTokenSecretManager.java
@Override
protected synchronized byte[] createPassword(TokenIdent identifier) {
  int sequenceNum;
  long now = Time.now();
  sequenceNum = incrementDelegationTokenSeqNum();
  identifier.setIssueDate(now);
  identifier.setMaxDate(now + tokenMaxLifetime);
  identifier.setMasterKeyId(currentKey.getKeyId());
  identifier.setSequenceNumber(sequenceNum);
  LOG.info("Creating password for identifier: [" + MD5Hash.digest(identifier.getBytes()) + ", " + currentKey.getKeyId() + "]");
  byte[] password = createPassword(identifier.getBytes(), currentKey.getKey());
  DelegationTokenInformation tokenInfo = new DelegationTokenInformation(now
      + tokenRenewInterval, password, getTrackingIdIfEnabled(identifier));
  try {
    storeToken(identifier, tokenInfo);
  } catch (IOException ioe) {
    LOG.error("Could not store token !!", ioe);
  }
  return password;
}
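
Note how the log line above prints MD5Hash.digest(identifier.getBytes()) rather than the identifier itself, so the raw token bytes never reach the log. A minimal sketch of that fingerprinting pattern; the identifier contents here are made up for illustration.

import org.apache.hadoop.io.MD5Hash;

public class TokenFingerprint {
  /** Short, stable fingerprint for an opaque identifier. */
  static String fingerprint(byte[] identifierBytes) {
    return MD5Hash.digest(identifierBytes).toString();
  }

  public static void main(String[] args) {
    byte[] fakeIdentifier = "owner=alice,renewer=rm".getBytes();
    System.out.println("Creating password for identifier: ["
        + fingerprint(fakeIdentifier) + "]");
  }
}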
Project: hadoop-EAR    File: TestImageUploadStream.java
/**
 * Test upload with one failed channel.
 */
private void testSingleFailure(InjectionEventI failOn) throws Exception {
  LOG.info("----- testSingleFailure for event : " + failOn);
  Random r = new Random();
  int numNodes = cluster.getNumNodes();
  TestImageUploadStreamInjectionHandler h = new TestImageUploadStreamInjectionHandler(
      numNodes);
  InjectionHandler.set(h);

  for (int i = 0; i < iterations; i++) {
    LOG.info("-- iteration: " + i);
    int failJournal = r.nextInt(numNodes);

    h.setFailure(failJournal, failOn);

    // the write should succeed
    MD5Hash digest = writeDataAndAssertContents(h, i);

    // clear hashes for next iteration
    h.clearHandler();

    // finalize the image
    assertManifest(i, digest, true);
  }
}
Project: hadoop-EAR    File: MD5MD5CRC32FileChecksum.java
/** Return the object represented in the attributes. */
public static MD5MD5CRC32FileChecksum valueOf(Attributes attrs
    ) throws SAXException {
  final String bytesPerCRC = attrs.getValue("bytesPerCRC");
  final String crcPerBlock = attrs.getValue("crcPerBlock");
  final String md5 = attrs.getValue("md5");
  if (bytesPerCRC == null || crcPerBlock == null || md5 == null) {
    return null;
  }

  try {
    return new MD5MD5CRC32FileChecksum(Integer.valueOf(bytesPerCRC),
        Integer.valueOf(crcPerBlock), new MD5Hash(md5));
  } catch(Exception e) {
    throw new SAXException("Invalid attributes: bytesPerCRC=" + bytesPerCRC
        + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5, e);
  }
}
Project: FlexMap    File: MD5FileUtils.java
readStoredMd5ForFile here is identical to the hadoop version shown above.
Project: hadoop-EAR    File: FSImage.java
/**
 * This is called by the 2NN after having downloaded an image, and by
 * the NN after having received a new image from the 2NN. It
 * renames the image from fsimage_N.ckpt to fsimage_N and also
 * saves the related .md5 file into place.
 */
synchronized void saveDigestAndRenameCheckpointImage(
    long txid, MD5Hash digest) throws IOException {
  if (!digest.equals(storage.getCheckpointImageDigest(txid))) {
    throw new IOException(
        "Checkpoint image is corrupt: expecting an MD5 checksum of" +
            digest + " but is " + storage.getCheckpointImageDigest(txid));
  }

  imageSet.saveDigestAndRenameCheckpointImage(txid, digest);

  // So long as this is the newest image available,
  // advertise it as such to other checkpointers
  // from now on
  storage.setMostRecentCheckpointTxId(txid);
}
Project: hadoop-plus    File: FSImage.java
/**
 * Load in the filesystem image from file. It's a big list of
 * filenames and blocks.
 */
private void loadFSImage(File curFile, MD5Hash expectedMd5,
    FSNamesystem target, MetaRecoveryContext recovery) throws IOException {
  FSImageFormat.Loader loader = new FSImageFormat.Loader(
      conf, target);
  loader.load(curFile);
  target.setBlockPoolId(this.getBlockPoolID());

  // Check that the image digest we loaded matches up with what
  // we expected
  MD5Hash readImageMd5 = loader.getLoadedImageMd5();
  if (expectedMd5 != null &&
      !expectedMd5.equals(readImageMd5)) {
    throw new IOException("Image file " + curFile +
        " is corrupt with MD5 checksum of " + readImageMd5 +
        " but expecting " + expectedMd5);
  }

  long txId = loader.getLoadedImageTxId();
  LOG.info("Loaded image for txid " + txId + " from " + curFile);
  lastAppliedTxId = txId;
  storage.setMostRecentCheckpointInfo(txId, curFile.lastModified());
}
Project: hadoop    File: TransferFsImage.java
/**
 * Client-side method to fetch a file from a server.
 * Copies the response from the URL to a list of local files.
 * @param dstStorage if an error occurs writing to one of the files,
 *                   this storage object will be notified.
 * @return a digest of the received file if getChecksum is true
 */
static MD5Hash getFileClient(URL infoServer,
    String queryString, List<File> localPaths,
    Storage dstStorage, boolean getChecksum) throws IOException {
  URL url = new URL(infoServer, ImageServlet.PATH_SPEC + "?" + queryString);
  LOG.info("Opening connection to " + url);
  return doGetUrl(url, localPaths, dstStorage, getChecksum);
}
Project: hadoop    File: FSImage.java
void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery,
    FSImageFile imageFile, StartupOption startupOption) throws IOException {
  LOG.debug("Planning to load image :\n" + imageFile);
  StorageDirectory sdForProperties = imageFile.sd;
  storage.readProperties(sdForProperties, startupOption);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
    // For txid-based layout, we should have a .md5 file
    // next to the image file
    boolean isRollingRollback = RollingUpgradeStartupOption.ROLLBACK
        .matches(startupOption);
    loadFSImage(imageFile.getFile(), target, recovery, isRollingRollback);
  } else if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FSIMAGE_CHECKSUM, getLayoutVersion())) {
    // In 0.22, we have the checksum stored in the VERSION file.
    String md5 = storage.getDeprecatedProperty(
        NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY);
    if (md5 == null) {
      throw new InconsistentFSStateException(sdForProperties.getRoot(),
          "Message digest property " +
          NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY +
          " not set for storage directory " + sdForProperties.getRoot());
    }
    loadFSImage(imageFile.getFile(), new MD5Hash(md5), target, recovery,
        false);
  } else {
    // We don't have any record of the md5sum
    loadFSImage(imageFile.getFile(), null, target, recovery, false);
  }
}
Project: hadoop    File: FSImage.java
/**
 * Load the image namespace from the given image file, verifying
 * it against the MD5 sum stored in its associated .md5 file.
 */
private void loadFSImage(File imageFile, FSNamesystem target,
    MetaRecoveryContext recovery, boolean requireSameLayoutVersion)
    throws IOException {
  MD5Hash expectedMD5 = MD5FileUtils.readStoredMd5ForFile(imageFile);
  if (expectedMD5 == null) {
    throw new IOException("No MD5 file found corresponding to image file "
        + imageFile);
  }
  loadFSImage(imageFile, expectedMD5, target, recovery,
      requireSameLayoutVersion);
}
Project: hadoop    File: FSImage.java
/**
 * This is called by the 2NN after having downloaded an image, and by
 * the NN after having received a new image from the 2NN. It
 * renames the image from fsimage_N.ckpt to fsimage_N and also
 * saves the related .md5 file into place.
 */
public synchronized void saveDigestAndRenameCheckpointImage(NameNodeFile nnf,
    long txid, MD5Hash digest) throws IOException {
  // Write and rename MD5 file
  List<StorageDirectory> badSds = Lists.newArrayList();

  for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
    File imageFile = NNStorage.getImageFile(sd, nnf, txid);
    try {
      MD5FileUtils.saveMD5File(imageFile, digest);
    } catch (IOException ioe) {
      badSds.add(sd);
    }
  }
  storage.reportErrorsOnDirectories(badSds);

  CheckpointFaultInjector.getInstance().afterMD5Rename();

  // Rename image from tmp file
  renameCheckpoint(txid, NameNodeFile.IMAGE_NEW, nnf, false);
  // So long as this is the newest image available,
  // advertise it as such to other checkpointers
  // from now on
  if (txid > storage.getMostRecentCheckpointTxId()) {
    storage.setMostRecentCheckpointInfo(txid, Time.now());
  }
}
Project: hadoop    File: ImageServlet.java
/**
 * Set headers for content length and, if available, md5.
 * @throws IOException 
 */
public static void setVerificationHeadersForGet(HttpServletResponse response,
    File file) throws IOException {
  response.setHeader(TransferFsImage.CONTENT_LENGTH,
      String.valueOf(file.length()));
  MD5Hash hash = MD5FileUtils.readStoredMd5ForFile(file);
  if (hash != null) {
    response.setHeader(TransferFsImage.MD5_HEADER, hash.toString());
  }
}
Project: hadoop    File: ImageServlet.java
/**
 * Set headers for image length and, if available, md5.
 * 
 * @throws IOException
 */
static void setVerificationHeadersForPut(HttpURLConnection connection,
    File file) throws IOException {
  connection.setRequestProperty(TransferFsImage.CONTENT_LENGTH,
      String.valueOf(file.length()));
  MD5Hash hash = MD5FileUtils.readStoredMd5ForFile(file);
  if (hash != null) {
    connection
        .setRequestProperty(TransferFsImage.MD5_HEADER, hash.toString());
  }
}
Project: hadoop    File: DataXceiver.java
private MD5Hash calcPartialBlockChecksum(ExtendedBlock block,
    long requestLength, DataChecksum checksum, DataInputStream checksumIn)
    throws IOException {
  final int bytesPerCRC = checksum.getBytesPerChecksum();
  final int csize = checksum.getChecksumSize();
  final byte[] buffer = new byte[4*1024];
  MessageDigest digester = MD5Hash.getDigester();

  long remaining = requestLength / bytesPerCRC * csize;
  for (int toDigest = 0; remaining > 0; remaining -= toDigest) {
    toDigest = checksumIn.read(buffer, 0,
        (int) Math.min(remaining, buffer.length));
    if (toDigest < 0) {
      break;
    }
    digester.update(buffer, 0, toDigest);
  }

  int partialLength = (int) (requestLength % bytesPerCRC);
  if (partialLength > 0) {
    byte[] buf = new byte[partialLength];
    final InputStream blockIn = datanode.data.getBlockInputStream(block,
        requestLength - partialLength);
    try {
      // Get the CRC of the partialLength.
      IOUtils.readFully(blockIn, buf, 0, partialLength);
    } finally {
      IOUtils.closeStream(blockIn);
    }
    checksum.update(buf, 0, partialLength);
    byte[] partialCrc = new byte[csize];
    checksum.writeValue(partialCrc, 0, true);
    digester.update(partialCrc);
  }
  return new MD5Hash(digester.digest());
}
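
calcPartialBlockChecksum feeds the stored per-chunk CRCs (plus one recomputed partial CRC) through a single MessageDigest obtained from MD5Hash.getDigester(), then wraps the 16-byte result. Here is the incremental pattern in isolation, with made-up input chunks.

import java.security.MessageDigest;
import org.apache.hadoop.io.MD5Hash;

public class IncrementalDigestSketch {
  public static void main(String[] args) {
    // MD5Hash.getDigester() returns a thread-local MD5 MessageDigest ready
    // for use, so data can be digested chunk by chunk without reallocating.
    MessageDigest digester = MD5Hash.getDigester();
    digester.update(new byte[] { 1, 2, 3 });
    digester.update(new byte[] { 4, 5 });
    MD5Hash hash = new MD5Hash(digester.digest()); // digest() yields 16 bytes
    System.out.println(hash);
  }
}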
Project: hadoop    File: MD5FileUtils.java
/**
 * Verify that the previously saved md5 for the given file matches
 * expectedMd5.
 * @throws IOException 
 */
public static void verifySavedMD5(File dataFile, MD5Hash expectedMD5)
    throws IOException {
  MD5Hash storedHash = readStoredMd5ForFile(dataFile);
  // Check the hash itself
  if (!expectedMD5.equals(storedHash)) {
    throw new IOException(
        "File " + dataFile + " did not match stored MD5 checksum " +
        " (stored: " + storedHash + ", computed: " + expectedMD5);
  }
}
Project: hadoop    File: MD5FileUtils.java
/**
 * Read dataFile and compute its MD5 checksum.
 */
public static MD5Hash computeMd5ForFile(File dataFile) throws IOException {
  InputStream in = new FileInputStream(dataFile);
  try {
    MessageDigest digester = MD5Hash.getDigester();
    DigestInputStream dis = new DigestInputStream(in, digester);
    IOUtils.copyBytes(dis, new IOUtils.NullOutputStream(), 128*1024);

    return new MD5Hash(digester.digest());
  } finally {
    IOUtils.closeStream(in);
  }
}
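
computeMd5ForFile pairs naturally with saveMD5File and verifySavedMD5 from the same class. A small usage sketch of that round trip follows; the file path is supplied by the caller and the class name is illustrative, not part of the snippet above.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;

public class DigestRoundTrip {
  public static void main(String[] args) throws IOException {
    File dataFile = new File(args[0]);

    // Compute the digest, persist it to dataFile.md5, then verify the sidecar.
    MD5Hash digest = MD5FileUtils.computeMd5ForFile(dataFile);
    MD5FileUtils.saveMD5File(dataFile, digest);
    MD5FileUtils.verifySavedMD5(dataFile, digest); // throws IOException on mismatch
  }
}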
Project: hadoop    File: TestFetchImage.java
/**
 * Run `hdfs dfsadmin -fetchImage ...` and verify that the downloaded image is
 * correct.
 */
private static void runFetchImage(DFSAdmin dfsAdmin, MiniDFSCluster cluster)
    throws Exception {
  int retVal = dfsAdmin.run(new String[]{"-fetchImage",
      FETCHED_IMAGE_FILE.getPath() });

  assertEquals(0, retVal);

  File highestImageOnNn = getHighestFsImageOnCluster(cluster);
  MD5Hash expected = MD5FileUtils.computeMd5ForFile(highestImageOnNn);
  MD5Hash actual = MD5FileUtils.computeMd5ForFile(
      new File(FETCHED_IMAGE_FILE, highestImageOnNn.getName()));

  assertEquals(expected, actual);
}
Project: hadoop    File: TestMD5FileUtils.java
/**
 * Test the case where the .md5 file exists but holds an incorrect checksum.
 */
@Test
public void testVerifyMD5FileBadDigest() throws Exception {
  MD5FileUtils.saveMD5File(TEST_FILE, MD5Hash.digest(new byte[0]));
  try {
    MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
    fail("Did not throw");
  } catch (IOException ioe) {
    // Expected
  }
}
Project: hadoop    File: ExecutionSummarizer.java
protected static String getTraceSignature(String input) throws IOException {
  Path inputPath = new Path(input);
  FileSystem fs = inputPath.getFileSystem(new Configuration());
  FileStatus status = fs.getFileStatus(inputPath);
  Path qPath = fs.makeQualified(status.getPath());
  String traceID = status.getModificationTime() + qPath.toString()
                   + status.getOwner() + status.getLen();
  return MD5Hash.digest(traceID).toString();
}
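
getTraceSignature collapses several volatile file attributes into one fixed-length identifier via MD5Hash.digest(String). The same idea in isolation, with field values invented for illustration:

import org.apache.hadoop.io.MD5Hash;

public class TraceSignatureSketch {
  public static void main(String[] args) {
    // Concatenation order matters: the same fields in a different
    // order would produce a different signature.
    String traceID = 1700000000000L + "hdfs://nn:8020/traces/input.json"
        + "alice" + 4096L;
    System.out.println(MD5Hash.digest(traceID).toString());
  }
}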
Project: aliyun-oss-hadoop-fs    File: TransferFsImage.java
getFileClient here is identical to the hadoop version shown above.
Project: aliyun-oss-hadoop-fs    File: TransferFsImage.java
public static MD5Hash doGetUrl(URL url, List<File> localPaths,
    Storage dstStorage, boolean getChecksum) throws IOException {
  HttpURLConnection connection;
  try {
    connection = (HttpURLConnection)
      connectionFactory.openConnection(url, isSpnegoEnabled);
  } catch (AuthenticationException e) {
    throw new IOException(e);
  }

  setTimeout(connection);

  if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
    throw new HttpGetFailedException(
        "Image transfer servlet at " + url +
        " failed with status code " + connection.getResponseCode() +
        "\nResponse message:\n" + connection.getResponseMessage(),
        connection);
  }

  long advertisedSize;
  String contentLength = connection.getHeaderField(CONTENT_LENGTH);
  if (contentLength != null) {
    advertisedSize = Long.parseLong(contentLength);
  } else {
    throw new IOException(CONTENT_LENGTH + " header is not provided " +
                          "by the namenode when trying to fetch " + url);
  }
  MD5Hash advertisedDigest = parseMD5Header(connection);
  String fsImageName = connection
      .getHeaderField(ImageServlet.HADOOP_IMAGE_EDITS_HEADER);
  InputStream stream = connection.getInputStream();

  return receiveFile(url.toExternalForm(), localPaths, dstStorage,
      getChecksum, advertisedSize, advertisedDigest, fsImageName, stream,
      null);
}
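
doGetUrl turns the server's advertised digest header into an MD5Hash via parseMD5Header, whose body is not shown above. A hedged sketch of what that conversion amounts to, assuming the header carries the digest as a hex string; the header name literal here is an assumption mirroring TransferFsImage.MD5_HEADER.

import java.net.HttpURLConnection;
import org.apache.hadoop.io.MD5Hash;

public class Md5HeaderSketch {
  // Assumed to mirror TransferFsImage.MD5_HEADER.
  static final String MD5_HEADER = "X-Image-MD5";

  /** Hex header value to MD5Hash, or null when the server sent no digest. */
  static MD5Hash parseMD5Header(HttpURLConnection connection) {
    String header = connection.getHeaderField(MD5_HEADER);
    return (header != null) ? new MD5Hash(header) : null;
  }
}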