Example source code for the Java class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType
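
The snippets below are aggregated from several Hadoop forks. In the branches sampled here, NodeType is a small enum nested in HdfsConstants that records which kind of node a storage directory belongs to. A minimal sketch (the exact member set varies by branch; JOURNAL_NODE appears only in forks with journal storage, such as hadoop-EAR):

public enum NodeType {
  NAME_NODE,
  DATA_NODE,
  JOURNAL_NODE
}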

Project: hadoop-EAR    File: TestDFSStorageStateRecovery.java
/**
 * Sets up the storage directories for the given node type, either
 * dfs.name.dir or dfs.data.dir. For each element in dfs.name.dir or
 * dfs.data.dir, the subdirectories represented by the first five elements
 * of the <code>state</code> array will be created and populated.
 * See UpgradeUtilities.createStorageDirs().
 * 
 * @param nodeType
 *   the type of node that storage should be created for. Based on this
 *   parameter either dfs.name.dir or dfs.data.dir is used from the global conf.
 * @param state
 *   a row from the testCases table which indicates which directories
 *   to set up for the node
 * @return file paths representing either dfs.name.dir or dfs.data.dir
 *   directories
 */
String[] createStorageState(NodeType nodeType, boolean[] state) throws Exception {
  String[] baseDirs = (nodeType == NAME_NODE ?
                       conf.getStrings("dfs.name.dir") :
                       conf.getStrings("dfs.data.dir"));
  UpgradeUtilities.createEmptyDirs(baseDirs);
  if (state[0])  // current
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "current");
  if (state[1])  // previous
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous");
  if (state[2])  // previous.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous.tmp");
  if (state[3])  // removed.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "removed.tmp");
  if (state[4])  // lastcheckpoint.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "lastcheckpoint.tmp");
  return baseDirs;
}
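
For context, a test might drive this helper as follows; a minimal sketch with a hypothetical state row (the five flags correspond to current, previous, previous.tmp, removed.tmp and lastcheckpoint.tmp):

// Hypothetical test row: "current" and "previous" exist, no tmp directories.
boolean[] state = {true, true, false, false, false};
String[] nameDirs = createStorageState(NodeType.NAME_NODE, state);
// Each returned path is a dfs.name.dir root populated with the requested subdirectories.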
Project: hadoop-EAR    File: NameSpaceSliceStorage.java
/**
 * Format a namespace slice storage. 
 * @param nsSdir the namespace slice storage directory
 * @param nsInfo the name space info
 * @throws IOException Signals that an I/O exception has occurred.
 */
private void format(StorageDirectory nsSdir, NamespaceInfo nsInfo) throws IOException {
  LOG.info("Formatting namespace " + namespaceID + " directory "
      + nsSdir.getCurrentDir());
  nsSdir.clearDirectory(); // create directory
  File rbwDir = new File(nsSdir.getCurrentDir(), STORAGE_DIR_RBW);
  File finalizedDir = new File(nsSdir.getCurrentDir(), STORAGE_DIR_FINALIZED);
  LOG.info("Creating Directories : " + rbwDir + ", " + finalizedDir);
  if (!rbwDir.mkdirs() || !finalizedDir.mkdirs()) {
    throw new IOException("Cannot create directories : " + rbwDir + ", "
        + finalizedDir);
  }
  this.layoutVersion = FSConstants.LAYOUT_VERSION;
  this.cTime = nsInfo.getCTime();
  this.namespaceID = nsInfo.getNamespaceID();
  this.storageType = NodeType.DATA_NODE;
  nsSdir.write();
}
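
After a successful format, each namespace slice directory looks roughly like this (a sketch; STORAGE_DIR_RBW and STORAGE_DIR_FINALIZED conventionally resolve to "rbw" and "finalized"):

// <nsSdir>/current/
//   VERSION     - layoutVersion, namespaceID, cTime, storageType=DATA_NODE
//   rbw/        - replicas being written
//   finalized/  - finalized replicas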
Project: hadoop-on-lustre    File: TestDFSStorageStateRecovery.java
/**
 * Sets up the storage directories for the given node type, either
 * dfs.name.dir or dfs.data.dir. For each element in dfs.name.dir or
 * dfs.data.dir, the subdirectories represented by the first five elements
 * of the <code>state</code> array will be created and populated.
 * See UpgradeUtilities.createStorageDirs().
 * 
 * @param nodeType
 *   the type of node that storage should be created for. Based on this
 *   parameter either dfs.name.dir or dfs.data.dir is used from the global conf.
 * @param state
 *   a row from the testCases table which indicates which directories
 *   to set up for the node
 * @return file paths representing either dfs.name.dir or dfs.data.dir
 *   directories
 */
String[] createStorageState(NodeType nodeType, boolean[] state) throws Exception {
  String[] baseDirs = (nodeType == NAME_NODE ?
                       conf.getStrings("dfs.name.dir") :
                       conf.getStrings("dfs.data.dir"));
  UpgradeUtilities.createEmptyDirs(baseDirs);
  if (state[0])  // current
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "current");
  if (state[1])  // previous
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous");
  if (state[2])  // previous.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous.tmp");
  if (state[3])  // removed.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "removed.tmp");
  if (state[4])  // lastcheckpoint.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "lastcheckpoint.tmp");
  return baseDirs;
}
Project: hadoop-on-lustre    File: TestDFSRollback.java
/**
 * Verify that the new current directory is the old previous.  
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
  switch (nodeType) {
  case NAME_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertTrue(new File(baseDirs[i],"current").isDirectory());
      assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
      assertTrue(new File(baseDirs[i],"current/edits").isFile());
      assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
      assertTrue(new File(baseDirs[i],"current/fstime").isFile());
    }
    break;
  case DATA_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "current")),
          UpgradeUtilities.checksumMasterContents(nodeType));
    }
    break;
  }
  for (int i = 0; i < baseDirs.length; i++) {
    assertFalse(new File(baseDirs[i],"previous").isDirectory());
  }
}
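
The on-disk transition these assertions verify is roughly the following (a sketch):

// Before rollback:  <baseDir>/current   - upgraded state
//                   <baseDir>/previous  - pre-upgrade snapshot
// After rollback:   <baseDir>/current   - restored pre-upgrade state
//                   <baseDir>/previous  - removed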
Project: cumulus    File: TestDFSStorageStateRecovery.java
/**
 * Sets up the storage directories for the given node type, either
 * dfs.name.dir or dfs.data.dir. For each element in dfs.name.dir or
 * dfs.data.dir, the subdirectories represented by the first five elements
 * of the <code>state</code> array will be created and populated.
 * See UpgradeUtilities.createStorageDirs().
 * 
 * @param nodeType
 *   the type of node that storage should be created for. Based on this
 *   parameter either dfs.name.dir or dfs.data.dir is used from the global conf.
 * @param state
 *   a row from the testCases table which indicates which directories
 *   to set up for the node
 * @return file paths representing either dfs.name.dir or dfs.data.dir
 *   directories
 */
String[] createStorageState(NodeType nodeType, boolean[] state) throws Exception {
  String[] baseDirs = (nodeType == NAME_NODE ?
                       conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY) :
                       conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
  UpgradeUtilities.createEmptyDirs(baseDirs);
  if (state[0])  // current
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "current");
  if (state[1])  // previous
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous");
  if (state[2])  // previous.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous.tmp");
  if (state[3])  // removed.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "removed.tmp");
  if (state[4])  // lastcheckpoint.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "lastcheckpoint.tmp");
  return baseDirs;
}
Project: cumulus    File: UpgradeUtilities.java
/**
 * Simulate the <code>dfs.name.dir</code> or <code>dfs.data.dir</code>
 * of a populated DFS filesystem.
 *
 * This method creates and populates the directory specified by
 *  <code>parent/dirName</code>, for each parent directory.
 * The contents of the new directories will be
 * appropriate for the given node type.  If the directory does not
 * exist, it will be created.  If the directory already exists, it
 * will first be deleted.
 *
 * By default, a singleton master populated storage
 * directory is created for a Namenode (contains edits, fsimage,
 * version, and time files) and a Datanode (contains version and
 * block files).  These directories are then
 * copied by this method to create new storage
 * directories of the appropriate type (Namenode or Datanode).
 *
 * @return the array of created directories
 */
public static File[] createStorageDirs(NodeType nodeType, String[] parents, String dirName) throws Exception {
  File[] retVal = new File[parents.length];
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i], dirName);
    createEmptyDirs(new String[] {newDir.toString()});
    LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
    switch (nodeType) {
    case NAME_NODE:
      localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      break;
    case DATA_NODE:
      localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      break;
    }
    retVal[i] = newDir;
  }
  return retVal;
}
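
A typical call, matching how createStorageState above uses it (a minimal sketch; the configuration keys are assumed to be set by the test harness):

String[] parents = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
File[] dirs = UpgradeUtilities.createStorageDirs(NodeType.NAME_NODE, parents, "previous");
// dirs[i] is <parents[i]>/previous, copied from the master NameNode storage.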
Project: cumulus    File: TestDFSRollback.java
/**
 * Verify that the new current directory is the old previous.  
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
  switch (nodeType) {
  case NAME_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertTrue(new File(baseDirs[i],"current").isDirectory());
      assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
      assertTrue(new File(baseDirs[i],"current/edits").isFile());
      assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
      assertTrue(new File(baseDirs[i],"current/fstime").isFile());
    }
    break;
  case DATA_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "current")),
          UpgradeUtilities.checksumMasterContents(nodeType));
    }
    break;
  }
  for (int i = 0; i < baseDirs.length; i++) {
    assertFalse(new File(baseDirs[i],"previous").isDirectory());
  }
}
Project: RDFS    File: TestDFSStorageStateRecovery.java
/**
 * Sets up the storage directories for the given node type, either
 * dfs.name.dir or dfs.data.dir. For each element in dfs.name.dir or
 * dfs.data.dir, the subdirectories represented by the first five elements
 * of the <code>state</code> array will be created and populated.
 * See UpgradeUtilities.createStorageDirs().
 * 
 * @param nodeType
 *   the type of node that storage should be created for. Based on this
 *   parameter either dfs.name.dir or dfs.data.dir is used from the global conf.
 * @param state
 *   a row from the testCases table which indicates which directories
 *   to set up for the node
 * @return file paths representing either dfs.name.dir or dfs.data.dir
 *   directories
 */
String[] createStorageState(NodeType nodeType, boolean[] state) throws Exception {
  String[] baseDirs = (nodeType == NAME_NODE ?
                       conf.getStrings("dfs.name.dir") :
                       conf.getStrings("dfs.data.dir"));
  UpgradeUtilities.createEmptyDirs(baseDirs);
  if (state[0])  // current
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "current");
  if (state[1])  // previous
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous");
  if (state[2])  // previous.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous.tmp");
  if (state[3])  // removed.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "removed.tmp");
  if (state[4])  // lastcheckpoint.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "lastcheckpoint.tmp");
  return baseDirs;
}
Project: RDFS    File: NameSpaceSliceStorage.java
/**
 * Format a namespace slice storage. 
 * @param nsSdir the namespace slice storage directory
 * @param nsInfo the name space info
 * @throws IOException Signals that an I/O exception has occurred.
 */
private void format(StorageDirectory nsSdir, NamespaceInfo nsInfo) throws IOException {
  LOG.info("Formatting namespace " + namespaceID + " directory "
      + nsSdir.getCurrentDir());
  nsSdir.clearDirectory(); // create directory
  File rbwDir = new File(nsSdir.getCurrentDir(), STORAGE_DIR_RBW);
  File finalizedDir = new File(nsSdir.getCurrentDir(), STORAGE_DIR_FINALIZED);
  LOG.info("Creating Directories : " + rbwDir + ", " + finalizedDir);
  if (!rbwDir.mkdirs() || !finalizedDir.mkdirs()) {
    throw new IOException("Cannot create directories : " + rbwDir + ", "
        + finalizedDir);
  }
  this.layoutVersion = FSConstants.LAYOUT_VERSION;
  this.cTime = nsInfo.getCTime();
  this.namespaceID = nsInfo.getNamespaceID();
  this.storageType = NodeType.DATA_NODE;
  nsSdir.write();
}
Project: hadoop-0.20    File: TestDFSStorageStateRecovery.java
/**
 * Sets up the storage directories for the given node type, either
 * dfs.name.dir or dfs.data.dir. For each element in dfs.name.dir or
 * dfs.data.dir, the subdirectories represented by the first five elements
 * of the <code>state</code> array will be created and populated.
 * See UpgradeUtilities.createStorageDirs().
 * 
 * @param nodeType
 *   the type of node that storage should be created for. Based on this
 *   parameter either dfs.name.dir or dfs.data.dir is used from the global conf.
 * @param state
 *   a row from the testCases table which indicates which directories
 *   to set up for the node
 * @return file paths representing either dfs.name.dir or dfs.data.dir
 *   directories
 */
String[] createStorageState(NodeType nodeType, boolean[] state) throws Exception {
  String[] baseDirs = (nodeType == NAME_NODE ?
                       conf.getStrings("dfs.name.dir") :
                       conf.getStrings("dfs.data.dir"));
  UpgradeUtilities.createEmptyDirs(baseDirs);
  if (state[0])  // current
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "current");
  if (state[1])  // previous
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous");
  if (state[2])  // previous.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous.tmp");
  if (state[3])  // removed.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "removed.tmp");
  if (state[4])  // lastcheckpoint.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "lastcheckpoint.tmp");
  return baseDirs;
}
Project: hadoop-0.20    File: TestDFSRollback.java
/**
 * Verify that the new current directory is the old previous.  
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
  switch (nodeType) {
  case NAME_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertTrue(new File(baseDirs[i],"current").isDirectory());
      assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
      assertTrue(new File(baseDirs[i],"current/edits").isFile());
      assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
      assertTrue(new File(baseDirs[i],"current/fstime").isFile());
    }
    break;
  case DATA_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "current")),
          UpgradeUtilities.checksumMasterContents(nodeType));
    }
    break;
  }
  for (int i = 0; i < baseDirs.length; i++) {
    assertFalse(new File(baseDirs[i],"previous").isDirectory());
  }
}
Project: hortonworks-extension    File: TestDFSStorageStateRecovery.java
/**
 * Sets up the storage directories for the given node type, either
 * dfs.name.dir or dfs.data.dir. For each element in dfs.name.dir or
 * dfs.data.dir, the subdirectories represented by the first five elements
 * of the <code>state</code> array will be created and populated.
 * See UpgradeUtilities.createStorageDirs().
 * 
 * @param nodeType
 *   the type of node that storage should be created for. Based on this
 *   parameter either dfs.name.dir or dfs.data.dir is used from the global conf.
 * @param state
 *   a row from the testCases table which indicates which directories
 *   to set up for the node
 * @return file paths representing either dfs.name.dir or dfs.data.dir
 *   directories
 */
String[] createStorageState(NodeType nodeType, boolean[] state) throws Exception {
  String[] baseDirs = (nodeType == NAME_NODE ?
                       conf.getStrings("dfs.name.dir") :
                       conf.getStrings("dfs.data.dir"));
  UpgradeUtilities.createEmptyDirs(baseDirs);
  if (state[0])  // current
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "current");
  if (state[1])  // previous
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous");
  if (state[2])  // previous.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous.tmp");
  if (state[3])  // removed.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "removed.tmp");
  if (state[4])  // lastcheckpoint.tmp
    UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "lastcheckpoint.tmp");
  return baseDirs;
}
Project: hortonworks-extension    File: TestDFSRollback.java
/**
 * Verify that the new current directory is the old previous.  
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
  switch (nodeType) {
  case NAME_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertTrue(new File(baseDirs[i],"current").isDirectory());
      assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
      assertTrue(new File(baseDirs[i],"current/edits").isFile());
      assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
      assertTrue(new File(baseDirs[i],"current/fstime").isFile());
    }
    break;
  case DATA_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "current")),
          UpgradeUtilities.checksumMasterContents(nodeType));
    }
    break;
  }
  for (int i = 0; i < baseDirs.length; i++) {
    assertFalse(new File(baseDirs[i],"previous").isDirectory());
  }
}
Project: hadoop-gpu    File: TestDFSRollback.java
/**
 * Verify that the new current directory is the old previous.  
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
  switch (nodeType) {
  case NAME_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertTrue(new File(baseDirs[i],"current").isDirectory());
      assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
      assertTrue(new File(baseDirs[i],"current/edits").isFile());
      assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
      assertTrue(new File(baseDirs[i],"current/fstime").isFile());
    }
    break;
  case DATA_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "current")),
          UpgradeUtilities.checksumMasterContents(nodeType));
    }
    break;
  }
  for (int i = 0; i < baseDirs.length; i++) {
    assertFalse(new File(baseDirs[i],"previous").isDirectory());
  }
}
Project: hadoop-EAR    File: TestDFSStartupVersions.java
/**
 * Writes an INFO log message containing the parameters.
 */
void log(String label, NodeType nodeType, Integer testCase, StorageInfo version) {
  String testCaseLine = "";
  if (testCase != null) {
    testCaseLine = " testCase="+testCase;
  }
  LOG.info("============================================================");
  LOG.info("***TEST*** " + label + ":"
           + testCaseLine
           + " nodeType="+nodeType
           + " layoutVersion="+version.getLayoutVersion()
           + " namespaceID="+version.getNamespaceID()
           + " fsscTime="+version.getCTime());
}
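
The resulting INFO output looks roughly like this (all values hypothetical):

============================================================
***TEST*** startup: testCase=3 nodeType=DATA_NODE layoutVersion=-19 namespaceID=463031076 fsscTime=0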
Project: hadoop-EAR    File: UpgradeUtilities.java
public static long checksumMasterContents(NodeType nodeType, int nnIndex) throws IOException {
  if (nodeType == NAME_NODE) {
    return namenodeStorageChecksums[nnIndex];
  } else {
    return datanodeStorageChecksum;
  }
}
Project: hadoop-EAR    File: UpgradeUtilities.java
/**
 * Compute the checksum of all the files in the specified directory. The
 * contents of subdirectories are not included. This method provides an easy
 * way to ensure equality between the contents of two directories.
 *
 * @param nodeType
 *          if DATA_NODE then any file named "VERSION" is ignored. This is
 *          because this file is changed every time the Datanode is
 *          started.
 * @param dir
 *          must be a directory. Subdirectories are ignored.
 * @param skipFiles
 *          files to be skipped
 *
 * @throws IllegalArgumentException
 *           if specified directory is not a directory
 * @throws IOException
 *           if an IOException occurs while reading the files
 * @return the computed checksum value
 */
public static long checksumContents(NodeType nodeType, File dir,
    String[] skipFiles) throws IOException {
  if (!dir.isDirectory()) {
    throw new IllegalArgumentException(
        "Given argument is not a directory: " + dir);
  }
  File[] list = dir.listFiles();
  Arrays.sort(list);
  CRC32 checksum = new CRC32();
  for (int i = 0; i < list.length; i++) {
    if (!list[i].isFile() || inList(skipFiles, list[i].getName())) {
      continue;
    }

    // skip VERSION file for DataNodes
    if (nodeType == DATA_NODE && list[i].getName().equals("VERSION")) {
      continue; 
    }
    FileInputStream fis = null;
    try {
      fis = new FileInputStream(list[i]);
      byte[] buffer = new byte[1024];
      int bytesRead;
      while ((bytesRead = fis.read(buffer)) != -1) {
        checksum.update(buffer, 0, bytesRead);
      }
    } finally {
      if(fis != null) {
        fis.close();
      }
    }
  }
  return checksum.getValue();
}
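
A minimal sketch of how the TestAvatarQJMUpgrade snippets below use this overload, skipping a file that legitimately changes across restarts:

long before = UpgradeUtilities.checksumContents(
    NodeType.JOURNAL_NODE, dir.getCurrentDir(),
    new String[] { "last-promised-epoch" });
// ... restart or upgrade the cluster ...
long after = UpgradeUtilities.checksumContents(
    NodeType.JOURNAL_NODE, dir.getCurrentDir(),
    new String[] { "last-promised-epoch" });
assertEquals(before, after);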
Project: hadoop-EAR    File: UpgradeUtilities.java
public static File[] createStorageDirs(NodeType nodeType, String[] parents, String dirName,
    File srcFile) throws Exception {
  File[] retVal = new File[parents.length];
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i], dirName);
    createEmptyDirs(new String[] {newDir.toString()});
    LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
    switch (nodeType) {
    case NAME_NODE:
      localFS.copyToLocalFile(new Path(srcFile.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      Path newImgDir = new Path(newDir.getParent(), "image");
      if (!localFS.exists(newImgDir))
        localFS.copyToLocalFile(
            new Path(srcFile.toString(), "image"),
            newImgDir,
            false);
      break;
    case DATA_NODE:
      localFS.copyToLocalFile(new Path(srcFile.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      Path newStorageFile = new Path(newDir.getParent(), "storage");
      if (!localFS.exists(newStorageFile))
        localFS.copyToLocalFile(
            new Path(srcFile.toString(), "storage"),
            newStorageFile,
            false);
      break;
    }
    retVal[i] = newDir;
  }
  return retVal;
}
Project: hadoop-EAR    File: TestDFSRollback.java
/**
 * Verify that the new current directory is the old previous.  
 * It is assumed that the server has recovered and rolled back.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
  List<File> curDirs = Lists.newArrayList();
  for (String baseDir : baseDirs) {
    File curDir = new File(baseDir, "current");
    curDirs.add(curDir);
    switch (nodeType) {
    case NAME_NODE:
      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
      break;
    case DATA_NODE:
      for (int i = 0; i < baseDirs.length; i++) {
        assertEquals(
            UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "current")),
            UpgradeUtilities.checksumMasterContents(nodeType));
        File nsBaseDir = NameSpaceSliceStorage.getNsRoot(
            UpgradeUtilities.getCurrentNamespaceID(cluster),
            new File(baseDirs[i], "current"));
        assertEquals(
            UpgradeUtilities.checksumContents(nodeType,
                new File(nsBaseDir, MiniDFSCluster.FINALIZED_DIR_NAME)),
            UpgradeUtilities.checksumDatanodeNSStorageContents());
      }
      break;
    }
  }

  FSImageTestUtil.assertParallelFilesAreIdentical(
      curDirs, Collections.<String>emptySet());

  for (int i = 0; i < baseDirs.length; i++) {
    assertFalse(new File(baseDirs[i],"previous").isDirectory());
  }
}
Project: hadoop-EAR    File: TestAvatarQJMUpgrade.java
private long[] checksumCurrent() throws IOException {
  int i = 0;
  long[] checksums = new long[storageSet.size()];
  for (StorageDirectory dir : storageSet) {
    assertTrue(dir.getCurrentDir().exists());
    checksums[i++] = UpgradeUtilities.checksumContents(
        NodeType.JOURNAL_NODE, dir.getCurrentDir(),
        new String[] { "last-promised-epoch" });
  }
  return checksums;
}
Project: hadoop-EAR    File: TestAvatarQJMUpgrade.java
private void checkState(long[] checksums, boolean failover, boolean previous,
    long[] expectedChecksums)
    throws Exception {
  // Verify that the checksums for the previous directory match those of
  // the old current directory.

  int i = 0;
  for (StorageDirectory dir : h.storageSet) {
    System.out.println("Directory : " + dir);
    File dirToProcess = (previous) ? dir.getPreviousDir() : dir.getCurrentDir();
    assertTrue(dirToProcess.exists());
    // We skip last-promised-epoch since it is bumped up just before the
    // upgrade is done when we recover unfinalized segments.
    if (expectedChecksums == null) {
      assertEquals(checksums[i++], UpgradeUtilities.checksumContents(
          NodeType.JOURNAL_NODE, dirToProcess,
          new String[] { "last-promised-epoch" }));
    } else {
      assertEquals(checksums[i], expectedChecksums[i]);
      i++;
    }
  }

  // Ensure we can still write.
  DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/test1"), 1024,
      (short) 1, 0);

  // Ensure we can still do checkpoint.
  h.doCheckpoint();

  // Ensure we can still failover.
  if (failover) {
    cluster.failOver();
  }
}
Project: hadoop-EAR    File: NNStorage.java
/**
 * Construct the NNStorage.
 * @param conf Namenode configuration.
 * @param imageDirs Directories the image can be stored in.
 * @param editsDirs Directories the editlog can be stored in.
 * @param locationMap Locations of the storage directories, keyed by URI.
 * @throws IOException if any directories are inaccessible.
 */
public NNStorage(Configuration conf, Collection<URI> imageDirs, 
    Collection<URI> editsDirs, Map<URI, NNStorageLocation> locationMap) 
    throws IOException {
  super(NodeType.NAME_NODE);

  storageDirs = Collections.synchronizedList(new ArrayList<StorageDirectory>());

  // this may modify the editsDirs, so copy before passing in
  setStorageDirectories(imageDirs, 
                        new ArrayList<URI>(editsDirs), locationMap);
  this.conf = conf;
}
Project: hadoop-EAR    File: Storage.java
/**
 * Get common storage fields.
 * Should be overridden if additional fields need to be read.
 * 
 * @param props properties loaded from the storage directory's VERSION file
 * @param sd the storage directory being read
 * @throws IOException if the fields are missing or inconsistent
 */
protected void getFields(Properties props, 
                         StorageDirectory sd 
                         ) throws IOException {
  String sv, st, sid, sct;
  sv = props.getProperty(LAYOUT_VERSION);
  st = props.getProperty(STORAGE_TYPE);
  sid = props.getProperty(NAMESPACE_ID);
  sct = props.getProperty(CHECK_TIME);
  if (sv == null || st == null || sid == null || sct == null)
    throw new InconsistentFSStateException(sd.root,
                                           "file " + STORAGE_FILE_VERSION + " is invalid.");
  int rv = Integer.parseInt(sv);
  NodeType rt = NodeType.valueOf(st);
  int rid = Integer.parseInt(sid);
  long rct = Long.parseLong(sct);
  if (!storageType.equals(rt) ||
      !((namespaceID == 0) || (rid == 0) || namespaceID == rid))
    throw new InconsistentFSStateException(sd.root,
                                           "is incompatible with others. " +
                                           " namespaceID is " + namespaceID +
                                           " and rid is " + rid + "," +
                                           " storage type is " + storageType + 
                                           " but rt is " + rt);
  if (rv < FSConstants.LAYOUT_VERSION) // future version
    throw new IncorrectVersionException(rv, "storage directory " 
                                        + sd.root.getCanonicalPath());
  layoutVersion = rv;
  storageType = rt;
  namespaceID = rid;
  cTime = rct;
}
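
The properties parsed here come from the VERSION file in a storage directory's current/ subdirectory; a representative NameNode VERSION file (values hypothetical):

#Tue Nov 15 10:22:35 PST 2011
layoutVersion=-41
storageType=NAME_NODE
namespaceID=463031076
cTime=0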
Project: hadoop-EAR    File: Storage.java
/** Validate and set storage type from {@link Properties}*/
protected void setStorageType(Properties props, StorageDirectory sd)
    throws InconsistentFSStateException {
  NodeType type = NodeType.valueOf(getProperty(props, sd, STORAGE_TYPE));
  if (!storageType.equals(type)) {
    throw new InconsistentFSStateException(sd.root,
        "node type is incompatible with others.");
  }
  storageType = type;
}
Project: hadoop-EAR    File: JNStorage.java
/**
 * @param logDir the path to the directory in which data will be stored
 * @param errorReporter a callback to report errors
 * @param imageDir true if this directory stores an image, false for edits
 * @param conf the configuration
 * @throws IOException if the storage directory cannot be analyzed
 */
protected JNStorage(File logDir, StorageErrorReporter errorReporter,
    boolean imageDir, Configuration conf) throws IOException {
  super(NodeType.JOURNAL_NODE);
  this.conf = conf;
  sd = new StorageDirectory(logDir, imageDir ? NameNodeDirType.IMAGE
      : NameNodeDirType.EDITS);
  this.addStorageDir(sd);
  this.isImageDir = imageDir;
  // null for metrics, will be extended


  analyzeStorage();
}
Project: hadoop-on-lustre    File: TestDFSUpgrade.java
/**
 * Verify that the current and previous directories exist.  Verify that 
 * previous hasn't been modified by comparing the checksum of all its
 * containing files with their original checksum.  It is assumed that
 * the server has recovered and upgraded.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
  switch (nodeType) {
  case NAME_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertTrue(new File(baseDirs[i],"current").isDirectory());
      assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
      assertTrue(new File(baseDirs[i],"current/edits").isFile());
      assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
      assertTrue(new File(baseDirs[i],"current/fstime").isFile());
    }
    break;
  case DATA_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "current")),
          UpgradeUtilities.checksumMasterContents(nodeType));
    }
    break;
  }
  for (int i = 0; i < baseDirs.length; i++) {
    assertTrue(new File(baseDirs[i],"previous").isDirectory());
    assertEquals(
        UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "previous")),
        UpgradeUtilities.checksumMasterContents(nodeType));
  }
}
Project: hadoop-on-lustre    File: TestDFSStorageStateRecovery.java
/**
 * Verify that the current and/or previous exist as indicated by 
 * the method parameters.  If previous exists, verify that
 * it hasn't been modified by comparing the checksum of all its
 * containing files with their original checksum.  It is assumed that
 * the server has recovered.
 */
void checkResult(NodeType nodeType, String[] baseDirs, 
                 boolean currentShouldExist, boolean previousShouldExist) 
  throws IOException
{
  switch (nodeType) {
  case NAME_NODE:
    if (currentShouldExist) {
      for (int i = 0; i < baseDirs.length; i++) {
        assertTrue(new File(baseDirs[i],"current").isDirectory());
        assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
        assertTrue(new File(baseDirs[i],"current/edits").isFile());
        assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
        assertTrue(new File(baseDirs[i],"current/fstime").isFile());
      }
    }
    break;
  case DATA_NODE:
    if (currentShouldExist) {
      for (int i = 0; i < baseDirs.length; i++) {
        assertEquals(
            UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "current")),
            UpgradeUtilities.checksumMasterContents(nodeType));
      }
    }
    break;
  }
  if (previousShouldExist) {
    for (int i = 0; i < baseDirs.length; i++) {
      assertTrue(new File(baseDirs[i],"previous").isDirectory());
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "previous")),
          UpgradeUtilities.checksumMasterContents(nodeType));
    }
  }
}
Project: hadoop-on-lustre    File: TestDFSStartupVersions.java
/**
 * Writes an INFO log message containing the parameters.
 */
void log(String label, NodeType nodeType, Integer testCase, StorageInfo version) {
  String testCaseLine = "";
  if (testCase != null) {
    testCaseLine = " testCase="+testCase;
  }
  LOG.info("============================================================");
  LOG.info("***TEST*** " + label + ":"
           + testCaseLine
           + " nodeType="+nodeType
           + " layoutVersion="+version.getLayoutVersion()
           + " namespaceID="+version.getNamespaceID()
           + " fsscTime="+version.getCTime());
}
Project: hadoop-on-lustre    File: UpgradeUtilities.java
/**
 * Return the checksum for the singleton master storage directory
 * of the given node type.
 */
public static long checksumMasterContents(NodeType nodeType) throws IOException {
  if (nodeType == NAME_NODE) {
    return namenodeStorageChecksum;
  } else {
    return datanodeStorageChecksum;
  }
}
Project: hadoop-on-lustre    File: UpgradeUtilities.java
/**
 * Compute the checksum of all the files in the specified directory.
 * The contents of subdirectories are not included. This method provides
 * an easy way to ensure equality between the contents of two directories.
 *
 * @param nodeType if DATA_NODE then any file named "VERSION" is ignored.
 *    This is because this file is changed every time
 *    the Datanode is started.
 * @param dir must be a directory. Subdirectories are ignored.
 *
 * @throws IllegalArgumentException if specified directory is not a directory
 * @throws IOException if an IOException occurs while reading the files
 * @return the computed checksum value
 */
public static long checksumContents(NodeType nodeType, File dir) throws IOException {
  if (!dir.isDirectory()) {
    throw new IllegalArgumentException(
        "Given argument is not a directory: " + dir);
  }
  File[] list = dir.listFiles();
  Arrays.sort(list);
  CRC32 checksum = new CRC32();
  for (int i = 0; i < list.length; i++) {
    if (!list[i].isFile()) {
      continue;
    }
    // skip VERSION file for DataNodes
    if (nodeType == DATA_NODE && list[i].getName().equals("VERSION")) {
      continue; 
    }
    FileInputStream fis = null;
    try {
      fis = new FileInputStream(list[i]);
      byte[] buffer = new byte[1024];
      int bytesRead;
      while ((bytesRead = fis.read(buffer)) != -1) {
        checksum.update(buffer, 0, bytesRead);
      }
    } finally {
      if(fis != null) {
        fis.close();
      }
    }
  }
  return checksum.getValue();
}
Project: hadoop-on-lustre    File: UpgradeUtilities.java
/**
 * Simulate the <code>dfs.name.dir</code> or <code>dfs.data.dir</code>
 * of a populated DFS filesystem.
 *
 * This method creates and populates the directory specified by
 *  <code>parent/dirName</code>, for each parent directory.
 * The contents of the new directories will be
 * appropriate for the given node type.  If the directory does not
 * exist, it will be created.  If the directory already exists, it
 * will first be deleted.
 *
 * By default, a singleton master populated storage
 * directory is created for a Namenode (contains edits, fsimage,
 * version, and time files) and a Datanode (contains version and
 * block files).  These directories are then
 * copied by this method to create new storage
 * directories of the appropriate type (Namenode or Datanode).
 *
 * @return the array of created directories
 */
public static File[] createStorageDirs(NodeType nodeType, String[] parents, String dirName) throws Exception {
  File[] retVal = new File[parents.length];
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i], dirName);
    createEmptyDirs(new String[] {newDir.toString()});
    LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
    switch (nodeType) {
    case NAME_NODE:
      localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      Path newImgDir = new Path(newDir.getParent(), "image");
      if (!localFS.exists(newImgDir))
        localFS.copyToLocalFile(
            new Path(namenodeStorage.toString(), "image"),
            newImgDir,
            false);
      break;
    case DATA_NODE:
      localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
                              new Path(newDir.toString()),
                              false);
      Path newStorageFile = new Path(newDir.getParent(), "storage");
      if (!localFS.exists(newStorageFile))
        localFS.copyToLocalFile(
            new Path(datanodeStorage.toString(), "storage"),
            newStorageFile,
            false);
      break;
    }
    retVal[i] = newDir;
  }
  return retVal;
}
Project: hadoop-on-lustre    File: Storage.java
/**
 * Get common storage fields.
 * Should be overridden if additional fields need to be read.
 * 
 * @param props properties loaded from the storage directory's VERSION file
 * @param sd the storage directory being read
 * @throws IOException if the fields are missing or inconsistent
 */
protected void getFields(Properties props, 
                         StorageDirectory sd 
                         ) throws IOException {
  String sv, st, sid, sct;
  sv = props.getProperty("layoutVersion");
  st = props.getProperty("storageType");
  sid = props.getProperty("namespaceID");
  sct = props.getProperty("cTime");
  if (sv == null || st == null || sid == null || sct == null)
    throw new InconsistentFSStateException(sd.root,
                                           "file " + STORAGE_FILE_VERSION + " is invalid.");
  int rv = Integer.parseInt(sv);
  NodeType rt = NodeType.valueOf(st);
  int rid = Integer.parseInt(sid);
  long rct = Long.parseLong(sct);
  if (!storageType.equals(rt) ||
      !((namespaceID == 0) || (rid == 0) || namespaceID == rid))
    throw new InconsistentFSStateException(sd.root,
                                           "is incompatible with others.");
  if (rv < FSConstants.LAYOUT_VERSION) // future version
    throw new IncorrectVersionException(rv, "storage directory " 
                                        + sd.root.getCanonicalPath());
  layoutVersion = rv;
  storageType = rt;
  namespaceID = rid;
  cTime = rct;
}
Project: cumulus    File: Storage.java
/**
 * Get common storage fields.
 * Should be overridden if additional fields need to be read.
 * 
 * @param props properties loaded from the storage directory's VERSION file
 * @param sd the storage directory being read
 * @throws IOException if the fields are missing or inconsistent
 */
protected void getFields(Properties props, 
                         StorageDirectory sd 
                         ) throws IOException {
  String sv, st, sid, sct;
  sv = props.getProperty("layoutVersion");
  st = props.getProperty("storageType");
  sid = props.getProperty("namespaceID");
  sct = props.getProperty("cTime");
  if (sv == null || st == null || sid == null || sct == null)
    throw new InconsistentFSStateException(sd.root,
                                           "file " + STORAGE_FILE_VERSION + " is invalid.");
  int rv = Integer.parseInt(sv);
  NodeType rt = NodeType.valueOf(st);
  int rid = Integer.parseInt(sid);
  long rct = Long.parseLong(sct);
  if (!storageType.equals(rt) ||
      !((namespaceID == 0) || (rid == 0) || namespaceID == rid))
    throw new InconsistentFSStateException(sd.root,
                                           "is incompatible with others.");
  if (rv < FSConstants.LAYOUT_VERSION) // future version
    throw new IncorrectVersionException(rv, "storage directory " 
                                        + sd.root.getCanonicalPath());
  layoutVersion = rv;
  storageType = rt;
  namespaceID = rid;
  cTime = rct;
}
Project: cumulus    File: TestDFSUpgrade.java
/**
 * Verify that the current and previous directories exist.  Verify that 
 * previous hasn't been modified by comparing the checksum of all its
 * containing files with their original checksum.  It is assumed that
 * the server has recovered and upgraded.
 */
void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
  switch (nodeType) {
  case NAME_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertTrue(new File(baseDirs[i],"current").isDirectory());
      assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
      assertTrue(new File(baseDirs[i],"current/edits").isFile());
      assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
      assertTrue(new File(baseDirs[i],"current/fstime").isFile());
    }
    break;
  case DATA_NODE:
    for (int i = 0; i < baseDirs.length; i++) {
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "current")),
          UpgradeUtilities.checksumMasterContents(nodeType));
    }
    break;
  }
  for (int i = 0; i < baseDirs.length; i++) {
    assertTrue(new File(baseDirs[i],"previous").isDirectory());
    assertEquals(
        UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "previous")),
        UpgradeUtilities.checksumMasterContents(nodeType));
  }
}
Project: cumulus    File: TestDFSStorageStateRecovery.java
/**
 * Verify that the current and/or previous exist as indicated by 
 * the method parameters.  If previous exists, verify that
 * it hasn't been modified by comparing the checksum of all its
 * containing files with their original checksum.  It is assumed that
 * the server has recovered.
 */
void checkResult(NodeType nodeType, String[] baseDirs, 
                 boolean currentShouldExist, boolean previousShouldExist) 
  throws IOException
{
  switch (nodeType) {
  case NAME_NODE:
    if (currentShouldExist) {
      for (int i = 0; i < baseDirs.length; i++) {
        assertTrue(new File(baseDirs[i],"current").isDirectory());
        assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
        assertTrue(new File(baseDirs[i],"current/edits").isFile());
        assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
        assertTrue(new File(baseDirs[i],"current/fstime").isFile());
      }
    }
    break;
  case DATA_NODE:
    if (currentShouldExist) {
      for (int i = 0; i < baseDirs.length; i++) {
        assertEquals(
            UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "current")),
            UpgradeUtilities.checksumMasterContents(nodeType));
      }
    }
    break;
  }
  if (previousShouldExist) {
    for (int i = 0; i < baseDirs.length; i++) {
      assertTrue(new File(baseDirs[i],"previous").isDirectory());
      assertEquals(
          UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "previous")),
          UpgradeUtilities.checksumMasterContents(nodeType));
    }
  }
}
Project: cumulus    File: TestDFSStartupVersions.java
/**
 * Writes an INFO log message containing the parameters.
 */
void log(String label, NodeType nodeType, Integer testCase, StorageInfo version) {
  String testCaseLine = "";
  if (testCase != null) {
    testCaseLine = " testCase="+testCase;
  }
  LOG.info("============================================================");
  LOG.info("***TEST*** " + label + ":"
           + testCaseLine
           + " nodeType="+nodeType
           + " layoutVersion="+version.getLayoutVersion()
           + " namespaceID="+version.getNamespaceID()
           + " fsscTime="+version.getCTime());
}
Project: cumulus    File: UpgradeUtilities.java
/**
 * Return the checksum for the singleton master storage directory
 * of the given node type.
 */
public static long checksumMasterContents(NodeType nodeType) throws IOException {
  if (nodeType == NAME_NODE) {
    return namenodeStorageChecksum;
  } else {
    return datanodeStorageChecksum;
  }
}
Project: cumulus    File: UpgradeUtilities.java
/**
 * Compute the checksum of all the files in the specified directory.
 * The contents of subdirectories are not included. This method provides
 * an easy way to ensure equality between the contents of two directories.
 *
 * @param nodeType if DATA_NODE then any file named "VERSION" is ignored.
 *    This is because this file is changed every time
 *    the Datanode is started.
 * @param dir must be a directory. Subdirectories are ignored.
 *
 * @throws IllegalArgumentException if specified directory is not a directory
 * @throws IOException if an IOException occurs while reading the files
 * @return the computed checksum value
 */
public static long checksumContents(NodeType nodeType, File dir) throws IOException {
  if (!dir.isDirectory()) {
    throw new IllegalArgumentException(
        "Given argument is not a directory: " + dir);
  }
  File[] list = dir.listFiles();
  Arrays.sort(list);
  CRC32 checksum = new CRC32();
  for (int i = 0; i < list.length; i++) {
    if (!list[i].isFile()) {
      continue;
    }
    // skip VERSION file for DataNodes
    if (nodeType == DATA_NODE && list[i].getName().equals("VERSION")) {
      continue; 
    }
    FileInputStream fis = null;
    try {
      fis = new FileInputStream(list[i]);
      byte[] buffer = new byte[1024];
      int bytesRead;
      while ((bytesRead = fis.read(buffer)) != -1) {
        checksum.update(buffer, 0, bytesRead);
      }
    } finally {
      if(fis != null) {
        fis.close();
      }
    }
  }
  return checksum.getValue();
}