Example source code for the Java class org.apache.hadoop.hdfs.tools.offlineImageViewer.SpotCheckImageVisitor.ImageInfo

Project: hadoop-EAR    File: TestOIVCanReadOldVersions.java    (the same spotCheck/compareSpotCheck helpers appear verbatim in hadoop-plus, hadoop-TCP, hardfs, hadoop-on-lustre2, cumulus, and RDFS)
private void spotCheck(String hadoopVersion, String input, 
     ImageInfo inodes, ImageInfo INUCs) {
  SpotCheckImageVisitor v = new SpotCheckImageVisitor();
  OfflineImageViewer oiv = new OfflineImageViewer(input, v, false);
  try {
    oiv.go();
  } catch (IOException e) {
    fail("Error processing file: " + input);
  }

  compareSpotCheck(hadoopVersion, v.getINodesInfo(), inodes);
  compareSpotCheck(hadoopVersion, v.getINUCsInfo(), INUCs);
  System.out.println("Successfully processed fsimage file from Hadoop version " +
                                                  hadoopVersion);
}
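The visitor driven above can also be exercised outside of JUnit. A minimal standalone sketch, using only the calls already shown in spotCheck; the helper name, the fsimage path argument, and the printed summary are illustrative additions, and the meaning of the third constructor flag is an assumption (the tests pass false):

// Hypothetical helper, not part of the original test class.
private static void printImageSummary(String fsimagePath) throws IOException {
  SpotCheckImageVisitor v = new SpotCheckImageVisitor();
  // false: same flag value the tests pass (presumably "skip block information")
  OfflineImageViewer oiv = new OfflineImageViewer(fsimagePath, v, false);
  oiv.go();  // walk the image, feeding every record to the visitor
  ImageInfo inodes = v.getINodesInfo();  // aggregate statistics over regular inodes
  System.out.println("blocks=" + inodes.totalNumBlocks
      + ", bytes=" + inodes.totalFileSize
      + ", paths=" + inodes.pathNames.size());
}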
Project: hadoop-EAR    File: TestOIVCanReadOldVersions.java
private void compareSpotCheck(String hadoopVersion, 
                   ImageInfo generated, ImageInfo expected) {
  assertEquals("Version " + hadoopVersion + ": Same number of total blocks", 
                   expected.totalNumBlocks, generated.totalNumBlocks);
  assertEquals("Version " + hadoopVersion + ": Same total file size", 
                   expected.totalFileSize, generated.totalFileSize);
  assertEquals("Version " + hadoopVersion + ": Same total replication factor", 
                   expected.totalReplications, generated.totalReplications);
  assertEquals("Version " + hadoopVersion + ": One-to-one matching of path names", 
                   expected.pathNames, generated.pathNames);
}
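compareSpotCheck reads four fields off each ImageInfo. The nested class itself is not reproduced on this page; the following is a sketch of its shape as implied by those accesses, with field types and initializers assumed (the real declaration lives inside SpotCheckImageVisitor and may differ):

// Sketch only; the actual nested class in SpotCheckImageVisitor may differ in detail.
static class ImageInfo {
  long totalNumBlocks = 0;        // total blocks across all files visited
  long totalFileSize = 0;         // sum of file lengths, in bytes
  long totalReplications = 0;     // sum of per-file replication factors
  Set<String> pathNames = new HashSet<String>();  // every path seen in the image
}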
Project: hadoop-EAR    File: TestOIVCanReadOldVersions.java    (the same testOldFSImages body appears in hadoop-plus, hadoop-TCP, hardfs, hadoop-on-lustre2, cumulus, and RDFS; some of those forks add a JUnit @Test annotation)
public void testOldFSImages() {
  // Define the expected values from the prior versions, as they were created
  // and verified at time of creation
  Set<String> pathNames = new HashSet<String>();
  Collections.addAll(pathNames, "", /* root */
                                "/bar",
                                "/bar/dir0",
                                "/bar/dir0/file0",
                                "/bar/dir0/file1",
                                "/bar/dir1",
                                "/bar/dir1/file0",
                                "/bar/dir1/file1",
                                "/bar/dir2",
                                "/bar/dir2/file0",
                                "/bar/dir2/file1",
                                "/foo",
                                "/foo/dir0",
                                "/foo/dir0/file0",
                                "/foo/dir0/file1",
                                "/foo/dir0/file2",
                                "/foo/dir0/file3",
                                "/foo/dir1",
                                "/foo/dir1/file0",
                                "/foo/dir1/file1",
                                "/foo/dir1/file2",
                                "/foo/dir1/file3");

  Set<String> INUCpaths = new HashSet<String>();
  Collections.addAll(INUCpaths, "/bar/dir0/file0",
                                "/bar/dir0/file1",
                                "/bar/dir1/file0",
                                "/bar/dir1/file1",
                                "/bar/dir2/file0",
                                "/bar/dir2/file1");

  ImageInfo v18Inodes = new ImageInfo(); // Hadoop version 18 inodes
  v18Inodes.totalNumBlocks = 12;
  v18Inodes.totalFileSize = 1069548540L;
  v18Inodes.pathNames = pathNames;
  v18Inodes.totalReplications = 14;

  ImageInfo v18INUCs = new ImageInfo(); // Hadoop version 18 inodes under construction
  v18INUCs.totalNumBlocks = 0;
  v18INUCs.totalFileSize = 0;
  v18INUCs.pathNames = INUCpaths;
  v18INUCs.totalReplications = 6;

  ImageInfo v19Inodes = new ImageInfo(); // Hadoop version 19 inodes
  v19Inodes.totalNumBlocks = 12;
  v19Inodes.totalFileSize = 1069548540L;
  v19Inodes.pathNames = pathNames;
  v19Inodes.totalReplications = 14;

  ImageInfo v19INUCs = new ImageInfo(); // Hadoop version 19 inodes under construction
  v19INUCs.totalNumBlocks = 0;
  v19INUCs.totalFileSize = 0;
  v19INUCs.pathNames = INUCpaths;
  v19INUCs.totalReplications = 6;


  spotCheck("18", TEST_CACHE_DATA_DIR + "/fsimageV18", v18Inodes, v18INUCs);
  spotCheck("19", TEST_CACHE_DATA_DIR + "/fsimageV19", v19Inodes, v19INUCs);
}
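Extending the test to cover another archived image follows the same pattern: record the expected totals when the image is generated, build a pair of ImageInfo holders, and add one more spotCheck call. A sketch for a hypothetical version-20 image; the file name and all totals below are placeholders, not values from the original tests:

// Hypothetical extension: expected values must be captured when fsimageV20 is created.
ImageInfo v20Inodes = new ImageInfo();
v20Inodes.totalNumBlocks = 12;            // placeholder
v20Inodes.totalFileSize = 1069548540L;    // placeholder
v20Inodes.pathNames = pathNames;
v20Inodes.totalReplications = 14;         // placeholder

ImageInfo v20INUCs = new ImageInfo();     // inodes under construction
v20INUCs.pathNames = INUCpaths;
v20INUCs.totalReplications = 6;           // placeholder

spotCheck("20", TEST_CACHE_DATA_DIR + "/fsimageV20", v20Inodes, v20INUCs);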