Java class org.apache.hadoop.hdfs.LogVerificationAppender usage examples (source code)
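
LogVerificationAppender is a small Log4j 1.x appender used in Hadoop's HDFS tests: it is attached to the root logger so a test can later inspect the events that were logged. The snippets below rely on three of its methods (getLog(), countLinesWithMessage(String), countExceptionsWithMessage(String)). As orientation, here is a minimal sketch of such an appender, inferred from that usage; the actual class shipped with Hadoop may differ in detail.

import java.util.ArrayList;
import java.util.List;

import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;
import org.apache.log4j.spi.ThrowableInformation;

/**
 * Minimal sketch of a log-capturing Log4j 1.x appender, inferred from how
 * LogVerificationAppender is used in the snippets below.
 */
public class LogVerificationAppender extends AppenderSkeleton {
  private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();

  @Override
  public boolean requiresLayout() {
    return false;
  }

  @Override
  protected void append(final LoggingEvent loggingEvent) {
    // store every event routed to this appender
    log.add(loggingEvent);
  }

  @Override
  public void close() {
  }

  /** Returns a copy of all events captured so far. */
  public List<LoggingEvent> getLog() {
    return new ArrayList<LoggingEvent>(log);
  }

  /** Counts captured events whose rendered message contains the given text. */
  public int countLinesWithMessage(final String text) {
    int count = 0;
    for (LoggingEvent e : getLog()) {
      String msg = e.getRenderedMessage();
      if (msg != null && msg.contains(text)) {
        count++;
      }
    }
    return count;
  }

  /** Counts captured events whose attached exception message contains the given text. */
  public int countExceptionsWithMessage(final String text) {
    int count = 0;
    for (LoggingEvent e : getLog()) {
      ThrowableInformation t = e.getThrowableInformation();
      if (t != null && t.getThrowable() != null) {
        String msg = t.getThrowable().getMessage();
        if (msg != null && msg.contains(text)) {
          count++;
        }
      }
    }
    return count;
  }
}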

Project: hadoop    File: TestReplicationPolicy.java
/**
 * In this test case, it tries to choose more targets than there are
 * available nodes and checks the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // make data nodes 0 & 1 not qualified to be chosen: not enough disk space
  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }

  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);

  // try to choose NUM_OF_DATANODES targets, which is more than the number
  // of actually available nodes.
  DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
  assertEquals(targets.length, NUM_OF_DATANODES - 2);

  final List<LoggingEvent> log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.size() == 0);
  final LoggingEvent lastLogEntry = log.get(log.size() - 1);

  assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
  // Supposed to place a replica on each node, but two data nodes are not
  // available for placing replicas, so here we expect a shortfall of 2
  assertTrue(((String) lastLogEntry.getMessage()).contains("in need of 2"));

  resetHeartbeatForStorages();
}
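
Every example in this listing follows the same pattern: attach the appender to the root logger, exercise the code under test, then assert on the captured events. A distilled, hypothetical version of that pattern is sketched below; the test name, the asserted message fragment, and the removeAppender cleanup in the finally block are illustrative additions, not part of the snippets.

@Test
public void testExpectedWarningIsLogged() throws Exception {
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  try {
    // ... exercise the code under test here ...

    // assert on what was captured
    assertTrue(appender.countLinesWithMessage("expected log fragment") > 0);
  } finally {
    // detach the appender so captured events do not leak into later tests
    logger.removeAppender(appender);
  }
}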
Project: aliyun-oss-hadoop-fs    File: TestReplicationPolicy.java
/**
 * In this test case, it tries to choose more targets than there are
 * available nodes and checks the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // make data nodes 0 & 1 not qualified to be chosen: not enough disk space
  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }

  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);

  // try to choose dataNodes.length targets, which is more than the number
  // of actually available nodes.
  DatanodeStorageInfo[] targets = chooseTarget(dataNodes.length);
  assertEquals(targets.length, dataNodes.length - 2);

  final List<LoggingEvent> log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.size() == 0);
  final LoggingEvent lastLogEntry = log.get(log.size() - 1);

  assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
  // Supposed to place a replica on each node, but two data nodes are not
  // available for placing replicas, so here we expect a shortfall of 2
  assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));

  resetHeartbeatForStorages();
}
Project: big-c    File: TestReplicationPolicy.java
/**
 * In this test case, it tries to choose more targets than there are
 * available nodes and checks the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // make data nodes 0 & 1 not qualified to be chosen: not enough disk space
  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }

  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);

  // try to choose NUM_OF_DATANODES targets, which is more than the number
  // of actually available nodes.
  DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
  assertEquals(targets.length, NUM_OF_DATANODES - 2);

  final List<LoggingEvent> log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.size() == 0);
  final LoggingEvent lastLogEntry = log.get(log.size() - 1);

  assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
  // Supposed to place a replica on each node, but two data nodes are not
  // available for placing replicas, so here we expect a shortfall of 2
  assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));

  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestReplicationPolicy.java
/**
 * In this test case, it tries to choose more targets than there are
 * available nodes and checks the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // make data nodes 0 & 1 not qualified to be chosen: not enough disk space
  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }

  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);

  // try to choose NUM_OF_DATANODES targets, which is more than the number
  // of actually available nodes.
  DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
  assertEquals(targets.length, NUM_OF_DATANODES - 2);

  final List<LoggingEvent> log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.size() == 0);
  final LoggingEvent lastLogEntry = log.get(log.size() - 1);

  assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
  // Supposed to place a replica on each node, but two data nodes are not
  // available for placing replicas, so here we expect a shortfall of 2
  assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));

  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}
Project: hadoop-plus    File: TestReplicationPolicy.java
/**
 * In this test case, it tries to choose more targets than there are
 * available nodes and checks the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // make data nodes 0 & 1 not qualified to be chosen: not enough disk space
  for(int i=0; i<2; i++) {
    dataNodes[i].updateHeartbeat(
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0);
  }

  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);

  // try to choose NUM_OF_DATANODES targets, which is more than the number
  // of actually available nodes.
  DatanodeDescriptor[] targets = replicator.chooseTarget(filename, 
      NUM_OF_DATANODES, dataNodes[0], new ArrayList<DatanodeDescriptor>(),
      BLOCK_SIZE);
  assertEquals(targets.length, NUM_OF_DATANODES - 2);

  final List<LoggingEvent> log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.size() == 0);
  final LoggingEvent lastLogEntry = log.get(log.size() - 1);

  assertEquals(lastLogEntry.getLevel(), Level.WARN);
  // Supposed to place a replica on each node, but two data nodes are not
  // available for placing replicas, so here we expect a shortfall of 2
  assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));

  for(int i=0; i<2; i++) {
    dataNodes[i].updateHeartbeat(
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
  }
}
Project: FlexMap    File: TestReplicationPolicy.java
/**
 * In this test case, it tries to choose more targets than there are
 * available nodes and checks the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // make data nodes 0 & 1 not qualified to be chosen: not enough disk space
  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }

  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);

  // try to choose NUM_OF_DATANODES targets, which is more than the number
  // of actually available nodes.
  DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
  assertEquals(targets.length, NUM_OF_DATANODES - 2);

  final List<LoggingEvent> log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.size() == 0);
  final LoggingEvent lastLogEntry = log.get(log.size() - 1);

  assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
  // Supposed to place a replica on each node, but two data nodes are not
  // available for placing replicas, so here we expect a shortfall of 2
  assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));

  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}
Project: hadoop-TCP    File: TestReplicationPolicy.java
/**
 * In this test case, it tries to choose more targets than there are
 * available nodes and checks the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // make data nodes 0 & 1 not qualified to be chosen: not enough disk space
  for(int i=0; i<2; i++) {
    dataNodes[i].updateHeartbeat(
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0);
  }

  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);

  // try to choose NUM_OF_DATANODES targets, which is more than the number
  // of actually available nodes.
  DatanodeDescriptor[] targets = replicator.chooseTarget(filename, 
      NUM_OF_DATANODES, dataNodes[0], new ArrayList<DatanodeDescriptor>(),
      BLOCK_SIZE);
  assertEquals(targets.length, NUM_OF_DATANODES - 2);

  final List<LoggingEvent> log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.size() == 0);
  final LoggingEvent lastLogEntry = log.get(log.size() - 1);

  assertEquals(lastLogEntry.getLevel(), Level.WARN);
  // Supposed to place a replica on each node, but two data nodes are not
  // available for placing replicas, so here we expect a shortfall of 2
  assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));

  for(int i=0; i<2; i++) {
    dataNodes[i].updateHeartbeat(
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
  }
}
Project: hardfs    File: TestReplicationPolicy.java
/**
 * In this test case, it tries to choose more targets than there are
 * available nodes and checks the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // make data nodes 0 & 1 not qualified to be chosen: not enough disk space
  for(int i=0; i<2; i++) {
    dataNodes[i].updateHeartbeat(
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0);
  }

  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);

  // try to choose NUM_OF_DATANODES targets, which is more than the number
  // of actually available nodes.
  DatanodeDescriptor[] targets = replicator.chooseTarget(filename, 
      NUM_OF_DATANODES, dataNodes[0], new ArrayList<DatanodeDescriptor>(),
      BLOCK_SIZE);
  assertEquals(targets.length, NUM_OF_DATANODES - 2);

  final List<LoggingEvent> log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.size() == 0);
  final LoggingEvent lastLogEntry = log.get(log.size() - 1);

  assertEquals(lastLogEntry.getLevel(), Level.WARN);
  // Supposed to place a replica on each node, but two data nodes are not
  // available for placing replicas, so here we expect a shortfall of 2
  assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));

  for(int i=0; i<2; i++) {
    dataNodes[i].updateHeartbeat(
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
  }
}
Project: hadoop-on-lustre2    File: TestCacheDirectives.java
@Test(timeout=60000)
public void testExceedsCapacity() throws Exception {
  // Create a giant file
  final Path fileName = new Path("/exceeds");
  final long fileLen = CACHE_CAPACITY * (NUM_DATANODES*2);
  int numCachedReplicas = (int) ((CACHE_CAPACITY*NUM_DATANODES)/BLOCK_SIZE);
  DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES,
      0xFADED);
  // Set up a log appender watcher
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  dfs.addCachePool(new CachePoolInfo("pool"));
  dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
      .setPath(fileName).setReplication((short) 1).build());
  waitForCachedBlocks(namenode, -1, numCachedReplicas,
      "testExceeds:1");
  // Check that no DNs saw an excess CACHE message
  int lines = appender.countLinesWithMessage(
      "more bytes in the cache: " +
      DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
  assertEquals("Namenode should not send extra CACHE commands", 0, lines);
  // Try creating a file with giant-sized blocks that exceed cache capacity
  dfs.delete(fileName, false);
  DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
      (short) 1, 0xFADED);
  // Nothing will get cached, so just force sleep for a bit
  Thread.sleep(4000);
  // Still should not see any excess commands
  lines = appender.countLinesWithMessage(
      "more bytes in the cache: " +
      DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
  assertEquals("Namenode should not send extra CACHE commands", 0, lines);
}
Project: hadoop-on-lustre2    File: TestReplicationPolicy.java
/**
 * In this test case, it tries to choose more targets than there are
 * available nodes and checks the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // make data nodes 0 & 1 not qualified to be chosen: not enough disk space
  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }

  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);

  // try to choose NUM_OF_DATANODES targets, which is more than the number
  // of actually available nodes.
  DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
  assertEquals(targets.length, NUM_OF_DATANODES - 2);

  final List<LoggingEvent> log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.size() == 0);
  final LoggingEvent lastLogEntry = log.get(log.size() - 1);

  assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
  // Supposed to place a replica on each node, but two data nodes are not
  // available for placing replicas, so here we expect a shortfall of 2
  assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));

  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}
Project: hadoop    File: TestStartup.java
private void testImageChecksum(boolean compress) throws Exception {
  MiniDFSCluster cluster = null;
  if (compress) {
    config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  }

  try {
      LOG.info("\n===========================================\n" +
               "Starting empty cluster");

      cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(0)
        .format(true)
        .build();
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/test"));

      LOG.info("Shutting down cluster #1");
      cluster.shutdown();
      cluster = null;

      // Corrupt the md5 files in all the namedirs
      corruptFSImageMD5(true);

      // Attach our own log appender so we can verify output
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);

      // Try to start a new cluster
      LOG.info("\n===========================================\n" +
      "Starting same cluster after simulated crash");
      try {
        cluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(0)
          .format(false)
          .build();
        fail("Should not have successfully started with corrupt image");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "Failed to load an FSImage file!", ioe);
        int md5failures = appender.countExceptionsWithMessage(
            " is corrupt with MD5 checksum of ");
        // Two namedirs, so should have seen two failures
        assertEquals(2, md5failures);
      }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop    File: TestFsDatasetCache.java
@Test(timeout=600000)
public void testFilesExceedMaxLockedMemory() throws Exception {
  LOG.info("beginning testFilesExceedMaxLockedMemory");

  // Create some test files that will exceed total cache capacity
  final int numFiles = 5;
  final long fileSize = CACHE_CAPACITY / (numFiles-1);

  final Path[] testFiles = new Path[numFiles];
  final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
  final long[] fileSizes = new long[numFiles];
  for (int i=0; i<numFiles; i++) {
    testFiles[i] = new Path("/testFilesExceedMaxLockedMemory-" + i);
    DFSTestUtil.createFile(fs, testFiles[i], fileSize, (short)1, 0xDFAl);
    fileLocs[i] = (HdfsBlockLocation[])fs.getFileBlockLocations(
        testFiles[i], 0, fileSize);
    // Get the file size (sum of blocks)
    long[] sizes = getBlockSizes(fileLocs[i]);
    for (int j=0; j<sizes.length; j++) {
      fileSizes[i] += sizes[j];
    }
  }

  // Cache the first n-1 files
  long total = 0;
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(cacheBlocks(fileLocs[i]));
    total = DFSTestUtil.verifyExpectedCacheUsage(
        rounder.round(total + fileSizes[i]), 4 * (i + 1), fsd);
  }

  // nth file should hit a capacity exception
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      int lines = appender.countLinesWithMessage(
          "more bytes in the cache: " +
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
      return lines > 0;
    }
  }, 500, 30000);
  // Also check the metrics for the failure
  assertTrue("Expected more than 0 failed cache attempts",
      fsd.getNumBlocksFailedToCache() > 0);

  // Uncache the n-1 files
  int curCachedBlocks = 16;
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
    long uncachedBytes = rounder.round(fileSizes[i]);
    total -= uncachedBytes;
    curCachedBlocks -= uncachedBytes / BLOCK_SIZE;
    DFSTestUtil.verifyExpectedCacheUsage(total, curCachedBlocks, fsd);
  }
  LOG.info("finishing testFilesExceedMaxLockedMemory");
}
Project: aliyun-oss-hadoop-fs    File: TestStartup.java
private void testImageChecksum(boolean compress) throws Exception {
  MiniDFSCluster cluster = null;
  if (compress) {
    config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  }

  try {
      LOG.info("\n===========================================\n" +
               "Starting empty cluster");

      cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(0)
        .format(true)
        .build();
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/test"));

      LOG.info("Shutting down cluster #1");
      cluster.shutdown();
      cluster = null;

      // Corrupt the md5 files in all the namedirs
      corruptFSImageMD5(true);

      // Attach our own log appender so we can verify output
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);

      // Try to start a new cluster
      LOG.info("\n===========================================\n" +
      "Starting same cluster after simulated crash");
      try {
        cluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(0)
          .format(false)
          .build();
        fail("Should not have successfully started with corrupt image");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "Failed to load an FSImage file!", ioe);
        int md5failures = appender.countExceptionsWithMessage(
            " is corrupt with MD5 checksum of ");
        // Two namedirs, so should have seen two failures
        assertEquals(2, md5failures);
      }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestFsDatasetCache.java
@Test(timeout=600000)
public void testFilesExceedMaxLockedMemory() throws Exception {
  LOG.info("beginning testFilesExceedMaxLockedMemory");

  // Create some test files that will exceed total cache capacity
  final int numFiles = 5;
  final long fileSize = CACHE_CAPACITY / (numFiles-1);

  final Path[] testFiles = new Path[numFiles];
  final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
  final long[] fileSizes = new long[numFiles];
  for (int i=0; i<numFiles; i++) {
    testFiles[i] = new Path("/testFilesExceedMaxLockedMemory-" + i);
    DFSTestUtil.createFile(fs, testFiles[i], fileSize, (short)1, 0xDFAl);
    fileLocs[i] = (HdfsBlockLocation[])fs.getFileBlockLocations(
        testFiles[i], 0, fileSize);
    // Get the file size (sum of blocks)
    long[] sizes = getBlockSizes(fileLocs[i]);
    for (int j=0; j<sizes.length; j++) {
      fileSizes[i] += sizes[j];
    }
  }

  // Cache the first n-1 files
  long total = 0;
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(cacheBlocks(fileLocs[i]));
    total = DFSTestUtil.verifyExpectedCacheUsage(
        rounder.roundUp(total + fileSizes[i]), 4 * (i + 1), fsd);
  }

  // nth file should hit a capacity exception
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      int lines = appender.countLinesWithMessage(
          "more bytes in the cache: " +
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
      return lines > 0;
    }
  }, 500, 30000);
  // Also check the metrics for the failure
  assertTrue("Expected more than 0 failed cache attempts",
      fsd.getNumBlocksFailedToCache() > 0);

  // Uncache the n-1 files
  int curCachedBlocks = 16;
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
    long uncachedBytes = rounder.roundUp(fileSizes[i]);
    total -= uncachedBytes;
    curCachedBlocks -= uncachedBytes / BLOCK_SIZE;
    DFSTestUtil.verifyExpectedCacheUsage(total, curCachedBlocks, fsd);
  }
  LOG.info("finishing testFilesExceedMaxLockedMemory");
}
Project: big-c    File: TestStartup.java
private void testImageChecksum(boolean compress) throws Exception {
  MiniDFSCluster cluster = null;
  if (compress) {
    config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  }

  try {
      LOG.info("\n===========================================\n" +
               "Starting empty cluster");

      cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(0)
        .format(true)
        .build();
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/test"));

      LOG.info("Shutting down cluster #1");
      cluster.shutdown();
      cluster = null;

      // Corrupt the md5 files in all the namedirs
      corruptFSImageMD5(true);

      // Attach our own log appender so we can verify output
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);

      // Try to start a new cluster
      LOG.info("\n===========================================\n" +
      "Starting same cluster after simulated crash");
      try {
        cluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(0)
          .format(false)
          .build();
        fail("Should not have successfully started with corrupt image");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "Failed to load an FSImage file!", ioe);
        int md5failures = appender.countExceptionsWithMessage(
            " is corrupt with MD5 checksum of ");
        // Two namedirs, so should have seen two failures
        assertEquals(2, md5failures);
      }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: big-c    File: TestFsDatasetCache.java
@Test(timeout=600000)
public void testFilesExceedMaxLockedMemory() throws Exception {
  LOG.info("beginning testFilesExceedMaxLockedMemory");

  // Create some test files that will exceed total cache capacity
  final int numFiles = 5;
  final long fileSize = CACHE_CAPACITY / (numFiles-1);

  final Path[] testFiles = new Path[numFiles];
  final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
  final long[] fileSizes = new long[numFiles];
  for (int i=0; i<numFiles; i++) {
    testFiles[i] = new Path("/testFilesExceedMaxLockedMemory-" + i);
    DFSTestUtil.createFile(fs, testFiles[i], fileSize, (short)1, 0xDFAl);
    fileLocs[i] = (HdfsBlockLocation[])fs.getFileBlockLocations(
        testFiles[i], 0, fileSize);
    // Get the file size (sum of blocks)
    long[] sizes = getBlockSizes(fileLocs[i]);
    for (int j=0; j<sizes.length; j++) {
      fileSizes[i] += sizes[j];
    }
  }

  // Cache the first n-1 files
  long total = 0;
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(cacheBlocks(fileLocs[i]));
    total = DFSTestUtil.verifyExpectedCacheUsage(
        rounder.round(total + fileSizes[i]), 4 * (i + 1), fsd);
  }

  // nth file should hit a capacity exception
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      int lines = appender.countLinesWithMessage(
          "more bytes in the cache: " +
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
      return lines > 0;
    }
  }, 500, 30000);
  // Also check the metrics for the failure
  assertTrue("Expected more than 0 failed cache attempts",
      fsd.getNumBlocksFailedToCache() > 0);

  // Uncache the n-1 files
  int curCachedBlocks = 16;
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
    long uncachedBytes = rounder.round(fileSizes[i]);
    total -= uncachedBytes;
    curCachedBlocks -= uncachedBytes / BLOCK_SIZE;
    DFSTestUtil.verifyExpectedCacheUsage(total, curCachedBlocks, fsd);
  }
  LOG.info("finishing testFilesExceedMaxLockedMemory");
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestStartup.java
private void testImageChecksum(boolean compress) throws Exception {
  MiniDFSCluster cluster = null;
  if (compress) {
    config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  }

  try {
      LOG.info("\n===========================================\n" +
               "Starting empty cluster");

      cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(0)
        .format(true)
        .build();
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/test"));

      LOG.info("Shutting down cluster #1");
      cluster.shutdown();
      cluster = null;

      // Corrupt the md5 files in all the namedirs
      corruptFSImageMD5(true);

      // Attach our own log appender so we can verify output
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);

      // Try to start a new cluster
      LOG.info("\n===========================================\n" +
      "Starting same cluster after simulated crash");
      try {
        cluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(0)
          .format(false)
          .build();
        fail("Should not have successfully started with corrupt image");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "Failed to load an FSImage file!", ioe);
        int md5failures = appender.countExceptionsWithMessage(
            " is corrupt with MD5 checksum of ");
        // Two namedirs, so should have seen two failures
        assertEquals(2, md5failures);
      }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestFsDatasetCache.java
@Test(timeout=600000)
public void testFilesExceedMaxLockedMemory() throws Exception {
  LOG.info("beginning testFilesExceedMaxLockedMemory");

  // Create some test files that will exceed total cache capacity
  final int numFiles = 5;
  final long fileSize = CACHE_CAPACITY / (numFiles-1);

  final Path[] testFiles = new Path[numFiles];
  final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
  final long[] fileSizes = new long[numFiles];
  for (int i=0; i<numFiles; i++) {
    testFiles[i] = new Path("/testFilesExceedMaxLockedMemory-" + i);
    DFSTestUtil.createFile(fs, testFiles[i], fileSize, (short)1, 0xDFAl);
    fileLocs[i] = (HdfsBlockLocation[])fs.getFileBlockLocations(
        testFiles[i], 0, fileSize);
    // Get the file size (sum of blocks)
    long[] sizes = getBlockSizes(fileLocs[i]);
    for (int j=0; j<sizes.length; j++) {
      fileSizes[i] += sizes[j];
    }
  }

  // Cache the first n-1 files
  long total = 0;
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(cacheBlocks(fileLocs[i]));
    total = DFSTestUtil.verifyExpectedCacheUsage(
        rounder.round(total + fileSizes[i]), 4 * (i + 1), fsd);
  }

  // nth file should hit a capacity exception
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      int lines = appender.countLinesWithMessage(
          "more bytes in the cache: " +
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
      return lines > 0;
    }
  }, 500, 30000);
  // Also check the metrics for the failure
  assertTrue("Expected more than 0 failed cache attempts",
      fsd.getNumBlocksFailedToCache() > 0);

  // Uncache the n-1 files
  int curCachedBlocks = 16;
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
    long uncachedBytes = rounder.round(fileSizes[i]);
    total -= uncachedBytes;
    curCachedBlocks -= uncachedBytes / BLOCK_SIZE;
    DFSTestUtil.verifyExpectedCacheUsage(total, curCachedBlocks, fsd);
  }
  LOG.info("finishing testFilesExceedMaxLockedMemory");
}
Project: hadoop-plus    File: TestStartup.java
private void testImageChecksum(boolean compress) throws Exception {
  MiniDFSCluster cluster = null;
  if (compress) {
    config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  }

  try {
      LOG.info("\n===========================================\n" +
               "Starting empty cluster");

      cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(0)
        .format(true)
        .build();
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/test"));

      LOG.info("Shutting down cluster #1");
      cluster.shutdown();
      cluster = null;

      // Corrupt the md5 files in all the namedirs
      corruptFSImageMD5(true);

      // Attach our own log appender so we can verify output
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);

      // Try to start a new cluster
      LOG.info("\n===========================================\n" +
      "Starting same cluster after simulated crash");
      try {
        cluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(0)
          .format(false)
          .build();
        fail("Should not have successfully started with corrupt image");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "Failed to load an FSImage file!", ioe);
        int md5failures = appender.countExceptionsWithMessage(
            " is corrupt with MD5 checksum of ");
        // Two namedirs, so should have seen two failures
        assertEquals(2, md5failures);
      }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: FlexMap    File: TestStartup.java
private void testImageChecksum(boolean compress) throws Exception {
  MiniDFSCluster cluster = null;
  if (compress) {
    config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  }

  try {
      LOG.info("\n===========================================\n" +
               "Starting empty cluster");

      cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(0)
        .format(true)
        .build();
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/test"));

      LOG.info("Shutting down cluster #1");
      cluster.shutdown();
      cluster = null;

      // Corrupt the md5 files in all the namedirs
      corruptFSImageMD5(true);

      // Attach our own log appender so we can verify output
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);

      // Try to start a new cluster
      LOG.info("\n===========================================\n" +
      "Starting same cluster after simulated crash");
      try {
        cluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(0)
          .format(false)
          .build();
        fail("Should not have successfully started with corrupt image");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "Failed to load an FSImage file!", ioe);
        int md5failures = appender.countExceptionsWithMessage(
            " is corrupt with MD5 checksum of ");
        // Two namedirs, so should have seen two failures
        assertEquals(2, md5failures);
      }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: FlexMap    File: TestFsDatasetCache.java
@Test(timeout=600000)
public void testFilesExceedMaxLockedMemory() throws Exception {
  LOG.info("beginning testFilesExceedMaxLockedMemory");

  // Create some test files that will exceed total cache capacity
  final int numFiles = 5;
  final long fileSize = CACHE_CAPACITY / (numFiles-1);

  final Path[] testFiles = new Path[numFiles];
  final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
  final long[] fileSizes = new long[numFiles];
  for (int i=0; i<numFiles; i++) {
    testFiles[i] = new Path("/testFilesExceedMaxLockedMemory-" + i);
    DFSTestUtil.createFile(fs, testFiles[i], fileSize, (short)1, 0xDFAl);
    fileLocs[i] = (HdfsBlockLocation[])fs.getFileBlockLocations(
        testFiles[i], 0, fileSize);
    // Get the file size (sum of blocks)
    long[] sizes = getBlockSizes(fileLocs[i]);
    for (int j=0; j<sizes.length; j++) {
      fileSizes[i] += sizes[j];
    }
  }

  // Cache the first n-1 files
  long total = 0;
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(cacheBlocks(fileLocs[i]));
    total = DFSTestUtil.verifyExpectedCacheUsage(
        rounder.round(total + fileSizes[i]), 4 * (i + 1), fsd);
  }

  // nth file should hit a capacity exception
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      int lines = appender.countLinesWithMessage(
          "more bytes in the cache: " +
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
      return lines > 0;
    }
  }, 500, 30000);
  // Also check the metrics for the failure
  assertTrue("Expected more than 0 failed cache attempts",
      fsd.getNumBlocksFailedToCache() > 0);

  // Uncache the n-1 files
  int curCachedBlocks = 16;
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
    long uncachedBytes = rounder.round(fileSizes[i]);
    total -= uncachedBytes;
    curCachedBlocks -= uncachedBytes / BLOCK_SIZE;
    DFSTestUtil.verifyExpectedCacheUsage(total, curCachedBlocks, fsd);
  }
  LOG.info("finishing testFilesExceedMaxLockedMemory");
}
Project: hadoop-TCP    File: TestStartup.java
private void testImageChecksum(boolean compress) throws Exception {
  MiniDFSCluster cluster = null;
  if (compress) {
    config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  }

  try {
      LOG.info("\n===========================================\n" +
               "Starting empty cluster");

      cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(0)
        .format(true)
        .build();
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/test"));

      LOG.info("Shutting down cluster #1");
      cluster.shutdown();
      cluster = null;

      // Corrupt the md5 files in all the namedirs
      corruptFSImageMD5(true);

      // Attach our own log appender so we can verify output
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);

      // Try to start a new cluster
      LOG.info("\n===========================================\n" +
      "Starting same cluster after simulated crash");
      try {
        cluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(0)
          .format(false)
          .build();
        fail("Should not have successfully started with corrupt image");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "Failed to load an FSImage file!", ioe);
        int md5failures = appender.countExceptionsWithMessage(
            " is corrupt with MD5 checksum of ");
        // Two namedirs, so should have seen two failures
        assertEquals(2, md5failures);
      }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hardfs    File: TestStartup.java
private void testImageChecksum(boolean compress) throws Exception {
  MiniDFSCluster cluster = null;
  if (compress) {
    config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  }

  try {
      LOG.info("\n===========================================\n" +
               "Starting empty cluster");

      cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(0)
        .format(true)
        .build();
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/test"));

      LOG.info("Shutting down cluster #1");
      cluster.shutdown();
      cluster = null;

      // Corrupt the md5 files in all the namedirs
      corruptFSImageMD5(true);

      // Attach our own log appender so we can verify output
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);

      // Try to start a new cluster
      LOG.info("\n===========================================\n" +
      "Starting same cluster after simulated crash");
      try {
        cluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(0)
          .format(false)
          .build();
        fail("Should not have successfully started with corrupt image");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "Failed to load an FSImage file!", ioe);
        int md5failures = appender.countExceptionsWithMessage(
            " is corrupt with MD5 checksum of ");
        // Two namedirs, so should have seen two failures
        assertEquals(2, md5failures);
      }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop-on-lustre2    File: TestStartup.java
private void testImageChecksum(boolean compress) throws Exception {
  MiniDFSCluster cluster = null;
  if (compress) {
    config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  }

  try {
      LOG.info("\n===========================================\n" +
               "Starting empty cluster");

      cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(0)
        .format(true)
        .build();
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/test"));

      LOG.info("Shutting down cluster #1");
      cluster.shutdown();
      cluster = null;

      // Corrupt the md5 files in all the namedirs
      corruptFSImageMD5(true);

      // Attach our own log appender so we can verify output
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);

      // Try to start a new cluster
      LOG.info("\n===========================================\n" +
      "Starting same cluster after simulated crash");
      try {
        cluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(0)
          .format(false)
          .build();
        fail("Should not have successfully started with corrupt image");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "Failed to load an FSImage file!", ioe);
        int md5failures = appender.countExceptionsWithMessage(
            " is corrupt with MD5 checksum of ");
        // Two namedirs, so should have seen two failures
        assertEquals(2, md5failures);
      }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop-on-lustre2    File: TestFsDatasetCache.java
@Test(timeout=600000)
public void testFilesExceedMaxLockedMemory() throws Exception {
  LOG.info("beginning testFilesExceedMaxLockedMemory");

  // Create some test files that will exceed total cache capacity
  final int numFiles = 5;
  final long fileSize = CACHE_CAPACITY / (numFiles-1);

  final Path[] testFiles = new Path[numFiles];
  final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
  final long[] fileSizes = new long[numFiles];
  for (int i=0; i<numFiles; i++) {
    testFiles[i] = new Path("/testFilesExceedMaxLockedMemory-" + i);
    DFSTestUtil.createFile(fs, testFiles[i], fileSize, (short)1, 0xDFAl);
    fileLocs[i] = (HdfsBlockLocation[])fs.getFileBlockLocations(
        testFiles[i], 0, fileSize);
    // Get the file size (sum of blocks)
    long[] sizes = getBlockSizes(fileLocs[i]);
    for (int j=0; j<sizes.length; j++) {
      fileSizes[i] += sizes[j];
    }
  }

  // Cache the first n-1 files
  long total = 0;
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(cacheBlocks(fileLocs[i]));
    total = DFSTestUtil.verifyExpectedCacheUsage(
        rounder.round(total + fileSizes[i]), 4 * (i + 1), fsd);
  }

  // nth file should hit a capacity exception
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      int lines = appender.countLinesWithMessage(
          "more bytes in the cache: " +
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
      return lines > 0;
    }
  }, 500, 30000);
  // Also check the metrics for the failure
  assertTrue("Expected more than 0 failed cache attempts",
      fsd.getNumBlocksFailedToCache() > 0);

  // Uncache the n-1 files
  int curCachedBlocks = 16;
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
    long uncachedBytes = rounder.round(fileSizes[i]);
    total -= uncachedBytes;
    curCachedBlocks -= uncachedBytes / BLOCK_SIZE;
    DFSTestUtil.verifyExpectedCacheUsage(total, curCachedBlocks, fsd);
  }
  LOG.info("finishing testFilesExceedMaxLockedMemory");
}