Usage examples for the Java class org.apache.hadoop.hdfs.server.namenode.TestFileTruncate

Project: hadoop    File: TestRollingUpgrade.java
private static void startRollingUpgrade(Path foo, Path bar,
    Path file, byte[] data,
    MiniDFSCluster cluster) throws IOException {
  final DistributedFileSystem dfs = cluster.getFileSystem();

  //start rolling upgrade
  dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
  dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

  dfs.mkdirs(bar);
  Assert.assertTrue(dfs.exists(foo));
  Assert.assertTrue(dfs.exists(bar));

  //truncate a file
  final int newLength = DFSUtil.getRandom().nextInt(data.length - 1) + 1;
  dfs.truncate(file, newLength);
  TestFileTruncate.checkBlockRecovery(file, dfs);
  AppendTestUtil.checkFullFile(dfs, file, newLength, data);
}
Project: aliyun-oss-hadoop-fs    File: TestRollingUpgrade.java
private static void startRollingUpgrade(Path foo, Path bar,
    Path file, byte[] data,
    MiniDFSCluster cluster) throws IOException {
  final DistributedFileSystem dfs = cluster.getFileSystem();

  //start rolling upgrade
  dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
  dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

  dfs.mkdirs(bar);
  Assert.assertTrue(dfs.exists(foo));
  Assert.assertTrue(dfs.exists(bar));

  //truncate a file
  final int newLength = ThreadLocalRandom.current().nextInt(data.length - 1)
      + 1;
  dfs.truncate(file, newLength);
  TestFileTruncate.checkBlockRecovery(file, dfs);
  AppendTestUtil.checkFullFile(dfs, file, newLength, data);
}
Project: big-c    File: TestRollingUpgrade.java
private static void startRollingUpgrade(Path foo, Path bar,
    Path file, byte[] data,
    MiniDFSCluster cluster) throws IOException {
  final DistributedFileSystem dfs = cluster.getFileSystem();

  //start rolling upgrade
  dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
  dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

  dfs.mkdirs(bar);
  Assert.assertTrue(dfs.exists(foo));
  Assert.assertTrue(dfs.exists(bar));

  //truncate a file
  final int newLength = DFSUtil.getRandom().nextInt(data.length - 1) + 1;
  dfs.truncate(file, newLength);
  TestFileTruncate.checkBlockRecovery(file, dfs);
  AppendTestUtil.checkFullFile(dfs, file, newLength, data);
}
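
All three variants above exercise the same scenario: the file is truncated to a random non-zero length while a rolling upgrade is prepared, the test waits for the NameNode to finish recovering the shortened last block via TestFileTruncate.checkBlockRecovery, and the remaining bytes are then verified with AppendTestUtil.checkFullFile. This page only shows the call sites; a minimal sketch of such a polling wait is given below, assuming it inspects the file's LocatedBlocks until the last block is complete (the attempt count, sleep interval, and use of DFSClient.getLocatedBlocks are assumptions, not the actual TestFileTruncate code).

// Minimal sketch of a block-recovery wait (assumed types: org.apache.hadoop.fs.Path,
// org.apache.hadoop.hdfs.DistributedFileSystem, org.apache.hadoop.hdfs.protocol.LocatedBlocks).
static void waitForBlockRecovery(Path p, DistributedFileSystem dfs,
    int attempts, long sleepMs) throws IOException {
  for (int i = 0; i < attempts; i++) {
    final LocatedBlocks blocks = dfs.getClient()
        .getLocatedBlocks(p.toUri().getPath(), 0, Long.MAX_VALUE);
    final boolean noLastBlock = blocks.getLastLocatedBlock() == null;
    if (!blocks.isUnderConstruction()
        && (noLastBlock || blocks.isLastBlockComplete())) {
      return;  // recovery finished; the file is readable at its new length
    }
    try {
      Thread.sleep(sleepMs);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IOException("interrupted while waiting for block recovery", e);
    }
  }
  throw new IOException(
      "block recovery did not complete within " + attempts * sleepMs + " ms");
}

With suitable parameters this covers both call forms seen on this page: checkBlockRecovery(file, dfs) and checkBlockRecovery(file, dfs, 100, 300L).
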
Project: hadoop    File: TestAppendSnapshotTruncate.java
private boolean truncate(long newLength, StringBuilder b) throws IOException {
  final RandomAccessFile raf = new RandomAccessFile(localFile, "rw");
  raf.setLength(newLength);
  raf.close();

  final boolean isReady = dfs.truncate(file, newLength);
  b.append(", newLength=").append(newLength)
   .append(", isReady=").append(isReady);
  if (!isReady) {
    TestFileTruncate.checkBlockRecovery(file, dfs, 100, 300L);
  }
  return isReady;
}
Project: aliyun-oss-hadoop-fs    File: TestAppendSnapshotTruncate.java
private boolean truncate(long newLength, StringBuilder b) throws IOException {
  final RandomAccessFile raf = new RandomAccessFile(localFile, "rw");
  raf.setLength(newLength);
  raf.close();

  final boolean isReady = dfs.truncate(file, newLength);
  b.append(", newLength=").append(newLength)
   .append(", isReady=").append(isReady);
  if (!isReady) {
    TestFileTruncate.checkBlockRecovery(file, dfs, 100, 300L);
  }
  return isReady;
}
Project: big-c    File: TestAppendSnapshotTruncate.java
private boolean truncate(long newLength, StringBuilder b) throws IOException {
  final RandomAccessFile raf = new RandomAccessFile(localFile, "rw");
  raf.setLength(newLength);
  raf.close();

  final boolean isReady = dfs.truncate(file, newLength);
  b.append(", newLength=").append(newLength)
   .append(", isReady=").append(isReady);
  if (!isReady) {
    TestFileTruncate.checkBlockRecovery(file, dfs, 100, 300L);
  }
  return isReady;
}
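
The three TestAppendSnapshotTruncate helpers are identical: the local RandomAccessFile copy is shortened first so the expected contents stay known, then the HDFS file is truncated. DistributedFileSystem.truncate returns true when the new length falls on a block boundary and the file is usable immediately; otherwise it returns false and the last block must go through recovery, which is why the helper waits with TestFileTruncate.checkBlockRecovery(file, dfs, 100, 300L). A hypothetical caller is sketched below; the dfs, file, and data fields and the verification step are assumptions modeled on the other snippets on this page.

// Hypothetical caller of the truncate(long, StringBuilder) helper above; the
// dfs, file, and data fields are assumptions based on the snippets on this page.
void truncateAndVerify(long newLength, byte[] data) throws IOException {
  final StringBuilder b = new StringBuilder("truncate");
  final boolean isReady = truncate(newLength, b);  // helper shown above
  System.out.println(b);  // e.g. "truncate, newLength=..., isReady=false"
  // Whether or not the truncate completed immediately, the helper has already
  // waited for block recovery, so the file is readable at newLength here.
  AppendTestUtil.checkFullFile(dfs, file, (int) newLength, data);
}
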