Java 类org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff 实例源码

项目:hadoop-plus    文件:INodeFile.java   
/**
 * Adds this file's namespace and diskspace consumption to {@code counts}.
 *
 * @param counts accumulator to add into; also returned to the caller
 * @param useCache not consulted by this implementation (interface parameter)
 * @param lastSnapshotId id of the last snapshot to account for, or
 *        {@link Snapshot#INVALID_ID} to account for the current state
 * @return the same {@code counts} instance, updated
 */
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  // The file itself always occupies one namespace entry.
  long namespace = 1;
  final long diskspace;
  if (!(this instanceof FileWithSnapshot)) {
    // Not snapshotted: charge the plain current diskspace usage.
    diskspace = diskspaceConsumed();
  } else {
    final FileDiffList diffList = ((FileWithSnapshot) this).getDiffs();
    final Snapshot latest = diffList.getLastSnapshot();
    final List<FileDiff> recorded = diffList.asList();

    if (lastSnapshotId == Snapshot.INVALID_ID || latest == null) {
      // No particular snapshot requested, or no diff recorded yet: each
      // stored diff costs one namespace entry; diskspace is the current usage.
      namespace += recorded.size();
      diskspace = diskspaceConsumed();
    } else if (latest.getId() < lastSnapshotId) {
      // The requested snapshot is newer than every recorded diff, so the
      // current file size times replication gives its diskspace.
      diskspace = computeFileSize(true, false) * getFileReplication();
    } else {
      // Charge the diskspace as of the requested snapshot.
      diskspace = diskspaceConsumed(diffList.getSnapshotById(lastSnapshotId));
    }
  }
  counts.add(Quota.NAMESPACE, namespace);
  counts.add(Quota.DISKSPACE, diskspace);
  return counts;
}
项目:hadoop-plus    文件:SnapshotFSImageFormat.java   
/**
 * Reads a {@link FileDiffList} from the image stream.
 * A serialized length of -1 denotes an absent (null) diff list.
 *
 * @param in image input stream, positioned at the diff-list length
 * @param loader image loader used to resolve each diff's snapshot
 * @return the loaded diff list, or null if none was stored
 * @throws IOException on a read error
 */
public static FileDiffList loadFileDiffList(DataInput in,
    FSImageFormat.Loader loader) throws IOException {
  final int count = in.readInt();
  if (count == -1) {
    return null;
  }
  final FileDiffList result = new FileDiffList();
  FileDiff previous = null;
  for (int idx = 0; idx < count; idx++) {
    // Each loaded diff links to the one read just before it (its posterior)
    // and is prepended to the list.
    final FileDiff current = loadFileDiff(previous, in, loader);
    result.addFirst(current);
    previous = current;
  }
  return result;
}
项目:hadoop-TCP    文件:INodeFile.java   
/**
 * Adds this file's namespace and diskspace consumption to {@code counts}.
 *
 * @param counts accumulator to add into; also returned to the caller
 * @param useCache not consulted by this implementation (interface parameter)
 * @param lastSnapshotId id of the last snapshot to account for, or
 *        {@link Snapshot#INVALID_ID} to account for the current state
 * @return the same {@code counts} instance, updated
 */
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  // The file itself always occupies one namespace entry.
  long namespace = 1;
  final long diskspace;
  if (!(this instanceof FileWithSnapshot)) {
    // Not snapshotted: charge the plain current diskspace usage.
    diskspace = diskspaceConsumed();
  } else {
    final FileDiffList diffList = ((FileWithSnapshot) this).getDiffs();
    final Snapshot latest = diffList.getLastSnapshot();
    final List<FileDiff> recorded = diffList.asList();

    if (lastSnapshotId == Snapshot.INVALID_ID || latest == null) {
      // No particular snapshot requested, or no diff recorded yet: each
      // stored diff costs one namespace entry; diskspace is the current usage.
      namespace += recorded.size();
      diskspace = diskspaceConsumed();
    } else if (latest.getId() < lastSnapshotId) {
      // The requested snapshot is newer than every recorded diff, so the
      // current file size times replication gives its diskspace.
      diskspace = computeFileSize(true, false) * getFileReplication();
    } else {
      // Charge the diskspace as of the requested snapshot.
      diskspace = diskspaceConsumed(diffList.getSnapshotById(lastSnapshotId));
    }
  }
  counts.add(Quota.NAMESPACE, namespace);
  counts.add(Quota.DISKSPACE, diskspace);
  return counts;
}
项目:hadoop-TCP    文件:SnapshotFSImageFormat.java   
/**
 * Reads a {@link FileDiffList} from the image stream.
 * A serialized length of -1 denotes an absent (null) diff list.
 *
 * @param in image input stream, positioned at the diff-list length
 * @param loader image loader used to resolve each diff's snapshot
 * @return the loaded diff list, or null if none was stored
 * @throws IOException on a read error
 */
public static FileDiffList loadFileDiffList(DataInput in,
    FSImageFormat.Loader loader) throws IOException {
  final int count = in.readInt();
  if (count == -1) {
    return null;
  }
  final FileDiffList result = new FileDiffList();
  FileDiff previous = null;
  for (int idx = 0; idx < count; idx++) {
    // Each loaded diff links to the one read just before it (its posterior)
    // and is prepended to the list.
    final FileDiff current = loadFileDiff(previous, in, loader);
    result.addFirst(current);
    previous = current;
  }
  return result;
}
项目:hardfs    文件:INodeFile.java   
/**
 * Adds this file's namespace and diskspace consumption to {@code counts}.
 *
 * @param counts accumulator to add into; also returned to the caller
 * @param useCache not consulted by this implementation (interface parameter)
 * @param lastSnapshotId id of the last snapshot to account for, or
 *        {@link Snapshot#INVALID_ID} to account for the current state
 * @return the same {@code counts} instance, updated
 */
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  // The file itself always occupies one namespace entry.
  long namespace = 1;
  final long diskspace;
  if (!(this instanceof FileWithSnapshot)) {
    // Not snapshotted: charge the plain current diskspace usage.
    diskspace = diskspaceConsumed();
  } else {
    final FileDiffList diffList = ((FileWithSnapshot) this).getDiffs();
    final Snapshot latest = diffList.getLastSnapshot();
    final List<FileDiff> recorded = diffList.asList();

    if (lastSnapshotId == Snapshot.INVALID_ID || latest == null) {
      // No particular snapshot requested, or no diff recorded yet: each
      // stored diff costs one namespace entry; diskspace is the current usage.
      namespace += recorded.size();
      diskspace = diskspaceConsumed();
    } else if (latest.getId() < lastSnapshotId) {
      // The requested snapshot is newer than every recorded diff, so the
      // current file size times replication gives its diskspace.
      diskspace = computeFileSize(true, false) * getFileReplication();
    } else {
      // Charge the diskspace as of the requested snapshot.
      diskspace = diskspaceConsumed(diffList.getSnapshotById(lastSnapshotId));
    }
  }
  counts.add(Quota.NAMESPACE, namespace);
  counts.add(Quota.DISKSPACE, diskspace);
  return counts;
}
项目:hardfs    文件:SnapshotFSImageFormat.java   
/**
 * Reads a {@link FileDiffList} from the image stream.
 * A serialized length of -1 denotes an absent (null) diff list.
 *
 * @param in image input stream, positioned at the diff-list length
 * @param loader image loader used to resolve each diff's snapshot
 * @return the loaded diff list, or null if none was stored
 * @throws IOException on a read error
 */
public static FileDiffList loadFileDiffList(DataInput in,
    FSImageFormat.Loader loader) throws IOException {
  final int count = in.readInt();
  if (count == -1) {
    return null;
  }
  final FileDiffList result = new FileDiffList();
  FileDiff previous = null;
  for (int idx = 0; idx < count; idx++) {
    // Each loaded diff links to the one read just before it (its posterior)
    // and is prepended to the list.
    final FileDiff current = loadFileDiff(previous, in, loader);
    result.addFirst(current);
    previous = current;
  }
  return result;
}
项目:hadoop-plus    文件:INodeFile.java   
/**
 * Returns the file size recorded in the given snapshot's diff if one exists;
 * otherwise computes the size of the current file.
 *
 * @param snapshot the snapshot to look up, or null for the current file
 * @return the file size in bytes
 */
public final long computeFileSize(Snapshot snapshot) {
  if (snapshot == null || !(this instanceof FileWithSnapshot)) {
    // No snapshot requested, or this file keeps no diffs: use current state.
    return computeFileSize(true, false);
  }
  final FileDiff diff = ((FileWithSnapshot) this).getDiffs().getDiff(snapshot);
  // Fall back to the current size when no diff was recorded for the snapshot.
  return diff == null ? computeFileSize(true, false) : diff.getFileSize();
}
项目:hadoop-plus    文件:SnapshotFSImageFormat.java   
/**
 * Reads a single {@link FileDiff} record from the image stream:
 * snapshot reference, recorded file size, and optional inode attributes.
 *
 * @param posterior the diff loaded just before this one, or null
 * @param in image input stream
 * @param loader image loader used to resolve the snapshot and attributes
 * @return the loaded diff
 * @throws IOException on a read error
 */
private static FileDiff loadFileDiff(FileDiff posterior, DataInput in,
    FSImageFormat.Loader loader) throws IOException {
  // The snapshot is identified by the full path of its root, which the
  // loader reads from the stream and resolves.
  final Snapshot snapshot = loader.getSnapshot(in);

  // File size recorded in this diff.
  final long fileSize = in.readLong();

  // A boolean flag indicates whether a snapshot copy of the inode's file
  // attributes follows.
  INodeFileAttributes snapshotCopy = null;
  if (in.readBoolean()) {
    snapshotCopy = loader.loadINodeFileAttributes(in);
  }

  return new FileDiff(snapshot, snapshotCopy, posterior, fileSize);
}
项目:hadoop-TCP    文件:INodeFile.java   
/**
 * Returns the file size recorded in the given snapshot's diff if one exists;
 * otherwise computes the size of the current file.
 *
 * @param snapshot the snapshot to look up, or null for the current file
 * @return the file size in bytes
 */
public final long computeFileSize(Snapshot snapshot) {
  if (snapshot == null || !(this instanceof FileWithSnapshot)) {
    // No snapshot requested, or this file keeps no diffs: use current state.
    return computeFileSize(true, false);
  }
  final FileDiff diff = ((FileWithSnapshot) this).getDiffs().getDiff(snapshot);
  // Fall back to the current size when no diff was recorded for the snapshot.
  return diff == null ? computeFileSize(true, false) : diff.getFileSize();
}
项目:hadoop-TCP    文件:SnapshotFSImageFormat.java   
/**
 * Reads a single {@link FileDiff} record from the image stream:
 * snapshot reference, recorded file size, and optional inode attributes.
 *
 * @param posterior the diff loaded just before this one, or null
 * @param in image input stream
 * @param loader image loader used to resolve the snapshot and attributes
 * @return the loaded diff
 * @throws IOException on a read error
 */
private static FileDiff loadFileDiff(FileDiff posterior, DataInput in,
    FSImageFormat.Loader loader) throws IOException {
  // The snapshot is identified by the full path of its root, which the
  // loader reads from the stream and resolves.
  final Snapshot snapshot = loader.getSnapshot(in);

  // File size recorded in this diff.
  final long fileSize = in.readLong();

  // A boolean flag indicates whether a snapshot copy of the inode's file
  // attributes follows.
  INodeFileAttributes snapshotCopy = null;
  if (in.readBoolean()) {
    snapshotCopy = loader.loadINodeFileAttributes(in);
  }

  return new FileDiff(snapshot, snapshotCopy, posterior, fileSize);
}
项目:hardfs    文件:INodeFile.java   
/**
 * Returns the file size recorded in the given snapshot's diff if one exists;
 * otherwise computes the size of the current file.
 *
 * @param snapshot the snapshot to look up, or null for the current file
 * @return the file size in bytes
 */
public final long computeFileSize(Snapshot snapshot) {
  if (snapshot == null || !(this instanceof FileWithSnapshot)) {
    // No snapshot requested, or this file keeps no diffs: use current state.
    return computeFileSize(true, false);
  }
  final FileDiff diff = ((FileWithSnapshot) this).getDiffs().getDiff(snapshot);
  // Fall back to the current size when no diff was recorded for the snapshot.
  return diff == null ? computeFileSize(true, false) : diff.getFileSize();
}
项目:hardfs    文件:SnapshotFSImageFormat.java   
/**
 * Reads a single {@link FileDiff} record from the image stream:
 * snapshot reference, recorded file size, and optional inode attributes.
 *
 * @param posterior the diff loaded just before this one, or null
 * @param in image input stream
 * @param loader image loader used to resolve the snapshot and attributes
 * @return the loaded diff
 * @throws IOException on a read error
 */
private static FileDiff loadFileDiff(FileDiff posterior, DataInput in,
    FSImageFormat.Loader loader) throws IOException {
  // The snapshot is identified by the full path of its root, which the
  // loader reads from the stream and resolves.
  final Snapshot snapshot = loader.getSnapshot(in);

  // File size recorded in this diff.
  final long fileSize = in.readLong();

  // A boolean flag indicates whether a snapshot copy of the inode's file
  // attributes follows.
  INodeFileAttributes snapshotCopy = null;
  if (in.readBoolean()) {
    snapshotCopy = loader.loadINodeFileAttributes(in);
  }

  return new FileDiff(snapshot, snapshotCopy, posterior, fileSize);
}
项目:hadoop-plus    文件:TestRenameWithSnapshots.java   
/**
 * Test rename to an invalid name (xxx/.snapshot). The rename must be
 * rejected with a "reserved name" error, and the failed rename must be
 * fully undone: the directory's children, its directory diff, and the
 * file's diff must all be left consistent, including across a cluster
 * restart from a saved fsimage.
 */
@Test
public void testRenameUndo_7() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  // create a snapshot on root
  SnapshotTestHelper.createSnapshot(hdfs, root, snap1);

  // rename bar to /foo/.snapshot which is invalid
  final Path invalid = new Path(foo, HdfsConstants.DOT_SNAPSHOT_DIR);
  try {
    hdfs.rename(bar, invalid);
    fail("expect exception since invalid name is used for rename");
  } catch (Exception e) {
    GenericTestUtils.assertExceptionContains("\"" +
        HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name", e);
  }

  // check: foo must still have exactly its one original child
  INodeDirectoryWithSnapshot fooNode = (INodeDirectoryWithSnapshot) fsdir
      .getINode4Write(foo.toString());
  ReadOnlyList<INode> children = fooNode.getChildrenList(null);
  assertEquals(1, children.size());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  // this diff is generated while renaming
  assertEquals(snap1, Snapshot.getSnapshotName(diff.snapshot));
  // after undo, the diff should be empty
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());

  // bar was converted to filewithsnapshot while renaming; the undo must
  // leave it correctly linked under foo with a single diff for snap1
  INodeFileWithSnapshot barNode = (INodeFileWithSnapshot) fsdir
      .getINode4Write(bar.toString());
  assertSame(barNode, children.get(0));
  assertSame(fooNode, barNode.getParent());
  List<FileDiff> barDiffList = barNode.getDiffs().asList();
  assertEquals(1, barDiffList.size());
  FileDiff barDiff = barDiffList.get(0);
  assertEquals(snap1, Snapshot.getSnapshotName(barDiff.snapshot));

  // restart cluster multiple times to make sure the fsimage and edits log are
  // correct. Note that when loading fsimage, foo and bar will be converted 
  // back to normal INodeDirectory and INodeFile since they do not store any 
  // snapshot data
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPL).build();
  cluster.waitActive();
  restartClusterAndCheckImage(true);
}
项目:hadoop-TCP    文件:TestRenameWithSnapshots.java   
/**
 * Test rename to an invalid name (xxx/.snapshot). The rename must be
 * rejected with a "reserved name" error, and the failed rename must be
 * fully undone: the directory's children, its directory diff, and the
 * file's diff must all be left consistent, including across a cluster
 * restart from a saved fsimage.
 */
@Test
public void testRenameUndo_7() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  // create a snapshot on root
  SnapshotTestHelper.createSnapshot(hdfs, root, snap1);

  // rename bar to /foo/.snapshot which is invalid
  final Path invalid = new Path(foo, HdfsConstants.DOT_SNAPSHOT_DIR);
  try {
    hdfs.rename(bar, invalid);
    fail("expect exception since invalid name is used for rename");
  } catch (Exception e) {
    GenericTestUtils.assertExceptionContains("\"" +
        HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name", e);
  }

  // check: foo must still have exactly its one original child
  INodeDirectoryWithSnapshot fooNode = (INodeDirectoryWithSnapshot) fsdir
      .getINode4Write(foo.toString());
  ReadOnlyList<INode> children = fooNode.getChildrenList(null);
  assertEquals(1, children.size());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  // this diff is generated while renaming
  assertEquals(snap1, Snapshot.getSnapshotName(diff.snapshot));
  // after undo, the diff should be empty
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());

  // bar was converted to filewithsnapshot while renaming; the undo must
  // leave it correctly linked under foo with a single diff for snap1
  INodeFileWithSnapshot barNode = (INodeFileWithSnapshot) fsdir
      .getINode4Write(bar.toString());
  assertSame(barNode, children.get(0));
  assertSame(fooNode, barNode.getParent());
  List<FileDiff> barDiffList = barNode.getDiffs().asList();
  assertEquals(1, barDiffList.size());
  FileDiff barDiff = barDiffList.get(0);
  assertEquals(snap1, Snapshot.getSnapshotName(barDiff.snapshot));

  // restart cluster multiple times to make sure the fsimage and edits log are
  // correct. Note that when loading fsimage, foo and bar will be converted 
  // back to normal INodeDirectory and INodeFile since they do not store any 
  // snapshot data
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPL).build();
  cluster.waitActive();
  restartClusterAndCheckImage(true);
}
项目:hardfs    文件:TestRenameWithSnapshots.java   
/**
 * Test rename to an invalid name (xxx/.snapshot). The rename must be
 * rejected with a "reserved name" error, and the failed rename must be
 * fully undone: the directory's children, its directory diff, and the
 * file's diff must all be left consistent, including across a cluster
 * restart from a saved fsimage.
 */
@Test
public void testRenameUndo_7() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  // create a snapshot on root
  SnapshotTestHelper.createSnapshot(hdfs, root, snap1);

  // rename bar to /foo/.snapshot which is invalid
  final Path invalid = new Path(foo, HdfsConstants.DOT_SNAPSHOT_DIR);
  try {
    hdfs.rename(bar, invalid);
    fail("expect exception since invalid name is used for rename");
  } catch (Exception e) {
    GenericTestUtils.assertExceptionContains("\"" +
        HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name", e);
  }

  // check: foo must still have exactly its one original child
  INodeDirectoryWithSnapshot fooNode = (INodeDirectoryWithSnapshot) fsdir
      .getINode4Write(foo.toString());
  ReadOnlyList<INode> children = fooNode.getChildrenList(null);
  assertEquals(1, children.size());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  // this diff is generated while renaming
  assertEquals(snap1, Snapshot.getSnapshotName(diff.snapshot));
  // after undo, the diff should be empty
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());

  // bar was converted to filewithsnapshot while renaming; the undo must
  // leave it correctly linked under foo with a single diff for snap1
  INodeFileWithSnapshot barNode = (INodeFileWithSnapshot) fsdir
      .getINode4Write(bar.toString());
  assertSame(barNode, children.get(0));
  assertSame(fooNode, barNode.getParent());
  List<FileDiff> barDiffList = barNode.getDiffs().asList();
  assertEquals(1, barDiffList.size());
  FileDiff barDiff = barDiffList.get(0);
  assertEquals(snap1, Snapshot.getSnapshotName(barDiff.snapshot));

  // restart cluster multiple times to make sure the fsimage and edits log are
  // correct. Note that when loading fsimage, foo and bar will be converted 
  // back to normal INodeDirectory and INodeFile since they do not store any 
  // snapshot data
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPL).build();
  cluster.waitActive();
  restartClusterAndCheckImage(true);
}