Java class org.apache.hadoop.hdfs.util.ReadOnlyList: usage examples from open-source projects

Project: hadoop    File: FSPermissionChecker.java
/** Guarded by {@link FSNamesystem#readLock()} */
private void checkSubAccess(byte[][] pathByNameArr, int pathIdx, INode inode,
    int snapshotId, FsAction access, boolean ignoreEmptyDir)
    throws AccessControlException {
  if (inode == null || !inode.isDirectory()) {
    return;
  }

  Stack<INodeDirectory> directories = new Stack<INodeDirectory>();
  for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) {
    INodeDirectory d = directories.pop();
    ReadOnlyList<INode> cList = d.getChildrenList(snapshotId);
    if (!(cList.isEmpty() && ignoreEmptyDir)) {
      //TODO have to figure this out with inodeattribute provider
      check(getINodeAttrs(pathByNameArr, pathIdx, d, snapshotId),
          inode.getFullPathName(), access);
    }

    for(INode child : cList) {
      if (child.isDirectory()) {
        directories.push(child.asDirectory());
      }
    }
  }
}
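
Every snippet on this page consumes the same small abstraction, so it helps to keep its shape in mind. The following is a paraphrased sketch of what org.apache.hadoop.hdfs.util.ReadOnlyList exposes, not a verbatim copy; check the exact members against the release you build.

public interface ReadOnlyList<E> extends Iterable<E> {
  boolean isEmpty();  // true if the list has no elements
  int size();         // number of elements
  E get(int i);       // element at index i, 0 <= i < size()

  // Static helpers referenced throughout this page (names as in Hadoop):
  //   Util.asReadOnlyList(List<E>)   wrap a java.util.List as a ReadOnlyList
  //   Util.asList(ReadOnlyList<E>)   view a ReadOnlyList as an unmodifiable List
  //   Util.binarySearch(list, key)   java.util.Collections semantics: the index
  //                                  if found, otherwise -(insertionPoint) - 1
}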
Project: hadoop    File: FSImageFormat.java
/**
 * Save children INodes.
 * @param children The list of children INodes
 * @param out The DataOutputStream to write
 * @param inSnapshot Whether the parent directory or its ancestor is in
 *                   the deleted list of some snapshot (caused by rename or
 *                   deletion)
 * @param counter Counter to increment for namenode startup progress
 * @return Number of children that are directories
 */
private int saveChildren(ReadOnlyList<INode> children,
    DataOutputStream out, boolean inSnapshot, Counter counter)
    throws IOException {
  // Write normal children INode.
  out.writeInt(children.size());
  int dirNum = 0;
  for(INode child : children) {
    // print all children first
    // TODO: for HDFS-5428, we cannot change the format/content of fsimage
    // here, thus even if the parent directory is in snapshot, we still
    // do not handle INodeUC as those stored in deleted list
    saveINode2Image(child, out, false, referenceMap, counter);
    if (child.isDirectory()) {
      dirNum++;
    } else if (inSnapshot && child.isFile()
        && child.asFile().isUnderConstruction()) {
      this.snapshotUCMap.put(child.getId(), child.asFile());
    }
    if (checkCancelCounter++ % CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  return dirNum;
}
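
Because saveChildren only needs size() and iteration, an ordinary java.util.List can stand in for a directory's children when exercising code like this. A minimal sketch, assuming the Util.asReadOnlyList helper noted above and using String elements purely for illustration:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

public class ReadOnlyListDemo {
  public static void main(String[] args) {
    // Wrap a plain List in the read-only view; the wrapper reflects,
    // but cannot mutate, the underlying list.
    List<String> names = Arrays.asList("a", "b", "c");
    ReadOnlyList<String> view = ReadOnlyList.Util.asReadOnlyList(names);
    System.out.println(view.size() + " " + view.isEmpty()); // 3 false
    for (String n : view) {  // ReadOnlyList extends Iterable
      System.out.println(n);
    }
  }
}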
Project: hadoop    File: TestSnapshotRename.java
/**
 * Check the correctness of snapshot list within snapshottable dir
 */
private void checkSnapshotList(INodeDirectory srcRoot,
    String[] sortedNames, String[] names) {
  assertTrue(srcRoot.isSnapshottable());
  ReadOnlyList<Snapshot> listByName = srcRoot
      .getDirectorySnapshottableFeature().getSnapshotList();
  assertEquals(sortedNames.length, listByName.size());
  for (int i = 0; i < listByName.size(); i++) {
    assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
  }
  List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, listByTime.size());
  for (int i = 0; i < listByTime.size(); i++) {
    Snapshot s = srcRoot.getDirectorySnapshottableFeature().getSnapshotById(
        listByTime.get(i).getSnapshotId());
    assertEquals(names[i], s.getRoot().getLocalName());
  }
}
Project: aliyun-oss-hadoop-fs    File: FSPermissionChecker.java
/** Guarded by {@link FSNamesystem#readLock()} */
private void checkSubAccess(byte[][] pathByNameArr, int pathIdx, INode inode,
    int snapshotId, FsAction access, boolean ignoreEmptyDir)
    throws AccessControlException {
  if (inode == null || !inode.isDirectory()) {
    return;
  }

  Stack<INodeDirectory> directories = new Stack<INodeDirectory>();
  for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) {
    INodeDirectory d = directories.pop();
    ReadOnlyList<INode> cList = d.getChildrenList(snapshotId);
    if (!(cList.isEmpty() && ignoreEmptyDir)) {
      //TODO have to figure this out with inodeattribute provider
      check(getINodeAttrs(pathByNameArr, pathIdx, d, snapshotId),
          inode.getFullPathName(), access);
    }

    for(INode child : cList) {
      if (child.isDirectory()) {
        directories.push(child.asDirectory());
      }
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: FSImageFormat.java
/**
 * Save children INodes.
 * @param children The list of children INodes
 * @param out The DataOutputStream to write
 * @param inSnapshot Whether the parent directory or its ancestor is in
 *                   the deleted list of some snapshot (caused by rename or
 *                   deletion)
 * @param counter Counter to increment for namenode startup progress
 * @return Number of children that are directories
 */
private int saveChildren(ReadOnlyList<INode> children,
    DataOutputStream out, boolean inSnapshot, Counter counter)
    throws IOException {
  // Write normal children INode.
  out.writeInt(children.size());
  int dirNum = 0;
  for(INode child : children) {
    // print all children first
    // TODO: for HDFS-5428, we cannot change the format/content of fsimage
    // here, thus even if the parent directory is in snapshot, we still
    // do not handle INodeUC as those stored in deleted list
    saveINode2Image(child, out, false, referenceMap, counter);
    if (child.isDirectory()) {
      dirNum++;
    } else if (inSnapshot && child.isFile()
        && child.asFile().isUnderConstruction()) {
      this.snapshotUCMap.put(child.getId(), child.asFile());
    }
    if (checkCancelCounter++ % CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  return dirNum;
}
Project: aliyun-oss-hadoop-fs    File: TestSnapshotRename.java
/**
 * Check the correctness of snapshot list within snapshottable dir
 */
private void checkSnapshotList(INodeDirectory srcRoot,
    String[] sortedNames, String[] names) {
  assertTrue(srcRoot.isSnapshottable());
  ReadOnlyList<Snapshot> listByName = srcRoot
      .getDirectorySnapshottableFeature().getSnapshotList();
  assertEquals(sortedNames.length, listByName.size());
  for (int i = 0; i < listByName.size(); i++) {
    assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
  }
  List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, listByTime.size());
  for (int i = 0; i < listByTime.size(); i++) {
    Snapshot s = srcRoot.getDirectorySnapshottableFeature().getSnapshotById(
        listByTime.get(i).getSnapshotId());
    assertEquals(names[i], s.getRoot().getLocalName());
  }
}
Project: big-c    File: FSPermissionChecker.java
/** Guarded by {@link FSNamesystem#readLock()} */
private void checkSubAccess(byte[][] pathByNameArr, int pathIdx, INode inode,
    int snapshotId, FsAction access, boolean ignoreEmptyDir)
    throws AccessControlException {
  if (inode == null || !inode.isDirectory()) {
    return;
  }

  Stack<INodeDirectory> directories = new Stack<INodeDirectory>();
  for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) {
    INodeDirectory d = directories.pop();
    ReadOnlyList<INode> cList = d.getChildrenList(snapshotId);
    if (!(cList.isEmpty() && ignoreEmptyDir)) {
      //TODO have to figure this out with inodeattribute provider
      check(getINodeAttrs(pathByNameArr, pathIdx, d, snapshotId),
          inode.getFullPathName(), access);
    }

    for(INode child : cList) {
      if (child.isDirectory()) {
        directories.push(child.asDirectory());
      }
    }
  }
}
Project: big-c    File: FSImageFormat.java
/**
 * Save children INodes.
 * @param children The list of children INodes
 * @param out The DataOutputStream to write
 * @param inSnapshot Whether the parent directory or its ancestor is in
 *                   the deleted list of some snapshot (caused by rename or
 *                   deletion)
 * @param counter Counter to increment for namenode startup progress
 * @return Number of children that are directories
 */
private int saveChildren(ReadOnlyList<INode> children,
    DataOutputStream out, boolean inSnapshot, Counter counter)
    throws IOException {
  // Write normal children INode.
  out.writeInt(children.size());
  int dirNum = 0;
  for(INode child : children) {
    // print all children first
    // TODO: for HDFS-5428, we cannot change the format/content of fsimage
    // here, thus even if the parent directory is in snapshot, we still
    // do not handle INodeUC as those stored in deleted list
    saveINode2Image(child, out, false, referenceMap, counter);
    if (child.isDirectory()) {
      dirNum++;
    } else if (inSnapshot && child.isFile()
        && child.asFile().isUnderConstruction()) {
      this.snapshotUCMap.put(child.getId(), child.asFile());
    }
    if (checkCancelCounter++ % CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  return dirNum;
}
Project: big-c    File: TestSnapshotRename.java
/**
 * Check the correctness of snapshot list within snapshottable dir
 */
private void checkSnapshotList(INodeDirectory srcRoot,
    String[] sortedNames, String[] names) {
  assertTrue(srcRoot.isSnapshottable());
  ReadOnlyList<Snapshot> listByName = srcRoot
      .getDirectorySnapshottableFeature().getSnapshotList();
  assertEquals(sortedNames.length, listByName.size());
  for (int i = 0; i < listByName.size(); i++) {
    assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
  }
  List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, listByTime.size());
  for (int i = 0; i < listByTime.size(); i++) {
    Snapshot s = srcRoot.getDirectorySnapshottableFeature().getSnapshotById(
        listByTime.get(i).getSnapshotId());
    assertEquals(names[i], s.getRoot().getLocalName());
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: DefaultAuthorizationProvider.java
/**
 * Guarded by {@link FSNamesystem#readLock()}
 */
private void checkSubAccess(String user, Set<String> groups, INode inode,
    int snapshotId, FsAction access, boolean ignoreEmptyDir)
    throws AccessControlException {
  if (inode == null || !inode.isDirectory()) {
    return;
  }

  Stack<INodeDirectory> directories = new Stack<INodeDirectory>();
  for (directories.push(inode.asDirectory()); !directories.isEmpty(); ) {
    INodeDirectory d = directories.pop();
    ReadOnlyList<INode> cList = d.getChildrenList(snapshotId);
    if (!(cList.isEmpty() && ignoreEmptyDir)) {
      check(user, groups, d, snapshotId, access);
    }

    for (INode child : cList) {
      if (child.isDirectory()) {
        directories.push(child.asDirectory());
      }
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSImageFormat.java
/**
 * Save children INodes.
 * @param children The list of children INodes
 * @param out The DataOutputStream to write
 * @param inSnapshot Whether the parent directory or its ancestor is in
 *                   the deleted list of some snapshot (caused by rename or
 *                   deletion)
 * @param counter Counter to increment for namenode startup progress
 * @return Number of children that are directories
 */
private int saveChildren(ReadOnlyList<INode> children,
    DataOutputStream out, boolean inSnapshot, Counter counter)
    throws IOException {
  // Write normal children INode.
  out.writeInt(children.size());
  int dirNum = 0;
  for(INode child : children) {
    // print all children first
    // TODO: for HDFS-5428, we cannot change the format/content of fsimage
    // here, thus even if the parent directory is in snapshot, we still
    // do not handle INodeUC as those stored in deleted list
    saveINode2Image(child, out, false, referenceMap, counter);
    if (child.isDirectory()) {
      dirNum++;
    } else if (inSnapshot && child.isFile()
        && child.asFile().isUnderConstruction()) {
      this.snapshotUCMap.put(child.getId(), child.asFile());
    }
    if (checkCancelCounter++ % CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  return dirNum;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestSnapshotRename.java
/**
 * Check the correctness of snapshot list within snapshottable dir
 */
private void checkSnapshotList(INodeDirectory srcRoot,
    String[] sortedNames, String[] names) {
  assertTrue(srcRoot.isSnapshottable());
  ReadOnlyList<Snapshot> listByName = srcRoot
      .getDirectorySnapshottableFeature().getSnapshotList();
  assertEquals(sortedNames.length, listByName.size());
  for (int i = 0; i < listByName.size(); i++) {
    assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
  }
  List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, listByTime.size());
  for (int i = 0; i < listByTime.size(); i++) {
    Snapshot s = srcRoot.getDirectorySnapshottableFeature().getSnapshotById(
        listByTime.get(i).getSnapshotId());
    assertEquals(names[i], s.getRoot().getLocalName());
  }
}
Project: hadoop-plus    File: FSDirectory.java
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
    throws UnresolvedLinkException, IOException {
  Preconditions.checkState(hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = this.getINode(dirPath);
  final INodeDirectorySnapshottable dirNode = INodeDirectorySnapshottable
      .valueOf(node, dirPath);
  final ReadOnlyList<Snapshot> snapshots = dirNode.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit);
  final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, null);
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
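
The skipSize arithmetic above decodes java.util.Collections-style binary search results so that paging resumes strictly after startAfter, whether or not that name is still present. A worked example of just that arithmetic (values hypothetical):

public class SkipSizeDemo {
  public static void main(String[] args) {
    // Snapshot names sorted as [s0, s1, s3, s4].
    // startAfter = "s1": binarySearch finds it at index 1.
    // startAfter = "s2": not present; binarySearch returns -(2) - 1 = -3.
    System.out.println(decode(1));   // 2: resume after s1, i.e. at s3
    System.out.println(decode(-3));  // 2: resume at the insertion point, s3
  }
  static int decode(int raw) {
    // Same expression as in getSnapshotsListing().
    return raw < 0 ? -raw - 1 : raw + 1;
  }
}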
Project: hadoop-plus    File: INodeDirectoryWithSnapshot.java
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  if ((useCache && isQuotaSet()) || lastSnapshotId == Snapshot.INVALID_ID) {
    return super.computeQuotaUsage(counts, useCache, lastSnapshotId);
  }

  Snapshot lastSnapshot = diffs.getSnapshotById(lastSnapshotId);

  ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshot);
  for (INode child : childrenList) {
    child.computeQuotaUsage(counts, useCache, lastSnapshotId);
  }

  counts.add(Quota.NAMESPACE, 1);
  return counts;
}
Project: hadoop-plus    File: FSImageFormat.java
/**
 * Save children INodes.
 * @param children The list of children INodes
 * @param out The DataOutputStream to write
 * @param counter Counter to increment for namenode startup progress
 * @return Number of children that are directories
 */
private int saveChildren(ReadOnlyList<INode> children, DataOutputStream out,
    Counter counter) throws IOException {
  // Write normal children INode. 
  out.writeInt(children.size());
  int dirNum = 0;
  int i = 0;
  for(INode child : children) {
    // print all children first
    saveINode2Image(child, out, false, referenceMap, counter);
    if (child.isDirectory()) {
      dirNum++;
    }
    if (i++ % 50 == 0) {
      context.checkCancelled();
    }
  }
  return dirNum;
}
Project: FlexMap    File: FSPermissionChecker.java
/** Guarded by {@link FSNamesystem#readLock()} */
private void checkSubAccess(INode inode, int snapshotId, FsAction access,
    boolean ignoreEmptyDir) throws AccessControlException {
  if (inode == null || !inode.isDirectory()) {
    return;
  }

  Stack<INodeDirectory> directories = new Stack<INodeDirectory>();
  for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) {
    INodeDirectory d = directories.pop();
    ReadOnlyList<INode> cList = d.getChildrenList(snapshotId);
    if (!(cList.isEmpty() && ignoreEmptyDir)) {
      check(d, snapshotId, access);
    }

    for(INode child : cList) {
      if (child.isDirectory()) {
        directories.push(child.asDirectory());
      }
    }
  }
}
Project: FlexMap    File: FSImageFormat.java
/**
 * Save children INodes.
 * @param children The list of children INodes
 * @param out The DataOutputStream to write
 * @param inSnapshot Whether the parent directory or its ancestor is in
 *                   the deleted list of some snapshot (caused by rename or
 *                   deletion)
 * @param counter Counter to increment for namenode startup progress
 * @return Number of children that are directories
 */
private int saveChildren(ReadOnlyList<INode> children,
    DataOutputStream out, boolean inSnapshot, Counter counter)
    throws IOException {
  // Write normal children INode.
  out.writeInt(children.size());
  int dirNum = 0;
  int i = 0;
  for(INode child : children) {
    // print all children first
    // TODO: for HDFS-5428, we cannot change the format/content of fsimage
    // here, thus even if the parent directory is in snapshot, we still
    // do not handle INodeUC as those stored in deleted list
    saveINode2Image(child, out, false, referenceMap, counter);
    if (child.isDirectory()) {
      dirNum++;
    } else if (inSnapshot && child.isFile()
        && child.asFile().isUnderConstruction()) {
      this.snapshotUCMap.put(child.getId(), child.asFile());
    }
    if (i++ % 50 == 0) {
      context.checkCancelled();
    }
  }
  return dirNum;
}
Project: FlexMap    File: TestSnapshotRename.java
/**
 * Check the correctness of snapshot list within snapshottable dir
 */
private void checkSnapshotList(INodeDirectory srcRoot,
    String[] sortedNames, String[] names) {
  assertTrue(srcRoot.isSnapshottable());
  ReadOnlyList<Snapshot> listByName = srcRoot
      .getDirectorySnapshottableFeature().getSnapshotList();
  assertEquals(sortedNames.length, listByName.size());
  for (int i = 0; i < listByName.size(); i++) {
    assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
  }
  List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, listByTime.size());
  for (int i = 0; i < listByTime.size(); i++) {
    Snapshot s = srcRoot.getDirectorySnapshottableFeature().getSnapshotById(
        listByTime.get(i).getSnapshotId());
    assertEquals(names[i], s.getRoot().getLocalName());
  }
}
Project: hadoop-TCP    File: FSDirectory.java
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
    throws UnresolvedLinkException, IOException {
  Preconditions.checkState(hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = this.getINode(dirPath);
  final INodeDirectorySnapshottable dirNode = INodeDirectorySnapshottable
      .valueOf(node, dirPath);
  final ReadOnlyList<Snapshot> snapshots = dirNode.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit);
  final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, null);
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
Project: hadoop-TCP    File: INodeDirectoryWithSnapshot.java
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  if ((useCache && isQuotaSet()) || lastSnapshotId == Snapshot.INVALID_ID) {
    return super.computeQuotaUsage(counts, useCache, lastSnapshotId);
  }

  Snapshot lastSnapshot = diffs.getSnapshotById(lastSnapshotId);

  ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshot);
  for (INode child : childrenList) {
    child.computeQuotaUsage(counts, useCache, lastSnapshotId);
  }

  counts.add(Quota.NAMESPACE, 1);
  return counts;
}
Project: hadoop-TCP    File: FSImageFormat.java
/**
 * Save children INodes.
 * @param children The list of children INodes
 * @param out The DataOutputStream to write
 * @param counter Counter to increment for namenode startup progress
 * @return Number of children that are directories
 */
private int saveChildren(ReadOnlyList<INode> children, DataOutputStream out,
    Counter counter) throws IOException {
  // Write normal children INode. 
  out.writeInt(children.size());
  int dirNum = 0;
  int i = 0;
  for(INode child : children) {
    // print all children first
    saveINode2Image(child, out, false, referenceMap, counter);
    if (child.isDirectory()) {
      dirNum++;
    }
    if (i++ % 50 == 0) {
      context.checkCancelled();
    }
  }
  return dirNum;
}
Project: hardfs    File: FSDirectory.java
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
    throws UnresolvedLinkException, IOException {
  Preconditions.checkState(hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = this.getINode(dirPath);
  final INodeDirectorySnapshottable dirNode = INodeDirectorySnapshottable
      .valueOf(node, dirPath);
  final ReadOnlyList<Snapshot> snapshots = dirNode.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit);
  final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, null);
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
Project: hardfs    File: INodeDirectoryWithSnapshot.java
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  if ((useCache && isQuotaSet()) || lastSnapshotId == Snapshot.INVALID_ID) {
    return super.computeQuotaUsage(counts, useCache, lastSnapshotId);
  }

  Snapshot lastSnapshot = diffs.getSnapshotById(lastSnapshotId);

  ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshot);
  for (INode child : childrenList) {
    child.computeQuotaUsage(counts, useCache, lastSnapshotId);
  }

  counts.add(Quota.NAMESPACE, 1);
  return counts;
}
Project: hardfs    File: FSImageFormat.java
/**
 * Save children INodes.
 * @param children The list of children INodes
 * @param out The DataOutputStream to write
 * @param counter Counter to increment for namenode startup progress
 * @return Number of children that are directories
 */
private int saveChildren(ReadOnlyList<INode> children, DataOutputStream out,
    Counter counter) throws IOException {
  // Write normal children INode. 
  out.writeInt(children.size());
  int dirNum = 0;
  int i = 0;
  for(INode child : children) {
    // print all children first
    saveINode2Image(child, out, false, referenceMap, counter);
    if (child.isDirectory()) {
      dirNum++;
    }
    if (i++ % 50 == 0) {
      context.checkCancelled();
    }
  }
  return dirNum;
}
Project: hadoop    File: FSDirStatAndListingOp.java
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, String src, byte[] startAfter)
    throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = FSDirectory.normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = fsd.getINode(dirPath);
  final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
  final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
  final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
        BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
        false, INodesInPath.fromINode(sRoot));
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
Project: hadoop    File: FSDirRenameOp.java
private static void validateOverwrite(
    String src, String dst, boolean overwrite, INode srcInode, INode dstInode)
    throws IOException {
  String error; // It's OK to rename a file to a symlink and vice versa
  if (dstInode.isDirectory() != srcInode.isDirectory()) {
    error = "Source " + src + " and destination " + dst
        + " must both be directories";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + error);
    throw new IOException(error);
  }
  if (!overwrite) { // If destination exists, overwrite flag must be true
    error = "rename destination " + dst + " already exists";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + error);
    throw new FileAlreadyExistsException(error);
  }
  if (dstInode.isDirectory()) {
    final ReadOnlyList<INode> children = dstInode.asDirectory()
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    if (!children.isEmpty()) {
      error = "rename destination directory is not empty: " + dst;
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + error);
      throw new IOException(error);
    }
  }
}
Project: hadoop    File: FSImageFormatPBINode.java
void serializeINodeDirectorySection(OutputStream out) throws IOException {
  Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
      .getINodeMap().getMapIterator();
  final ArrayList<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int i = 0;
  while (iter.hasNext()) {
    INodeWithAdditionalFields n = iter.next();
    if (!n.isDirectory()) {
      continue;
    }

    ReadOnlyList<INode> children = n.asDirectory().getChildrenList(
        Snapshot.CURRENT_STATE_ID);
    if (children.size() > 0) {
      INodeDirectorySection.DirEntry.Builder b = INodeDirectorySection.
          DirEntry.newBuilder().setParent(n.getId());
      for (INode inode : children) {
        if (!inode.isReference()) {
          b.addChildren(inode.getId());
        } else {
          refList.add(inode.asReference());
          b.addRefChildren(refList.size() - 1);
        }
      }
      INodeDirectorySection.DirEntry e = b.build();
      e.writeDelimitedTo(out);
    }

    ++i;
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(summary,
      FSImageFormatProtobuf.SectionName.INODE_DIR);
}
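
The section is a sequence of length-delimited protobuf messages, so the matching loader can stream them back with parseDelimitedFrom until it returns null. A round-trip sketch, assuming the generated FsImageProto classes from the same source tree:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;

public class DelimitedRoundTrip {
  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    // Write two length-delimited DirEntry messages, as the saver does.
    INodeDirectorySection.DirEntry.newBuilder()
        .setParent(1001L).addChildren(1002L).build().writeDelimitedTo(out);
    INodeDirectorySection.DirEntry.newBuilder()
        .setParent(2001L).build().writeDelimitedTo(out);

    ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
    INodeDirectorySection.DirEntry e;
    while ((e = INodeDirectorySection.DirEntry.parseDelimitedFrom(in)) != null) {
      System.out.println(e.getParent()); // 1001, then 2001
    }
  }
}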
Project: hadoop    File: CacheManager.java
/**
 * Computes the needed number of bytes and files for a path.
 * @return CacheDirectiveStats describing the needed stats for this path
 */
private CacheDirectiveStats computeNeeded(String path, short replication) {
  FSDirectory fsDir = namesystem.getFSDirectory();
  INode node;
  long requestedBytes = 0;
  long requestedFiles = 0;
  CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();
  try {
    node = fsDir.getINode(path);
  } catch (UnresolvedLinkException e) {
    // We don't cache through symlinks
    return builder.build();
  }
  if (node == null) {
    return builder.build();
  }
  if (node.isFile()) {
    requestedFiles = 1;
    INodeFile file = node.asFile();
    requestedBytes = file.computeFileSize();
  } else if (node.isDirectory()) {
    INodeDirectory dir = node.asDirectory();
    ReadOnlyList<INode> children = dir
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    requestedFiles = children.size();
    for (INode child : children) {
      if (child.isFile()) {
        requestedBytes += child.asFile().computeFileSize();
      }
    }
  }
  return new CacheDirectiveStats.Builder()
      .setBytesNeeded(requestedBytes)
      .setFilesCached(requestedFiles)
      .build();
}
Project: hadoop    File: SnapshotFSImageFormat.java
/**
 * Save snapshots and snapshot quota for a snapshottable directory.
 * @param current The directory that the snapshots belong to.
 * @param out The {@link DataOutput} to write.
 * @throws IOException
 */
public static void saveSnapshots(INodeDirectory current, DataOutput out)
    throws IOException {
  DirectorySnapshottableFeature sf = current.getDirectorySnapshottableFeature();
  Preconditions.checkArgument(sf != null);
  // list of snapshots in snapshotsByNames
  ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  out.writeInt(snapshots.size());
  for (Snapshot s : snapshots) {
    // write the snapshot id
    out.writeInt(s.getId());
  }
  // snapshot quota
  out.writeInt(sf.getSnapshotQuota());
}
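
The framing here is an int count, then count snapshot ids, then the snapshot quota, which fixes what any loader must read back. A hypothetical reader for just that framing (the class and method names are illustrative, not Hadoop's):

import java.io.DataInput;
import java.io.IOException;

public class SnapshotSectionReader {
  /** Hypothetical counterpart to saveSnapshots(): ids first, quota last. */
  static int[] loadSnapshotIds(DataInput in) throws IOException {
    final int n = in.readInt();       // number of snapshots
    final int[] ids = new int[n];
    for (int i = 0; i < n; i++) {
      ids[i] = in.readInt();          // one id per snapshot
    }
    // the caller reads the snapshot quota next, with in.readInt()
    return ids;
  }
}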
Project: hadoop    File: DirectoryWithSnapshotFeature.java
/**
 * @return The children list of a directory in a snapshot.
 *         Since the snapshot is read-only, the logical view of the list is
 *         never changed although the internal data structure may mutate.
 */
private ReadOnlyList<INode> getChildrenList(final INodeDirectory currentDir) {
  return new ReadOnlyList<INode>() {
    private List<INode> children = null;

    private List<INode> initChildren() {
      if (children == null) {
        final ChildrenDiff combined = new ChildrenDiff();
        for (DirectoryDiff d = DirectoryDiff.this; d != null; 
            d = d.getPosterior()) {
          combined.combinePosterior(d.diff, null);
        }
        children = combined.apply2Current(ReadOnlyList.Util.asList(
            currentDir.getChildrenList(Snapshot.CURRENT_STATE_ID)));
      }
      return children;
    }

    @Override
    public Iterator<INode> iterator() {
      return initChildren().iterator();
    }

    @Override
    public boolean isEmpty() {
      return childrenSize == 0;
    }

    @Override
    public int size() {
      return childrenSize;
    }

    @Override
    public INode get(int i) {
      return initChildren().get(i);
    }
  };
}
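
The anonymous class above is a memoized view: size comes cheaply from childrenSize, while the merged children list is built once, on the first iterator() or get() call. The same idiom, written as a hypothetical standalone helper against the interface sketch near the top of this page:

import java.util.Iterator;
import java.util.List;
import java.util.function.Supplier;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

/** Hypothetical helper, not part of Hadoop: a lazily materialized view. */
public final class LazyReadOnlyList<E> implements ReadOnlyList<E> {
  private final int size;                   // known up front
  private final Supplier<List<E>> compute;  // expensive merge, run at most once
  private List<E> elements;

  public LazyReadOnlyList(int size, Supplier<List<E>> compute) {
    this.size = size;
    this.compute = compute;
  }

  private List<E> elements() {
    if (elements == null) {
      elements = compute.get();
    }
    return elements;
  }

  @Override public boolean isEmpty() { return size == 0; }
  @Override public int size() { return size; }
  @Override public E get(int i) { return elements().get(i); }
  @Override public Iterator<E> iterator() { return elements().iterator(); }
}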
Project: hadoop    File: DirectoryWithSnapshotFeature.java
/**
 * @return If there is no corresponding directory diff for the given
 *         snapshot, this means that the current children list should be
 *         returned for the snapshot. Otherwise we calculate the children list
 *         for the snapshot and return it. 
 */
public ReadOnlyList<INode> getChildrenList(INodeDirectory currentINode,
    final int snapshotId) {
  final DirectoryDiff diff = diffs.getDiffById(snapshotId);
  return diff != null ? diff.getChildrenList(currentINode) : currentINode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
}
Project: hadoop    File: INodeDirectory.java
/**
 * @param name the name of the child
 * @param snapshotId
 *          if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
 *          from the corresponding snapshot; otherwise, get the result from
 *          the current directory.
 * @return the child inode.
 */
public INode getChild(byte[] name, int snapshotId) {
  DirectoryWithSnapshotFeature sf;
  if (snapshotId == Snapshot.CURRENT_STATE_ID || 
      (sf = getDirectoryWithSnapshotFeature()) == null) {
    ReadOnlyList<INode> c = getCurrentChildrenList();
    final int i = ReadOnlyList.Util.binarySearch(c, name);
    return i < 0 ? null : c.get(i);
  }

  return sf.getChild(this, name, snapshotId);
}
Project: hadoop    File: INodeDirectory.java
/**
 * Given a child's name, return the index of the next child
 *
 * @param name a child's name
 * @return the index of the next child
 */
static int nextChild(ReadOnlyList<INode> children, byte[] name) {
  if (name.length == 0) { // empty name
    return 0;
  }
  int nextPos = ReadOnlyList.Util.binarySearch(children, name) + 1;
  if (nextPos >= 0) {
    return nextPos;
  }
  return -nextPos;
}
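
Both binarySearch outcomes collapse to the same answer here: the index of the first child whose name sorts strictly after name. Tracing the arithmetic with hypothetical indexes:

public class NextChildDemo {
  public static void main(String[] args) {
    // Children sorted as [a, b, d].
    // name = "b": binarySearch -> 1; nextPos = 2 >= 0, return 2 ("d").
    // name = "c": binarySearch -> -(2) - 1 = -3; nextPos = -2 < 0, return 2 ("d").
    System.out.println(next(1));   // 2
    System.out.println(next(-3));  // 2
  }
  static int next(int binarySearchResult) {
    int nextPos = binarySearchResult + 1;  // same steps as nextChild()
    return nextPos >= 0 ? nextPos : -nextPos;
  }
}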
Project: hadoop    File: INodeDirectory.java
@Override
public QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
    byte blockStoragePolicyId, QuotaCounts counts, boolean useCache,
    int lastSnapshotId) {
  final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();

  // we are computing the quota usage for a specific snapshot here, i.e., the
  // computation only includes files/directories that exist at the time of the
  // given snapshot
  if (sf != null && lastSnapshotId != Snapshot.CURRENT_STATE_ID
      && !(useCache && isQuotaSet())) {
    ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshotId);
    for (INode child : childrenList) {
      final byte childPolicyId = child.getStoragePolicyIDForQuota(blockStoragePolicyId);
      child.computeQuotaUsage(bsps, childPolicyId, counts, useCache,
          lastSnapshotId);
    }
    counts.addNameSpace(1);
    return counts;
  }

  // compute the quota usage in the scope of the current directory tree
  final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
  if (useCache && q != null && q.isQuotaSet()) { // use the cached quota
    return q.AddCurrentSpaceUsage(counts);
  } else {
    useCache = q != null && !q.isQuotaSet() ? false : useCache;
    return computeDirectoryQuotaUsage(bsps, blockStoragePolicyId, counts,
        useCache, lastSnapshotId);
  }
}
Project: hadoop    File: INodeDirectory.java
protected ContentSummaryComputationContext computeDirectoryContentSummary(
    ContentSummaryComputationContext summary, int snapshotId) {
  ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
  // Explicit traversing is done to enable repositioning after relinquishing
  // and reacquiring locks.
  for (int i = 0;  i < childrenList.size(); i++) {
    INode child = childrenList.get(i);
    byte[] childName = child.getLocalNameBytes();

    long lastYieldCount = summary.getYieldCount();
    child.computeContentSummary(summary);

    // Check whether the computation was paused in the subtree.
    // The counts may be off, but traversing the rest of children
    // should be made safe.
    if (lastYieldCount == summary.getYieldCount()) {
      continue;
    }
    // The locks were released and reacquired. Check parent first.
    if (getParent() == null) {
      // Stop further counting and return whatever we have so far.
      break;
    }
    // Obtain the children list again since it may have been modified.
    childrenList = getChildrenList(snapshotId);
    // Reposition in case the children list has changed. Decrement by 1
    // since the loop will increment it.
    i = nextChild(childrenList, childName) - 1;
  }

  // Increment the directory count for this directory.
  summary.getCounts().addContent(Content.DIRECTORY, 1);
  // Relinquish and reacquire locks if necessary.
  summary.yield();
  return summary;
}
Project: aliyun-oss-hadoop-fs    File: FSDirStatAndListingOp.java
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, String src, byte[] startAfter)
    throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = FSDirectory.normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = fsd.getINode(dirPath);
  final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
  final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
  final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
    INodeAttributes nodeAttrs = getINodeAttributes(
        fsd, src, sRoot.getLocalNameBytes(),
        node, Snapshot.CURRENT_STATE_ID);
    listing[i] = createFileStatus(
        fsd, sRoot.getLocalNameBytes(),
        sRoot, nodeAttrs,
        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
        Snapshot.CURRENT_STATE_ID, false,
        INodesInPath.fromINode(sRoot));
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
Project: aliyun-oss-hadoop-fs    File: FSDirRenameOp.java
private static void validateOverwrite(
    String src, String dst, boolean overwrite, INode srcInode, INode dstInode)
    throws IOException {
  String error; // It's OK to rename a file to a symlink and vice versa
  if (dstInode.isDirectory() != srcInode.isDirectory()) {
    error = "Source " + src + " and destination " + dst
        + " must both be directories";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + error);
    throw new IOException(error);
  }
  if (!overwrite) { // If destination exists, overwrite flag must be true
    error = "rename destination " + dst + " already exists";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + error);
    throw new FileAlreadyExistsException(error);
  }
  if (dstInode.isDirectory()) {
    final ReadOnlyList<INode> children = dstInode.asDirectory()
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    if (!children.isEmpty()) {
      error = "rename destination directory is not empty: " + dst;
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + error);
      throw new IOException(error);
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: FSDirSnapshotOp.java
/** Get a collection of full snapshot paths given file and snapshot dir.
 * @param lsf a list of snapshottable features
 * @param file full path of the file
 * @return collection of full paths of snapshot of the file
 */
static Collection<String> getSnapshotFiles(FSDirectory fsd,
    List<DirectorySnapshottableFeature> lsf,
    String file) throws IOException {
  ArrayList<String> snaps = new ArrayList<>();
  for (DirectorySnapshottableFeature sf : lsf) {
    // for each snapshottable dir e.g. /dir1, /dir2
    final ReadOnlyList<Snapshot> lsnap = sf.getSnapshotList();
    for (Snapshot s : lsnap) {
      // for each snapshot name under snapshottable dir
      // e.g. /dir1/.snapshot/s1, /dir1/.snapshot/s2
      final String dirName = s.getRoot().getRootFullPathName();
      if (!file.startsWith(dirName)) {
        // file not in current snapshot root dir, no need to check other snaps
        break;
      }
      String snapname = s.getRoot().getFullPathName();
      if (dirName.equals(Path.SEPARATOR)) { // handle rootDir
        snapname += Path.SEPARATOR;
      }
      snapname += file.substring(file.indexOf(dirName) + dirName.length());
      if (fsd.getFSNamesystem().getFileInfo(snapname, true) != null) {
        snaps.add(snapname);
      }
    }
  }
  return snaps;
}
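
The string arithmetic above splices the snapshot root's path in front of the file's path suffix. A worked example with made-up paths:

public class SnapshotPathDemo {
  public static void main(String[] args) {
    String dirName = "/dir1";                 // snapshot root dir (illustrative)
    String snapname = "/dir1/.snapshot/s1";   // s.getRoot().getFullPathName()
    String file = "/dir1/sub/f";
    // Same splice as getSnapshotFiles(): keep the suffix after the dir name.
    String mapped =
        snapname + file.substring(file.indexOf(dirName) + dirName.length());
    System.out.println(mapped);               // /dir1/.snapshot/s1/sub/f
  }
}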
Project: aliyun-oss-hadoop-fs    File: FSImageFormatPBINode.java
void serializeINodeDirectorySection(OutputStream out) throws IOException {
  Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
      .getINodeMap().getMapIterator();
  final ArrayList<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int i = 0;
  while (iter.hasNext()) {
    INodeWithAdditionalFields n = iter.next();
    if (!n.isDirectory()) {
      continue;
    }

    ReadOnlyList<INode> children = n.asDirectory().getChildrenList(
        Snapshot.CURRENT_STATE_ID);
    if (children.size() > 0) {
      INodeDirectorySection.DirEntry.Builder b = INodeDirectorySection.
          DirEntry.newBuilder().setParent(n.getId());
      for (INode inode : children) {
        if (!inode.isReference()) {
          b.addChildren(inode.getId());
        } else {
          refList.add(inode.asReference());
          b.addRefChildren(refList.size() - 1);
        }
      }
      INodeDirectorySection.DirEntry e = b.build();
      e.writeDelimitedTo(out);
    }

    ++i;
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(summary,
      FSImageFormatProtobuf.SectionName.INODE_DIR);
}
Project: aliyun-oss-hadoop-fs    File: CacheManager.java
/**
 * Computes the needed number of bytes and files for a path.
 * @return CacheDirectiveStats describing the needed stats for this path
 */
private CacheDirectiveStats computeNeeded(String path, short replication) {
  FSDirectory fsDir = namesystem.getFSDirectory();
  INode node;
  long requestedBytes = 0;
  long requestedFiles = 0;
  CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();
  try {
    node = fsDir.getINode(path);
  } catch (UnresolvedLinkException e) {
    // We don't cache through symlinks
    return builder.build();
  }
  if (node == null) {
    return builder.build();
  }
  if (node.isFile()) {
    requestedFiles = 1;
    INodeFile file = node.asFile();
    requestedBytes = file.computeFileSize();
  } else if (node.isDirectory()) {
    INodeDirectory dir = node.asDirectory();
    ReadOnlyList<INode> children = dir
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    requestedFiles = children.size();
    for (INode child : children) {
      if (child.isFile()) {
        requestedBytes += child.asFile().computeFileSize();
      }
    }
  }
  return new CacheDirectiveStats.Builder()
      .setBytesNeeded(requestedBytes)
      .setFilesCached(requestedFiles)
      .build();
}