Java 类org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat 实例源码

项目:hadoop    文件:FSImageSerialization.java   
/**
 * Serialize a {@link INodeFile} node. Fields are written in a fixed
 * sequence; do not reorder the writes below.
 * @param file The file inode to write
 * @param out The {@link DataOutput} where the fields are written
 * @param writeUnderConstruction Whether to write the under-construction
 *        (client name/machine) information
 */
public static void writeINodeFile(INodeFile file, DataOutput out,
    boolean writeUnderConstruction) throws IOException {
  // Local name followed by the scalar inode attributes.
  writeLocalName(file, out);
  out.writeLong(file.getId());
  out.writeShort(file.getFileReplication());
  out.writeLong(file.getModificationTime());
  out.writeLong(file.getAccessTime());
  out.writeLong(file.getPreferredBlockSize());

  // Block list, then the snapshot file-diff list.
  writeBlocks(file.getBlocks(), out);
  SnapshotFSImageFormat.saveFileDiffList(file, out);

  if (writeUnderConstruction) {
    if (file.isUnderConstruction()) {
      // Marker flag, then the client name and machine.
      out.writeBoolean(true);
      final FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
      writeString(uc.getClientName(), out);
      writeString(uc.getClientMachine(), out);
    } else {
      out.writeBoolean(false);
    }
  }

  writePermissionStatus(file, out);
}
项目:aliyun-oss-hadoop-fs    文件:FSImageSerialization.java   
/**
 * Serialize a {@link INodeFile} node
 * @param file The INodeFile to write
 * @param out The {@link DataOutput} where the fields are written
 * @param writeUnderConstruction Whether to write under construction information
 */
public static void writeINodeFile(INodeFile file, DataOutput out,
    boolean writeUnderConstruction) throws IOException {
  // Name and scalar attributes, in the exact order the loader expects.
  writeLocalName(file, out);
  out.writeLong(file.getId());
  out.writeShort(file.getFileReplication());
  out.writeLong(file.getModificationTime());
  out.writeLong(file.getAccessTime());
  out.writeLong(file.getPreferredBlockSize());

  // Block list, then the snapshot file-diff list.
  writeBlocks(file.getBlocks(), out);
  SnapshotFSImageFormat.saveFileDiffList(file, out);

  if (writeUnderConstruction) {
    // A single flag tells the reader whether client info follows.
    final boolean underConstruction = file.isUnderConstruction();
    out.writeBoolean(underConstruction);
    if (underConstruction) {
      final FileUnderConstructionFeature ucFeature =
          file.getFileUnderConstructionFeature();
      writeString(ucFeature.getClientName(), out);
      writeString(ucFeature.getClientMachine(), out);
    }
  }

  writePermissionStatus(file, out);
}
项目:big-c    文件:FSImageSerialization.java   
/**
 * Serialize a {@link INodeFile} node. Fields are written in a fixed
 * sequence; do not reorder the writes below.
 * @param file The file inode to write
 * @param out The {@link DataOutput} where the fields are written
 * @param writeUnderConstruction Whether to write the under-construction
 *        (client name/machine) information
 */
public static void writeINodeFile(INodeFile file, DataOutput out,
    boolean writeUnderConstruction) throws IOException {
  // Local name followed by the scalar inode attributes.
  writeLocalName(file, out);
  out.writeLong(file.getId());
  out.writeShort(file.getFileReplication());
  out.writeLong(file.getModificationTime());
  out.writeLong(file.getAccessTime());
  out.writeLong(file.getPreferredBlockSize());

  // Block list, then the snapshot file-diff list.
  writeBlocks(file.getBlocks(), out);
  SnapshotFSImageFormat.saveFileDiffList(file, out);

  if (writeUnderConstruction) {
    if (file.isUnderConstruction()) {
      // Marker flag, then the client name and machine.
      out.writeBoolean(true);
      final FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
      writeString(uc.getClientName(), out);
      writeString(uc.getClientMachine(), out);
    } else {
      out.writeBoolean(false);
    }
  }

  writePermissionStatus(file, out);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSImageSerialization.java   
/**
 * Serialize a {@link INodeFile} node. Fields are written in a fixed
 * sequence; do not reorder the writes below.
 * @param file The file inode to write
 * @param out The {@link DataOutput} where the fields are written
 * @param writeUnderConstruction Whether to write the under-construction
 *        (client name/machine) information
 */
public static void writeINodeFile(INodeFile file, DataOutput out,
    boolean writeUnderConstruction) throws IOException {
  // Local name followed by the scalar inode attributes.
  writeLocalName(file, out);
  out.writeLong(file.getId());
  out.writeShort(file.getFileReplication());
  out.writeLong(file.getModificationTime());
  out.writeLong(file.getAccessTime());
  out.writeLong(file.getPreferredBlockSize());

  // Block list, then the snapshot file-diff list.
  writeBlocks(file.getBlocks(), out);
  SnapshotFSImageFormat.saveFileDiffList(file, out);

  if (writeUnderConstruction) {
    if (file.isUnderConstruction()) {
      // Marker flag, then the client name and machine.
      out.writeBoolean(true);
      final FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
      writeString(uc.getClientName(), out);
      writeString(uc.getClientMachine(), out);
    } else {
      out.writeBoolean(false);
    }
  }

  writePermissionStatus(file, out);
}
项目:hadoop-plus    文件:FSImageSerialization.java   
/**
 * Serialize a {@link INodeFile} node. Fields are written in a fixed
 * sequence; do not reorder the writes below.
 * @param file The file inode to write
 * @param out The {@link DataOutput} where the fields are written
 * @param writeUnderConstruction Whether to write the under-construction
 *        (client name/machine) information
 */
public static void writeINodeFile(INodeFile file, DataOutput out,
    boolean writeUnderConstruction) throws IOException {
  // Local name followed by the scalar inode attributes.
  writeLocalName(file, out);
  out.writeLong(file.getId());
  out.writeShort(file.getFileReplication());
  out.writeLong(file.getModificationTime());
  out.writeLong(file.getAccessTime());
  out.writeLong(file.getPreferredBlockSize());

  // Block list, then the snapshot file-diff list.
  writeBlocks(file.getBlocks(), out);
  SnapshotFSImageFormat.saveFileDiffList(file, out);

  if (writeUnderConstruction) {
    // In this fork, under-construction state is a subclass, not a feature.
    if (file instanceof INodeFileUnderConstruction) {
      // Marker flag, then the client name and machine.
      out.writeBoolean(true);
      final INodeFileUnderConstruction uc = (INodeFileUnderConstruction)file;
      writeString(uc.getClientName(), out);
      writeString(uc.getClientMachine(), out);
    } else {
      out.writeBoolean(false);
    }
  }

  writePermissionStatus(file, out);
}
项目:FlexMap    文件:FSImageSerialization.java   
/**
 * Serialize a {@link INodeFile} node. Fields are written in a fixed
 * sequence; do not reorder the writes below.
 * @param file The file inode to write
 * @param out The {@link DataOutput} where the fields are written
 * @param writeUnderConstruction Whether to write the under-construction
 *        (client name/machine) information
 */
public static void writeINodeFile(INodeFile file, DataOutput out,
    boolean writeUnderConstruction) throws IOException {
  // Local name followed by the scalar inode attributes.
  writeLocalName(file, out);
  out.writeLong(file.getId());
  out.writeShort(file.getFileReplication());
  out.writeLong(file.getModificationTime());
  out.writeLong(file.getAccessTime());
  out.writeLong(file.getPreferredBlockSize());

  // Block list, then the snapshot file-diff list.
  writeBlocks(file.getBlocks(), out);
  SnapshotFSImageFormat.saveFileDiffList(file, out);

  if (writeUnderConstruction) {
    if (file.isUnderConstruction()) {
      // Marker flag, then the client name and machine.
      out.writeBoolean(true);
      final FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
      writeString(uc.getClientName(), out);
      writeString(uc.getClientMachine(), out);
    } else {
      out.writeBoolean(false);
    }
  }

  writePermissionStatus(file, out);
}
项目:hadoop-TCP    文件:FSImageSerialization.java   
/**
 * Serialize a {@link INodeFile} node. Fields are written in a fixed
 * sequence; do not reorder the writes below.
 * @param file The file inode to write
 * @param out The {@link DataOutput} where the fields are written
 * @param writeUnderConstruction Whether to write the under-construction
 *        (client name/machine) information
 */
public static void writeINodeFile(INodeFile file, DataOutput out,
    boolean writeUnderConstruction) throws IOException {
  // Local name followed by the scalar inode attributes.
  writeLocalName(file, out);
  out.writeLong(file.getId());
  out.writeShort(file.getFileReplication());
  out.writeLong(file.getModificationTime());
  out.writeLong(file.getAccessTime());
  out.writeLong(file.getPreferredBlockSize());

  // Block list, then the snapshot file-diff list.
  writeBlocks(file.getBlocks(), out);
  SnapshotFSImageFormat.saveFileDiffList(file, out);

  if (writeUnderConstruction) {
    // In this fork, under-construction state is a subclass, not a feature.
    if (file instanceof INodeFileUnderConstruction) {
      // Marker flag, then the client name and machine.
      out.writeBoolean(true);
      final INodeFileUnderConstruction uc = (INodeFileUnderConstruction)file;
      writeString(uc.getClientName(), out);
      writeString(uc.getClientMachine(), out);
    } else {
      out.writeBoolean(false);
    }
  }

  writePermissionStatus(file, out);
}
项目:hardfs    文件:FSImageSerialization.java   
/**
 * Serialize a {@link INodeFile} node. Fields are written in a fixed
 * sequence; do not reorder the writes below.
 * @param file The file inode to write
 * @param out The {@link DataOutput} where the fields are written
 * @param writeUnderConstruction Whether to write the under-construction
 *        (client name/machine) information
 */
public static void writeINodeFile(INodeFile file, DataOutput out,
    boolean writeUnderConstruction) throws IOException {
  // Local name followed by the scalar inode attributes.
  writeLocalName(file, out);
  out.writeLong(file.getId());
  out.writeShort(file.getFileReplication());
  out.writeLong(file.getModificationTime());
  out.writeLong(file.getAccessTime());
  out.writeLong(file.getPreferredBlockSize());

  // Block list, then the snapshot file-diff list.
  writeBlocks(file.getBlocks(), out);
  SnapshotFSImageFormat.saveFileDiffList(file, out);

  if (writeUnderConstruction) {
    // In this fork, under-construction state is a subclass, not a feature.
    if (file instanceof INodeFileUnderConstruction) {
      // Marker flag, then the client name and machine.
      out.writeBoolean(true);
      final INodeFileUnderConstruction uc = (INodeFileUnderConstruction)file;
      writeString(uc.getClientName(), out);
      writeString(uc.getClientMachine(), out);
    } else {
      out.writeBoolean(false);
    }
  }

  writePermissionStatus(file, out);
}
项目:hadoop    文件:FSImageFormat.java   
/**
 * Load a directory when snapshot is supported.
 * @param in The {@link DataInput} instance to read.
 * @param counter Counter to increment for namenode startup progress
 */
private void loadDirectoryWithSnapshot(DataInput in, Counter counter)
    throws IOException {
  // Step 1. Resolve the parent directory from the serialized inode id.
  final long parentInodeId = in.readLong();
  final INodeDirectory parent =
      this.namesystem.dir.getInode(parentInodeId).asDirectory();

  // Skip the subtree when the reference map says it is not to be
  // processed here (reference nodes may share a saved subtree).
  if (!referenceMap.toProcessSubtree(parent.getId())) {
    return;
  }

  // Step 2. Snapshot section — only present for a non-negative count.
  final int snapshotCount = in.readInt();
  if (snapshotCount >= 0) {
    // Load the snapshot list and snapshotQuota.
    SnapshotFSImageFormat.loadSnapshotList(parent, snapshotCount, in, this);
    if (parent.getDirectorySnapshottableFeature().getSnapshotQuota() > 0) {
      // Register with SnapshotManager. Note that root is only added
      // when its snapshot quota is positive.
      this.namesystem.getSnapshotManager().addSnapshottable(parent);
    }
  }

  // Step 3. Children directly under this directory.
  loadChildren(parent, in, counter);

  // Step 4. Directory diff list.
  SnapshotFSImageFormat.loadDirectoryDiffList(parent, in, this);

  // Step 5. Recurse into each serialized sub-directory, including
  // snapshot copies of deleted directories.
  final int subtreeCount = in.readInt();
  for (int idx = 0; idx < subtreeCount; idx++) {
    loadDirectoryWithSnapshot(in, counter);
  }
}
项目:aliyun-oss-hadoop-fs    文件:FSImageFormat.java   
/**
 * Load a directory when snapshot is supported.
 * The reads below must happen in exactly this order — the stream was
 * written field-by-field in the same sequence.
 * @param in The {@link DataInput} instance to read.
 * @param counter Counter to increment for namenode startup progress
 */
private void loadDirectoryWithSnapshot(DataInput in, Counter counter)
    throws IOException {
  // Step 1. Identify the parent INode from the serialized inode id
  long inodeId = in.readLong();
  final INodeDirectory parent = this.namesystem.dir.getInode(inodeId)
      .asDirectory();

  // Check if the whole subtree has been saved (for reference nodes);
  // when not, there is nothing further to read for this directory here
  boolean toLoadSubtree = referenceMap.toProcessSubtree(parent.getId());
  if (!toLoadSubtree) {
    return;
  }

  // Step 2. Load snapshots if parent is snapshottable; the snapshot
  // section is only read for a non-negative count
  int numSnapshots = in.readInt();
  if (numSnapshots >= 0) {
    // load snapshots and snapshotQuota
    SnapshotFSImageFormat.loadSnapshotList(parent, numSnapshots, in, this);
    if (parent.getDirectorySnapshottableFeature().getSnapshotQuota() > 0) {
      // add the directory to the snapshottable directory list in
      // SnapshotManager. Note that we only add root when its snapshot quota
      // is positive.
      this.namesystem.getSnapshotManager().addSnapshottable(parent);
    }
  }

  // Step 3. Load children nodes under parent
  loadChildren(parent, in, counter);

  // Step 4. Load the directory diff list
  SnapshotFSImageFormat.loadDirectoryDiffList(parent, in, this);

  // Step 5. Recursively load sub-directories, including snapshot copies
  // of deleted directories
  int numSubTree = in.readInt();
  for (int i = 0; i < numSubTree; i++) {
    loadDirectoryWithSnapshot(in, counter);
  }
}
项目:big-c    文件:FSImageFormat.java   
/**
 * Load a directory when snapshot is supported.
 * The reads below must happen in exactly this order — the stream was
 * written field-by-field in the same sequence.
 * @param in The {@link DataInput} instance to read.
 * @param counter Counter to increment for namenode startup progress
 */
private void loadDirectoryWithSnapshot(DataInput in, Counter counter)
    throws IOException {
  // Step 1. Identify the parent INode from the serialized inode id
  long inodeId = in.readLong();
  final INodeDirectory parent = this.namesystem.dir.getInode(inodeId)
      .asDirectory();

  // Check if the whole subtree has been saved (for reference nodes);
  // when not, there is nothing further to read for this directory here
  boolean toLoadSubtree = referenceMap.toProcessSubtree(parent.getId());
  if (!toLoadSubtree) {
    return;
  }

  // Step 2. Load snapshots if parent is snapshottable; the snapshot
  // section is only read for a non-negative count
  int numSnapshots = in.readInt();
  if (numSnapshots >= 0) {
    // load snapshots and snapshotQuota
    SnapshotFSImageFormat.loadSnapshotList(parent, numSnapshots, in, this);
    if (parent.getDirectorySnapshottableFeature().getSnapshotQuota() > 0) {
      // add the directory to the snapshottable directory list in
      // SnapshotManager. Note that we only add root when its snapshot quota
      // is positive.
      this.namesystem.getSnapshotManager().addSnapshottable(parent);
    }
  }

  // Step 3. Load children nodes under parent
  loadChildren(parent, in, counter);

  // Step 4. Load the directory diff list
  SnapshotFSImageFormat.loadDirectoryDiffList(parent, in, this);

  // Step 5. Recursively load sub-directories, including snapshot copies
  // of deleted directories
  int numSubTree = in.readInt();
  for (int i = 0; i < numSubTree; i++) {
    loadDirectoryWithSnapshot(in, counter);
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSImageFormat.java   
/**
 * Load a directory when snapshot is supported.
 * The reads below must happen in exactly this order — the stream was
 * written field-by-field in the same sequence.
 * @param in The {@link DataInput} instance to read.
 * @param counter Counter to increment for namenode startup progress
 */
private void loadDirectoryWithSnapshot(DataInput in, Counter counter)
    throws IOException {
  // Step 1. Identify the parent INode from the serialized inode id
  long inodeId = in.readLong();
  final INodeDirectory parent = this.namesystem.dir.getInode(inodeId)
      .asDirectory();

  // Check if the whole subtree has been saved (for reference nodes);
  // when not, there is nothing further to read for this directory here
  boolean toLoadSubtree = referenceMap.toProcessSubtree(parent.getId());
  if (!toLoadSubtree) {
    return;
  }

  // Step 2. Load snapshots if parent is snapshottable; the snapshot
  // section is only read for a non-negative count
  int numSnapshots = in.readInt();
  if (numSnapshots >= 0) {
    // load snapshots and snapshotQuota
    SnapshotFSImageFormat.loadSnapshotList(parent, numSnapshots, in, this);
    if (parent.getDirectorySnapshottableFeature().getSnapshotQuota() > 0) {
      // add the directory to the snapshottable directory list in
      // SnapshotManager. Note that we only add root when its snapshot quota
      // is positive.
      this.namesystem.getSnapshotManager().addSnapshottable(parent);
    }
  }

  // Step 3. Load children nodes under parent
  loadChildren(parent, in, counter);

  // Step 4. Load the directory diff list
  SnapshotFSImageFormat.loadDirectoryDiffList(parent, in, this);

  // Step 5. Recursively load sub-directories, including snapshot copies
  // of deleted directories
  int numSubTree = in.readInt();
  for (int i = 0; i < numSubTree; i++) {
    loadDirectoryWithSnapshot(in, counter);
  }
}
项目:FlexMap    文件:FSImageFormat.java   
/**
 * Load a directory when snapshot is supported.
 * The reads below must happen in exactly this order — the stream was
 * written field-by-field in the same sequence.
 * @param in The {@link DataInput} instance to read.
 * @param counter Counter to increment for namenode startup progress
 */
private void loadDirectoryWithSnapshot(DataInput in, Counter counter)
    throws IOException {
  // Step 1. Identify the parent INode from the serialized inode id
  long inodeId = in.readLong();
  final INodeDirectory parent = this.namesystem.dir.getInode(inodeId)
      .asDirectory();

  // Check if the whole subtree has been saved (for reference nodes);
  // when not, there is nothing further to read for this directory here
  boolean toLoadSubtree = referenceMap.toProcessSubtree(parent.getId());
  if (!toLoadSubtree) {
    return;
  }

  // Step 2. Load snapshots if parent is snapshottable; the snapshot
  // section is only read for a non-negative count
  int numSnapshots = in.readInt();
  if (numSnapshots >= 0) {
    // load snapshots and snapshotQuota
    SnapshotFSImageFormat.loadSnapshotList(parent, numSnapshots, in, this);
    if (parent.getDirectorySnapshottableFeature().getSnapshotQuota() > 0) {
      // add the directory to the snapshottable directory list in
      // SnapshotManager. Note that we only add root when its snapshot quota
      // is positive.
      this.namesystem.getSnapshotManager().addSnapshottable(parent);
    }
  }

  // Step 3. Load children nodes under parent
  loadChildren(parent, in, counter);

  // Step 4. Load the directory diff list
  SnapshotFSImageFormat.loadDirectoryDiffList(parent, in, this);

  // Step 5. Recursively load sub-directories, including snapshot copies
  // of deleted directories
  int numSubTree = in.readInt();
  for (int i = 0; i < numSubTree; i++) {
    loadDirectoryWithSnapshot(in, counter);
  }
}
项目:hadoop-plus    文件:FSImageFormat.java   
/**
 * Load a directory when snapshot is supported.
 * @param in The {@link DataInput} instance to read.
 * @param counter Counter to increment for namenode startup progress
 */
private void loadDirectoryWithSnapshot(DataInput in, Counter counter)
    throws IOException {
  // Step 1. Resolve the parent directory from the serialized inode id.
  final long parentInodeId = in.readLong();
  final INodeDirectory parent =
      this.namesystem.dir.getInode(parentInodeId).asDirectory();

  // Skip the subtree when the reference map says it is not to be
  // processed here (reference nodes may share a saved subtree).
  if (!referenceMap.toProcessSubtree(parent.getId())) {
    return;
  }

  // Step 2. Snapshot section — only present for a non-negative count.
  final int snapshotCount = in.readInt();
  if (snapshotCount >= 0) {
    final INodeDirectorySnapshottable snapshottableParent =
        INodeDirectorySnapshottable.valueOf(parent, parent.getLocalName());
    // Load the snapshot list and snapshotQuota.
    SnapshotFSImageFormat.loadSnapshotList(snapshottableParent,
        snapshotCount, in, this);
    if (snapshottableParent.getSnapshotQuota() > 0) {
      // Register with SnapshotManager. Note that root is only added
      // when its snapshot quota is positive.
      this.namesystem.getSnapshotManager().addSnapshottable(
          snapshottableParent);
    }
  }

  // Step 3. Children directly under this directory.
  loadChildren(parent, in, counter);

  // Step 4. Directory diff list.
  SnapshotFSImageFormat.loadDirectoryDiffList(parent, in, this);

  // Step 5. Recurse into each serialized sub-directory, including
  // snapshot copies of deleted directories.
  final int subtreeCount = in.readInt();
  for (int idx = 0; idx < subtreeCount; idx++) {
    loadDirectoryWithSnapshot(in, counter);
  }
}
项目:hadoop-TCP    文件:FSImageFormat.java   
/**
 * Load a directory when snapshot is supported.
 * The reads below must happen in exactly this order — the stream was
 * written field-by-field in the same sequence.
 * @param in The {@link DataInput} instance to read.
 * @param counter Counter to increment for namenode startup progress
 */
private void loadDirectoryWithSnapshot(DataInput in, Counter counter)
    throws IOException {
  // Step 1. Identify the parent INode from the serialized inode id
  long inodeId = in.readLong();
  final INodeDirectory parent = this.namesystem.dir.getInode(inodeId)
      .asDirectory();

  // Check if the whole subtree has been saved (for reference nodes);
  // when not, there is nothing further to read for this directory here
  boolean toLoadSubtree = referenceMap.toProcessSubtree(parent.getId());
  if (!toLoadSubtree) {
    return;
  }

  // Step 2. Load snapshots if parent is snapshottable; the snapshot
  // section is only read for a non-negative count
  int numSnapshots = in.readInt();
  if (numSnapshots >= 0) {
    final INodeDirectorySnapshottable snapshottableParent
        = INodeDirectorySnapshottable.valueOf(parent, parent.getLocalName());
    // load snapshots and snapshotQuota
    SnapshotFSImageFormat.loadSnapshotList(snapshottableParent,
        numSnapshots, in, this);
    if (snapshottableParent.getSnapshotQuota() > 0) {
      // add the directory to the snapshottable directory list in
      // SnapshotManager. Note that we only add root when its snapshot quota
      // is positive.
      this.namesystem.getSnapshotManager().addSnapshottable(
          snapshottableParent);
    }
  }

  // Step 3. Load children nodes under parent
  loadChildren(parent, in, counter);

  // Step 4. Load the directory diff list
  SnapshotFSImageFormat.loadDirectoryDiffList(parent, in, this);

  // Step 5. Recursively load sub-directories, including snapshot copies
  // of deleted directories
  int numSubTree = in.readInt();
  for (int i = 0; i < numSubTree; i++) {
    loadDirectoryWithSnapshot(in, counter);
  }
}
项目:hardfs    文件:FSImageFormat.java   
/**
 * Load a directory when snapshot is supported.
 * The reads below must happen in exactly this order — the stream was
 * written field-by-field in the same sequence.
 * @param in The {@link DataInput} instance to read.
 * @param counter Counter to increment for namenode startup progress
 */
private void loadDirectoryWithSnapshot(DataInput in, Counter counter)
    throws IOException {
  // Step 1. Identify the parent INode from the serialized inode id
  long inodeId = in.readLong();
  final INodeDirectory parent = this.namesystem.dir.getInode(inodeId)
      .asDirectory();

  // Check if the whole subtree has been saved (for reference nodes);
  // when not, there is nothing further to read for this directory here
  boolean toLoadSubtree = referenceMap.toProcessSubtree(parent.getId());
  if (!toLoadSubtree) {
    return;
  }

  // Step 2. Load snapshots if parent is snapshottable; the snapshot
  // section is only read for a non-negative count
  int numSnapshots = in.readInt();
  if (numSnapshots >= 0) {
    final INodeDirectorySnapshottable snapshottableParent
        = INodeDirectorySnapshottable.valueOf(parent, parent.getLocalName());
    // load snapshots and snapshotQuota
    SnapshotFSImageFormat.loadSnapshotList(snapshottableParent,
        numSnapshots, in, this);
    if (snapshottableParent.getSnapshotQuota() > 0) {
      // add the directory to the snapshottable directory list in
      // SnapshotManager. Note that we only add root when its snapshot quota
      // is positive.
      this.namesystem.getSnapshotManager().addSnapshottable(
          snapshottableParent);
    }
  }

  // Step 3. Load children nodes under parent
  loadChildren(parent, in, counter);

  // Step 4. Load the directory diff list
  SnapshotFSImageFormat.loadDirectoryDiffList(parent, in, this);

  // Step 5. Recursively load sub-directories, including snapshot copies
  // of deleted directories
  int numSubTree = in.readInt();
  for (int i = 0; i < numSubTree; i++) {
    loadDirectoryWithSnapshot(in, counter);
  }
}
项目:hadoop-on-lustre2    文件:FSImageFormat.java   
/**
 * Load a directory when snapshot is supported.
 * The reads below must happen in exactly this order — the stream was
 * written field-by-field in the same sequence.
 * @param in The {@link DataInput} instance to read.
 * @param counter Counter to increment for namenode startup progress
 */
private void loadDirectoryWithSnapshot(DataInput in, Counter counter)
    throws IOException {
  // Step 1. Identify the parent INode from the serialized inode id
  long inodeId = in.readLong();
  final INodeDirectory parent = this.namesystem.dir.getInode(inodeId)
      .asDirectory();

  // Check if the whole subtree has been saved (for reference nodes);
  // when not, there is nothing further to read for this directory here
  boolean toLoadSubtree = referenceMap.toProcessSubtree(parent.getId());
  if (!toLoadSubtree) {
    return;
  }

  // Step 2. Load snapshots if parent is snapshottable; the snapshot
  // section is only read for a non-negative count
  int numSnapshots = in.readInt();
  if (numSnapshots >= 0) {
    final INodeDirectorySnapshottable snapshottableParent
        = INodeDirectorySnapshottable.valueOf(parent, parent.getLocalName());
    // load snapshots and snapshotQuota
    SnapshotFSImageFormat.loadSnapshotList(snapshottableParent,
        numSnapshots, in, this);
    if (snapshottableParent.getSnapshotQuota() > 0) {
      // add the directory to the snapshottable directory list in
      // SnapshotManager. Note that we only add root when its snapshot quota
      // is positive.
      this.namesystem.getSnapshotManager().addSnapshottable(
          snapshottableParent);
    }
  }

  // Step 3. Load children nodes under parent
  loadChildren(parent, in, counter);

  // Step 4. Load the directory diff list
  SnapshotFSImageFormat.loadDirectoryDiffList(parent, in, this);

  // Step 5. Recursively load sub-directories, including snapshot copies
  // of deleted directories
  int numSubTree = in.readInt();
  for (int i = 0; i < numSubTree; i++) {
    loadDirectoryWithSnapshot(in, counter);
  }
}