Java 类org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature 实例源码

项目:hadoop    文件:FSEditLogOp.java   
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // One feature probe decides both the legacy length prefix and the
  // timestamp encoding.
  final boolean optimized = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  if (!optimized) {
    // Pre-optimization logs carry an explicit field count first.
    this.length = in.readInt();
    if (this.length != 3) {
      throw new IOException("Incorrect data format. "
          + "Old rename operation.");
    }
  }
  this.src = FSImageSerialization.readString(in);
  this.dst = FSImageSerialization.readString(in);
  this.timestamp = optimized
      ? FSImageSerialization.readLong(in)
      : readLong(in);

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
项目:hadoop    文件:FSEditLogOp.java   
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // One feature probe decides both the legacy length prefix and the
  // timestamp encoding.
  final boolean optimized = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  if (!optimized) {
    // Pre-optimization logs carry an explicit field count first.
    this.length = in.readInt();
    if (this.length != 2) {
      throw new IOException("Incorrect data format. " + "delete operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);
  this.timestamp = optimized
      ? FSImageSerialization.readLong(in)
      : readLong(in);
  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
项目:hadoop    文件:FSEditLogOp.java   
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // One feature probe decides both the legacy length prefix and the
  // mtime/atime encoding.
  final boolean optimized = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  if (!optimized) {
    this.length = in.readInt();
    if (length != 3) {
      throw new IOException("Incorrect data format. " + "times operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);

  if (optimized) {
    this.mtime = FSImageSerialization.readLong(in);
    this.atime = FSImageSerialization.readLong(in);
  } else {
    this.mtime = readLong(in);
    this.atime = readLong(in);
  }
}
项目:hadoop    文件:FSEditLogOp.java   
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // One feature probe decides both the legacy length prefix and the
  // timestamp encoding.
  final boolean optimized = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  if (!optimized) {
    // Pre-optimization logs carry an explicit field count first.
    this.length = in.readInt();
    if (this.length != 3) {
      throw new IOException("Incorrect data format. " + "Rename operation.");
    }
  }
  this.src = FSImageSerialization.readString(in);
  this.dst = FSImageSerialization.readString(in);

  this.timestamp = optimized
      ? FSImageSerialization.readLong(in)
      : readLong(in);
  this.options = readRenameOptions(in);

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
项目:hadoop    文件:FSEditLogOp.java   
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  // NOTE: EDITS_CHESKUM is the constant's spelling as declared in the
  // upstream LayoutVersion enum; do not "fix" it here.
  this.checksum = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)
      ? DataChecksum.newCrc32()
      : null;
  // It is possible that the logVersion is actually a future layoutversion
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layout will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  // When checksumming, wrap the stream so every read feeds the CRC.
  this.in = (this.checksum == null)
      ? in
      : new DataInputStream(new CheckedInputStream(in, this.checksum));
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
项目:hadoop    文件:FSImageFormat.java   
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  // Older layouts store a full inode here, not a compact attribute record.
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }

  // Field order below must match the on-disk layout exactly.
  final byte[] localName = FSImageSerialization.readLocalName(in);
  final PermissionStatus perm = PermissionStatus.read(in);
  final long mtime = in.readLong();
  final long atime = in.readLong();

  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long blockSize = in.readLong();

  return new INodeFileAttributes.SnapshotCopy(localName, perm, null, mtime,
      atime, replication, blockSize, (byte) 0, null);
}
项目:hadoop    文件:FSImageFormat.java   
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  // Older layouts store a full inode here, not a compact attribute record.
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asDirectory();
  }

  final byte[] localName = FSImageSerialization.readLocalName(in);
  final PermissionStatus perm = PermissionStatus.read(in);
  final long mtime = in.readLong();

  // Read quotas: quota by storage type does not need to be processed below.
  // It is handled only in protobuf based FsImagePBINode class for newer
  // fsImages. Tools using this class such as legacy-mode of offline image
  // viewer should only load legacy FSImages without newer features.
  final long nsQuota = in.readLong();
  final long dsQuota = in.readLong();

  // Both quotas unset (-1) means a plain snapshot copy, no quota record.
  if (nsQuota == -1L && dsQuota == -1L) {
    return new INodeDirectoryAttributes.SnapshotCopy(
        localName, perm, null, mtime, null);
  }
  return new INodeDirectoryAttributes.CopyWithQuota(localName, perm,
      null, mtime, nsQuota, dsQuota, null, null);
}
项目:hadoop    文件:FSImageFormat.java   
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV already supports snapshots this is not an upgrade: no-op.
  if (NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
    return component;
  }
  // Only the reserved ".snapshot" component needs renaming.
  if (!Arrays.equals(component, HdfsConstants.DOT_SNAPSHOT_DIR_BYTES)) {
    return component;
  }
  Preconditions.checkArgument(
      renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
      RESERVED_ERROR_MSG);
  return DFSUtil.string2Bytes(
      renameReservedMap.get(HdfsConstants.DOT_SNAPSHOT_DIR));
}
项目:hadoop    文件:FSImageFormat.java   
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedRootComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV already supports inode IDs this is not an upgrade, and only
  // the reserved root component needs renaming: otherwise no-op.
  if (NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)
      || !Arrays.equals(component, FSDirectory.DOT_RESERVED)) {
    return component;
  }
  Preconditions.checkArgument(
      renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING),
      RESERVED_ERROR_MSG);
  final String renameString = renameReservedMap
      .get(FSDirectory.DOT_RESERVED_STRING);
  LOG.info("Renamed root path " + FSDirectory.DOT_RESERVED_STRING
      + " to " + renameString);
  return DFSUtil.string2Bytes(renameString);
}
项目:hadoop    文件:ImageLoaderCurrent.java   
/**
 * Process the INode records stored in the fsimage.
 *
 * @param in Datastream to process
 * @param v Visitor to walk over INodes
 * @param numInodes Number of INodes stored in file
 * @param skipBlocks Process all the blocks within the INode?
 * @param supportSnapshot Whether or not the imageVersion supports snapshot
 * @throws VisitException
 * @throws IOException
 */
private void processINodes(DataInputStream in, ImageVisitor v,
    long numInodes, boolean skipBlocks, boolean supportSnapshot)
    throws IOException {
  v.visitEnclosingElement(ImageElement.INODES,
      ImageElement.NUM_INODES, numInodes);

  final boolean localNames = NameNodeLayoutVersion.supports(
      Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion);
  if (!localNames) {
    // full path name
    processFullNameINodes(in, v, numInodes, skipBlocks);
  } else if (supportSnapshot) {
    processLocalNameINodesWithSnapshot(in, v, skipBlocks);
  } else {
    processLocalNameINodes(in, v, numInodes, skipBlocks);
  }

  v.leaveEnclosingElement(); // INodes
}
项目:hadoop    文件:ImageLoaderCurrent.java   
private void processFileDiff(DataInputStream in, ImageVisitor v,
    String currentINodeName) throws IOException {
  final int snapshotId = in.readInt();
  v.visitEnclosingElement(ImageElement.SNAPSHOT_FILE_DIFF,
      ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId);
  v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong());
  // A boolean flag precedes an optional inode-attributes record.
  final boolean hasAttributes = in.readBoolean();
  if (hasAttributes) {
    v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_FILE_ATTRIBUTES);
    if (NameNodeLayoutVersion.supports(
        Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
      processINodeFileAttributes(in, v, currentINodeName);
    } else {
      // Older layout stores a full inode rather than just attributes.
      processINode(in, v, true, currentINodeName, true);
    }
    v.leaveEnclosingElement();
  }
  v.leaveEnclosingElement();
}
项目:aliyun-oss-hadoop-fs    文件:FSEditLogOp.java   
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // One feature probe decides both the legacy length prefix and the
  // timestamp encoding.
  final boolean optimized = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  if (!optimized) {
    // Pre-optimization logs carry an explicit field count first.
    this.length = in.readInt();
    if (this.length != 3) {
      throw new IOException("Incorrect data format. "
          + "Old rename operation.");
    }
  }
  this.src = FSImageSerialization.readString(in);
  this.dst = FSImageSerialization.readString(in);
  this.timestamp = optimized
      ? FSImageSerialization.readLong(in)
      : readLong(in);

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
项目:aliyun-oss-hadoop-fs    文件:FSEditLogOp.java   
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // One feature probe decides both the legacy length prefix and the
  // timestamp encoding.
  final boolean optimized = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  if (!optimized) {
    // Pre-optimization logs carry an explicit field count first.
    this.length = in.readInt();
    if (this.length != 2) {
      throw new IOException("Incorrect data format. " + "delete operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);
  this.timestamp = optimized
      ? FSImageSerialization.readLong(in)
      : readLong(in);
  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
项目:aliyun-oss-hadoop-fs    文件:FSEditLogOp.java   
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // One feature probe decides both the legacy length prefix and the
  // mtime/atime encoding.
  final boolean optimized = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  if (!optimized) {
    this.length = in.readInt();
    if (length != 3) {
      throw new IOException("Incorrect data format. " + "times operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);

  if (optimized) {
    this.mtime = FSImageSerialization.readLong(in);
    this.atime = FSImageSerialization.readLong(in);
  } else {
    this.mtime = readLong(in);
    this.atime = readLong(in);
  }
}
项目:aliyun-oss-hadoop-fs    文件:FSEditLogOp.java   
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // One feature probe decides both the legacy length prefix and the
  // timestamp encoding.
  final boolean optimized = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  if (!optimized) {
    // Pre-optimization logs carry an explicit field count first.
    this.length = in.readInt();
    if (this.length != 3) {
      throw new IOException("Incorrect data format. " + "Rename operation.");
    }
  }
  this.src = FSImageSerialization.readString(in);
  this.dst = FSImageSerialization.readString(in);

  this.timestamp = optimized
      ? FSImageSerialization.readLong(in)
      : readLong(in);
  this.options = readRenameOptions(in);

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
项目:aliyun-oss-hadoop-fs    文件:FSEditLogOp.java   
public static Reader create(DataInputStream in, StreamLimiter limiter,
                            int logVersion) {
  // Use the LengthPrefixedReader on edit logs which are newer than what
  // we can parse.  (Newer layout versions are represented by smaller
  // negative integers, for historical reasons.) Even though we can't
  // parse the Ops contained in them, we should still be able to call
  // scanOp on them.  This is important for the JournalNode during rolling
  // upgrade.
  final boolean futureLayout =
      logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
  if (futureLayout || NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)) {
    return new LengthPrefixedReader(in, limiter, logVersion);
  }
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHECKSUM, logVersion)) {
    Checksum checksum = DataChecksum.newCrc32();
    return new ChecksummedReader(checksum, in, limiter, logVersion);
  }
  return new LegacyReader(in, limiter, logVersion);
}
项目:aliyun-oss-hadoop-fs    文件:FSImageFormat.java   
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  // Older layouts store a full inode here, not a compact attribute record.
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }

  // Field order below must match the on-disk layout exactly.
  final byte[] localName = FSImageSerialization.readLocalName(in);
  final PermissionStatus perm = PermissionStatus.read(in);
  final long mtime = in.readLong();
  final long atime = in.readLong();

  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long blockSize = in.readLong();

  return new INodeFileAttributes.SnapshotCopy(localName, perm, null, mtime,
      atime, replication, blockSize, (byte) 0, null, false);
}
项目:aliyun-oss-hadoop-fs    文件:FSImageFormat.java   
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  // Older layouts store a full inode here, not a compact attribute record.
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asDirectory();
  }

  final byte[] localName = FSImageSerialization.readLocalName(in);
  final PermissionStatus perm = PermissionStatus.read(in);
  final long mtime = in.readLong();

  // Read quotas: quota by storage type does not need to be processed below.
  // It is handled only in protobuf based FsImagePBINode class for newer
  // fsImages. Tools using this class such as legacy-mode of offline image
  // viewer should only load legacy FSImages without newer features.
  final long nsQuota = in.readLong();
  final long dsQuota = in.readLong();

  // Both quotas unset (-1) means a plain snapshot copy, no quota record.
  if (nsQuota == -1L && dsQuota == -1L) {
    return new INodeDirectoryAttributes.SnapshotCopy(
        localName, perm, null, mtime, null);
  }
  return new INodeDirectoryAttributes.CopyWithQuota(localName, perm,
      null, mtime, nsQuota, dsQuota, null, null);
}
项目:aliyun-oss-hadoop-fs    文件:FSImageFormat.java   
/**
 * Same as {@link #renameReservedPathsOnUpgrade}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV already supports snapshots this is not an upgrade: no-op.
  if (NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
    return component;
  }
  // Only the reserved ".snapshot" component needs renaming.
  if (!Arrays.equals(component, HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES)) {
    return component;
  }
  Preconditions.checkArgument(
      renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
      RESERVED_ERROR_MSG);
  return DFSUtil.string2Bytes(
      renameReservedMap.get(HdfsConstants.DOT_SNAPSHOT_DIR));
}
项目:aliyun-oss-hadoop-fs    文件:FSImageFormat.java   
/**
 * Same as {@link #renameReservedPathsOnUpgrade}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedRootComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV already supports inode IDs this is not an upgrade, and only
  // the reserved root component needs renaming: otherwise no-op.
  if (NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)
      || !Arrays.equals(component, FSDirectory.DOT_RESERVED)) {
    return component;
  }
  Preconditions.checkArgument(
      renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING),
      RESERVED_ERROR_MSG);
  final String renameString = renameReservedMap
      .get(FSDirectory.DOT_RESERVED_STRING);
  LOG.info("Renamed root path " + FSDirectory.DOT_RESERVED_STRING
      + " to " + renameString);
  return DFSUtil.string2Bytes(renameString);
}
项目:aliyun-oss-hadoop-fs    文件:ImageLoaderCurrent.java   
/**
 * Process the INode records stored in the fsimage.
 *
 * @param in Datastream to process
 * @param v Visitor to walk over INodes
 * @param numInodes Number of INodes stored in file
 * @param skipBlocks Process all the blocks within the INode?
 * @param supportSnapshot Whether or not the imageVersion supports snapshot
 * @throws VisitException
 * @throws IOException
 */
private void processINodes(DataInputStream in, ImageVisitor v,
    long numInodes, boolean skipBlocks, boolean supportSnapshot)
    throws IOException {
  v.visitEnclosingElement(ImageElement.INODES,
      ImageElement.NUM_INODES, numInodes);

  final boolean localNames = NameNodeLayoutVersion.supports(
      Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion);
  if (!localNames) {
    // full path name
    processFullNameINodes(in, v, numInodes, skipBlocks);
  } else if (supportSnapshot) {
    processLocalNameINodesWithSnapshot(in, v, skipBlocks);
  } else {
    processLocalNameINodes(in, v, numInodes, skipBlocks);
  }

  v.leaveEnclosingElement(); // INodes
}
项目:aliyun-oss-hadoop-fs    文件:ImageLoaderCurrent.java   
private void processFileDiff(DataInputStream in, ImageVisitor v,
    String currentINodeName) throws IOException {
  final int snapshotId = in.readInt();
  v.visitEnclosingElement(ImageElement.SNAPSHOT_FILE_DIFF,
      ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId);
  v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong());
  // A boolean flag precedes an optional inode-attributes record.
  final boolean hasAttributes = in.readBoolean();
  if (hasAttributes) {
    v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_FILE_ATTRIBUTES);
    if (NameNodeLayoutVersion.supports(
        Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
      processINodeFileAttributes(in, v, currentINodeName);
    } else {
      // Older layout stores a full inode rather than just attributes.
      processINode(in, v, true, currentINodeName, true);
    }
    v.leaveEnclosingElement();
  }
  v.leaveEnclosingElement();
}
项目:big-c    文件:FSEditLogOp.java   
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // One feature probe decides both the legacy length prefix and the
  // timestamp encoding.
  final boolean optimized = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  if (!optimized) {
    // Pre-optimization logs carry an explicit field count first.
    this.length = in.readInt();
    if (this.length != 3) {
      throw new IOException("Incorrect data format. "
          + "Old rename operation.");
    }
  }
  this.src = FSImageSerialization.readString(in);
  this.dst = FSImageSerialization.readString(in);
  this.timestamp = optimized
      ? FSImageSerialization.readLong(in)
      : readLong(in);

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
项目:big-c    文件:FSEditLogOp.java   
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // One feature probe decides both the legacy length prefix and the
  // timestamp encoding.
  final boolean optimized = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  if (!optimized) {
    // Pre-optimization logs carry an explicit field count first.
    this.length = in.readInt();
    if (this.length != 2) {
      throw new IOException("Incorrect data format. " + "delete operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);
  this.timestamp = optimized
      ? FSImageSerialization.readLong(in)
      : readLong(in);
  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
项目:big-c    文件:FSEditLogOp.java   
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // One feature probe decides both the legacy length prefix and the
  // mtime/atime encoding.
  final boolean optimized = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  if (!optimized) {
    this.length = in.readInt();
    if (length != 3) {
      throw new IOException("Incorrect data format. " + "times operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);

  if (optimized) {
    this.mtime = FSImageSerialization.readLong(in);
    this.atime = FSImageSerialization.readLong(in);
  } else {
    this.mtime = readLong(in);
    this.atime = readLong(in);
  }
}
项目:big-c    文件:FSEditLogOp.java   
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // One feature probe decides both the legacy length prefix and the
  // timestamp encoding.
  final boolean optimized = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  if (!optimized) {
    // Pre-optimization logs carry an explicit field count first.
    this.length = in.readInt();
    if (this.length != 3) {
      throw new IOException("Incorrect data format. " + "Rename operation.");
    }
  }
  this.src = FSImageSerialization.readString(in);
  this.dst = FSImageSerialization.readString(in);

  this.timestamp = optimized
      ? FSImageSerialization.readLong(in)
      : readLong(in);
  this.options = readRenameOptions(in);

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
项目:big-c    文件:FSEditLogOp.java   
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  // NOTE: EDITS_CHESKUM is the constant's spelling as declared in the
  // upstream LayoutVersion enum; do not "fix" it here.
  this.checksum = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)
      ? DataChecksum.newCrc32()
      : null;
  // It is possible that the logVersion is actually a future layoutversion
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layout will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  // When checksumming, wrap the stream so every read feeds the CRC.
  this.in = (this.checksum == null)
      ? in
      : new DataInputStream(new CheckedInputStream(in, this.checksum));
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
项目:big-c    文件:FSImageFormat.java   
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  // Older layouts store a full inode here, not a compact attribute record.
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }

  // Field order below must match the on-disk layout exactly.
  final byte[] localName = FSImageSerialization.readLocalName(in);
  final PermissionStatus perm = PermissionStatus.read(in);
  final long mtime = in.readLong();
  final long atime = in.readLong();

  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long blockSize = in.readLong();

  return new INodeFileAttributes.SnapshotCopy(localName, perm, null, mtime,
      atime, replication, blockSize, (byte) 0, null);
}
项目:big-c    文件:FSImageFormat.java   
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  // Older layouts store a full inode here, not a compact attribute record.
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asDirectory();
  }

  final byte[] localName = FSImageSerialization.readLocalName(in);
  final PermissionStatus perm = PermissionStatus.read(in);
  final long mtime = in.readLong();

  // Read quotas: quota by storage type does not need to be processed below.
  // It is handled only in protobuf based FsImagePBINode class for newer
  // fsImages. Tools using this class such as legacy-mode of offline image
  // viewer should only load legacy FSImages without newer features.
  final long nsQuota = in.readLong();
  final long dsQuota = in.readLong();

  // Both quotas unset (-1) means a plain snapshot copy, no quota record.
  if (nsQuota == -1L && dsQuota == -1L) {
    return new INodeDirectoryAttributes.SnapshotCopy(
        localName, perm, null, mtime, null);
  }
  return new INodeDirectoryAttributes.CopyWithQuota(localName, perm,
      null, mtime, nsQuota, dsQuota, null, null);
}
项目:big-c    文件:FSImageFormat.java   
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV already supports snapshots this is not an upgrade: no-op.
  if (NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
    return component;
  }
  // Only the reserved ".snapshot" component needs renaming.
  if (!Arrays.equals(component, HdfsConstants.DOT_SNAPSHOT_DIR_BYTES)) {
    return component;
  }
  Preconditions.checkArgument(
      renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
      RESERVED_ERROR_MSG);
  return DFSUtil.string2Bytes(
      renameReservedMap.get(HdfsConstants.DOT_SNAPSHOT_DIR));
}
项目:big-c    文件:FSImageFormat.java   
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedRootComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV already supports inode IDs this is not an upgrade, and only
  // the reserved root component needs renaming: otherwise no-op.
  if (NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)
      || !Arrays.equals(component, FSDirectory.DOT_RESERVED)) {
    return component;
  }
  Preconditions.checkArgument(
      renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING),
      RESERVED_ERROR_MSG);
  final String renameString = renameReservedMap
      .get(FSDirectory.DOT_RESERVED_STRING);
  LOG.info("Renamed root path " + FSDirectory.DOT_RESERVED_STRING
      + " to " + renameString);
  return DFSUtil.string2Bytes(renameString);
}
项目:big-c    文件:ImageLoaderCurrent.java   
/**
 * Process the INode records stored in the fsimage.
 *
 * @param in Datastream to process
 * @param v Visitor to walk over INodes
 * @param numInodes Number of INodes stored in file
 * @param skipBlocks Process all the blocks within the INode?
 * @param supportSnapshot Whether or not the imageVersion supports snapshot
 * @throws VisitException
 * @throws IOException
 */
private void processINodes(DataInputStream in, ImageVisitor v,
    long numInodes, boolean skipBlocks, boolean supportSnapshot)
    throws IOException {
  v.visitEnclosingElement(ImageElement.INODES,
      ImageElement.NUM_INODES, numInodes);

  final boolean localNames = NameNodeLayoutVersion.supports(
      Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion);
  if (!localNames) {
    // full path name
    processFullNameINodes(in, v, numInodes, skipBlocks);
  } else if (supportSnapshot) {
    processLocalNameINodesWithSnapshot(in, v, skipBlocks);
  } else {
    processLocalNameINodes(in, v, numInodes, skipBlocks);
  }

  v.leaveEnclosingElement(); // INodes
}
项目:big-c    文件:ImageLoaderCurrent.java   
private void processFileDiff(DataInputStream in, ImageVisitor v,
    String currentINodeName) throws IOException {
  final int snapshotId = in.readInt();
  v.visitEnclosingElement(ImageElement.SNAPSHOT_FILE_DIFF,
      ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId);
  v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong());
  // A boolean flag precedes an optional inode-attributes record.
  final boolean hasAttributes = in.readBoolean();
  if (hasAttributes) {
    v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_FILE_ATTRIBUTES);
    if (NameNodeLayoutVersion.supports(
        Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
      processINodeFileAttributes(in, v, currentINodeName);
    } else {
      // Older layout stores a full inode rather than just attributes.
      processINode(in, v, true, currentINodeName, true);
    }
    v.leaveEnclosingElement();
  }
  v.leaveEnclosingElement();
}
项目:FlexMap    文件:ImageLoaderCurrent.java   
/**
 * Process the INode records stored in the fsimage.
 *
 * @param in Datastream to process
 * @param v Visitor to walk over INodes
 * @param numInodes Number of INodes stored in file
 * @param skipBlocks Process all the blocks within the INode?
 * @param supportSnapshot Whether or not the imageVersion supports snapshot
 * @throws VisitException
 * @throws IOException
 */
private void processINodes(DataInputStream in, ImageVisitor v,
    long numInodes, boolean skipBlocks, boolean supportSnapshot)
    throws IOException {
  v.visitEnclosingElement(ImageElement.INODES,
      ImageElement.NUM_INODES, numInodes);

  final boolean localNames = NameNodeLayoutVersion.supports(
      Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion);
  if (!localNames) {
    // full path name
    processFullNameINodes(in, v, numInodes, skipBlocks);
  } else if (supportSnapshot) {
    processLocalNameINodesWithSnapshot(in, v, skipBlocks);
  } else {
    processLocalNameINodes(in, v, numInodes, skipBlocks);
  }

  v.leaveEnclosingElement(); // INodes
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSEditLogOp.java   
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // One feature probe decides both the legacy length prefix and the
  // timestamp encoding.
  final boolean optimized = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  if (!optimized) {
    // Pre-optimization logs carry an explicit field count first.
    this.length = in.readInt();
    if (this.length != 2) {
      throw new IOException("Incorrect data format. " + "delete operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);
  this.timestamp = optimized
      ? FSImageSerialization.readLong(in)
      : readLong(in);
  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSEditLogOp.java   
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  // NOTE: EDITS_CHESKUM is the constant's spelling as declared in the
  // upstream LayoutVersion enum; do not "fix" it here.
  this.checksum = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)
      ? DataChecksum.newCrc32()
      : null;
  // It is possible that the logVersion is actually a future layoutversion
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layout will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  // When checksumming, wrap the stream so every read feeds the CRC.
  this.in = (this.checksum == null)
      ? in
      : new DataInputStream(new CheckedInputStream(in, this.checksum));
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
项目:FlexMap    文件:FSEditLogOp.java   
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // One feature probe decides both the legacy length prefix and the
  // timestamp encoding.
  final boolean optimized = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  if (!optimized) {
    // Pre-optimization logs carry an explicit field count first.
    this.length = in.readInt();
    if (this.length != 3) {
      throw new IOException("Incorrect data format. "
          + "Old rename operation.");
    }
  }
  this.src = FSImageSerialization.readString(in);
  this.dst = FSImageSerialization.readString(in);
  this.timestamp = optimized
      ? FSImageSerialization.readLong(in)
      : readLong(in);

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSImageFormat.java   
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  // Older layouts store a full inode here, not a compact attribute record.
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }

  // Field order below must match the on-disk layout exactly.
  final byte[] localName = FSImageSerialization.readLocalName(in);
  final PermissionStatus perm = PermissionStatus.read(in);
  final long mtime = in.readLong();
  final long atime = in.readLong();

  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long blockSize = in.readLong();

  return new INodeFileAttributes.SnapshotCopy(localName, perm, null, mtime,
      atime, replication, blockSize, (byte) 0, null);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSImageFormat.java   
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV already supports snapshots this is not an upgrade: no-op.
  if (NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
    return component;
  }
  // Only the reserved ".snapshot" component needs renaming.
  if (!Arrays.equals(component, HdfsConstants.DOT_SNAPSHOT_DIR_BYTES)) {
    return component;
  }
  Preconditions.checkArgument(
      renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
      RESERVED_ERROR_MSG);
  return DFSUtil.string2Bytes(
      renameReservedMap.get(HdfsConstants.DOT_SNAPSHOT_DIR));
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSImageFormat.java   
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedRootComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV already supports inode IDs this is not an upgrade, and only
  // the reserved root component needs renaming: otherwise no-op.
  if (NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)
      || !Arrays.equals(component, FSDirectory.DOT_RESERVED)) {
    return component;
  }
  Preconditions.checkArgument(
      renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING),
      RESERVED_ERROR_MSG);
  final String renameString = renameReservedMap
      .get(FSDirectory.DOT_RESERVED_STRING);
  LOG.info("Renamed root path " + FSDirectory.DOT_RESERVED_STRING
      + " to " + renameString);
  return DFSUtil.string2Bytes(renameString);
}