Example source code for the Java class org.apache.hadoop.hdfs.protocol.LayoutVersion

Project: hadoop    File: FSEditLogLoader.java
private long getAndUpdateLastInodeId(long inodeIdFromOp, int logVersion,
    long lastInodeId) throws IOException {
  long inodeId = inodeIdFromOp;

  if (inodeId == INodeId.GRANDFATHER_INODE_ID) {
    if (NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
      throw new IOException("The layout version " + logVersion
          + " supports inodeId but gave bogus inodeId");
    }
    inodeId = fsNamesys.dir.allocateNewInodeId();
  } else {
    // need to reset lastInodeId. fsnamesys gets lastInodeId firstly from
    // fsimage but editlog captures more recent inodeId allocations
    if (inodeId > lastInodeId) {
      fsNamesys.dir.resetLastInodeId(inodeId);
    }
  }
  return inodeId;
}
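
All of these snippets gate version-dependent logic behind NameNodeLayoutVersion.supports(feature, logVersion). HDFS layout versions are negative integers and newer versions are more negative, so a feature check essentially reduces to a numeric comparison against the version that introduced the feature. A minimal, self-contained sketch of that idea (hypothetical class and illustrative version numbers, not the real org.apache.hadoop.hdfs.protocol.LayoutVersion implementation):

// Hypothetical sketch: not the real LayoutVersion class; feature values are illustrative.
public class LayoutVersionSketch {
  enum Feature {
    ADD_INODE_ID(-40),            // illustrative introduction version
    EDITLOG_OP_OPTIMIZATION(-39); // illustrative introduction version

    final int introducedAt;
    Feature(int introducedAt) { this.introducedAt = introducedAt; }
  }

  // Newer layout versions are smaller (more negative), so "supports" reduces
  // to: was this log written at or after the feature's introduction version?
  static boolean supports(Feature f, int layoutVersion) {
    return layoutVersion <= f.introducedAt;
  }

  public static void main(String[] args) {
    System.out.println(supports(Feature.ADD_INODE_ID, -60)); // true: newer log
    System.out.println(supports(Feature.ADD_INODE_ID, -30)); // false: older log
  }
}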
Project: hadoop    File: NNStorage.java
@Override // Storage
protected void setFieldsFromProperties(
    Properties props, StorageDirectory sd) throws IOException {
  super.setFieldsFromProperties(props, sd);
  if (layoutVersion == 0) {
    throw new IOException("NameNode directory "
                          + sd.getRoot() + " is not formatted.");
  }

  // Set Block pool ID in version with federation support
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FEDERATION, getLayoutVersion())) {
    String sbpid = props.getProperty("blockpoolID");
    setBlockPoolID(sd.getRoot(), sbpid);
  }
  setDeprecatedPropertiesForUpgrade(props);
}
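
setFieldsFromProperties receives a java.util.Properties object that the Storage layer loads from the directory's VERSION file, so keys such as layoutVersion and blockpoolID are plain property names. A small sketch of reading such a file with the standard Properties API (the path and the standalone class are illustrative assumptions, not part of NNStorage):

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

// Illustrative standalone class; the VERSION path below is an assumption.
public class VersionFileSketch {
  public static void main(String[] args) throws IOException {
    Properties props = new Properties();
    try (FileInputStream in = new FileInputStream("current/VERSION")) {
      props.load(in); // VERSION is a plain Java properties file
    }
    int layoutVersion = Integer.parseInt(props.getProperty("layoutVersion"));
    String blockpoolID = props.getProperty("blockpoolID"); // absent on pre-federation layouts
    System.out.println("layoutVersion=" + layoutVersion + ", blockpoolID=" + blockpoolID);
  }
}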
Project: hadoop    File: FSEditLogOp.java
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 3) {
      throw new IOException("Incorrect data format. "
          + "Old rename operation.");
    }
  }
  this.src = FSImageSerialization.readString(in);
  this.dst = FSImageSerialization.readString(in);
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.timestamp = FSImageSerialization.readLong(in);
  } else {
    this.timestamp = readLong(in);
  }

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
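
The readFields implementations above all follow one pattern: if the edit log predates EDITLOG_OP_OPTIMIZATION, read a legacy field count and legacy long encoding; otherwise read compact fixed-width values. A self-contained sketch of that version-gated decode pattern over a DataInputStream (the helper names, cut-over version, and string-based legacy encoding are illustrative only, not the actual Hadoop op format):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Illustrative sketch of version-gated decoding; not the Hadoop op format.
public class VersionedDecodeSketch {
  static final int OPTIMIZED_VERSION = -39; // illustrative cut-over version

  static boolean supportsOptimizedOps(int logVersion) {
    return logVersion <= OPTIMIZED_VERSION; // newer = more negative
  }

  static long readTimestamp(DataInputStream in, int logVersion) throws IOException {
    if (supportsOptimizedOps(logVersion)) {
      return in.readLong();              // compact fixed-width field
    }
    return Long.parseLong(in.readUTF()); // stand-in for the legacy encoding
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    new DataOutputStream(buf).writeLong(1234567890L);
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
    System.out.println(readTimestamp(in, -40)); // decodes via the optimized path
  }
}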
Project: hadoop    File: FSEditLogOp.java
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 2) {
      throw new IOException("Incorrect data format. " + "delete operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.timestamp = FSImageSerialization.readLong(in);
  } else {
    this.timestamp = readLong(in);
  }
  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
Project: hadoop    File: FSEditLogOp.java
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (length != 3) {
      throw new IOException("Incorrect data format. " + "times operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.mtime = FSImageSerialization.readLong(in);
    this.atime = FSImageSerialization.readLong(in);
  } else {
    this.mtime = readLong(in);
    this.atime = readLong(in);
  }
}
Project: hadoop    File: FSEditLogOp.java
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 3) {
      throw new IOException("Incorrect data format. " + "Rename operation.");
    }
  }
  this.src = FSImageSerialization.readString(in);
  this.dst = FSImageSerialization.readString(in);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.timestamp = FSImageSerialization.readLong(in);
  } else {
    this.timestamp = readLong(in);
  }
  this.options = readRenameOptions(in);

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
Project: hadoop    File: FSEditLogOp.java
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = DataChecksum.newCrc32();
  } else {
    this.checksum = null;
  }
  // It is possible that the logVersion is actually a future layoutversion
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layout will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
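
When the log version supports edit-log checksums, the constructor above wraps the input stream in a CheckedInputStream so the CRC is updated as op bytes are read and can later be compared against the checksum stored in the stream. The same wrapping with plain java.util.zip (Hadoop obtains its Checksum via DataChecksum.newCrc32(); this sketch uses CRC32 directly):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.zip.CRC32;
import java.util.zip.CheckedInputStream;

public class ChecksummedReadSketch {
  public static void main(String[] args) throws IOException {
    byte[] payload = {1, 2, 3, 4};
    CRC32 crc = new CRC32();
    DataInputStream in = new DataInputStream(
        new CheckedInputStream(new ByteArrayInputStream(payload), crc));

    in.readFully(new byte[payload.length]); // reading advances the CRC as a side effect
    long computed = crc.getValue();         // would be compared to the stored checksum

    CRC32 expected = new CRC32();
    expected.update(payload, 0, payload.length);
    System.out.println(computed == expected.getValue()); // true
  }
}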
Project: hadoop    File: FSImageFormat.java
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }

  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  final long accessTime = in.readLong();

  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long preferredBlockSize = in.readLong();

  return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
      accessTime, replication, preferredBlockSize, (byte) 0, null);
}
Project: hadoop    File: FSImageFormat.java
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asDirectory();
  }

  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();

  // Read quotas: quota by storage type does not need to be processed below.
  // It is handled only in protobuf based FsImagePBINode class for newer
  // fsImages. Tools using this class such as legacy-mode of offline image viewer
  // should only load legacy FSImages without newer features.
  final long nsQuota = in.readLong();
  final long dsQuota = in.readLong();

  return nsQuota == -1L && dsQuota == -1L ? new INodeDirectoryAttributes.SnapshotCopy(
      name, permissions, null, modificationTime, null)
    : new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
        null, modificationTime, nsQuota, dsQuota, null, null);
}
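
The two quota longs use -1 as a "not set" sentinel, which is why the method returns the lighter SnapshotCopy only when both values are -1 and CopyWithQuota otherwise. A tiny self-contained sketch of that sentinel convention (values and class name are illustrative):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Illustrative values; -1 is the "quota not set" sentinel used above.
public class QuotaSentinelSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.writeLong(-1L);      // nsQuota: not set
    out.writeLong(1L << 30); // dsQuota: 1 GiB, illustrative

    DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
    long nsQuota = in.readLong();
    long dsQuota = in.readLong();
    boolean noQuota = nsQuota == -1L && dsQuota == -1L;
    System.out.println(noQuota ? "SnapshotCopy branch" : "CopyWithQuota branch");
  }
}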
Project: hadoop    File: BlockPoolSliceStorage.java
/**
 * Cleanup the detachDir.
 * 
 * If the directory is not empty, report an error; otherwise remove the
 * directory.
 *
 * @param detachDir detach directory
 * @throws IOException if the directory is not empty or cannot be removed
 */
private void cleanupDetachDir(File detachDir) throws IOException {
  if (!DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.APPEND_RBW_DIR, layoutVersion)
      && detachDir.exists() && detachDir.isDirectory()) {

    if (FileUtil.list(detachDir).length != 0) {
      throw new IOException("Detached directory " + detachDir
          + " is not empty. Please manually move each file under this "
          + "directory to the finalized directory if the finalized "
          + "directory tree does not have the file.");
    } else if (!detachDir.delete()) {
      throw new IOException("Cannot remove directory " + detachDir);
    }
  }
}
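
cleanupDetachDir only removes the legacy detach directory when it is empty and fails loudly otherwise; FileUtil.list is used so a failed listing surfaces as an IOException rather than a null array. A plain-Java sketch of the same "remove only if empty" pattern (hypothetical helper, using File.list with an explicit null check in place of FileUtil.list):

import java.io.File;
import java.io.IOException;

// Hypothetical helper mirroring the cleanup logic above with plain java.io.File.
public class CleanupSketch {
  static void removeIfEmpty(File dir) throws IOException {
    if (!dir.exists() || !dir.isDirectory()) {
      return; // nothing to clean up
    }
    String[] entries = dir.list();
    if (entries == null) {
      throw new IOException("Cannot list directory " + dir);
    }
    if (entries.length != 0) {
      throw new IOException("Directory " + dir + " is not empty");
    }
    if (!dir.delete()) {
      throw new IOException("Cannot remove directory " + dir);
    }
  }
}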
Project: hadoop    File: DataStorage.java
@Override
protected void setPropertiesFromFields(Properties props, 
                         StorageDirectory sd 
                         ) throws IOException {
  props.setProperty("storageType", storageType.toString());
  props.setProperty("clusterID", clusterID);
  props.setProperty("cTime", String.valueOf(cTime));
  props.setProperty("layoutVersion", String.valueOf(layoutVersion));
  props.setProperty("storageID", sd.getStorageUuid());

  String datanodeUuid = getDatanodeUuid();
  if (datanodeUuid != null) {
    props.setProperty("datanodeUuid", datanodeUuid);
  }

  // Set NamespaceID in version before federation
  if (!DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.FEDERATION, layoutVersion)) {
    props.setProperty("namespaceID", String.valueOf(namespaceID));
  }
}
Project: hadoop    File: DataStorage.java
/**
 * Cleanup the detachDir. 
 * 
 * If the directory is not empty, report an error;
 * otherwise remove the directory.
 *
 * @param detachDir detach directory
 * @throws IOException if the directory is not empty or cannot be removed
 */
private void cleanupDetachDir(File detachDir) throws IOException {
  if (!DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.APPEND_RBW_DIR, layoutVersion)
      && detachDir.exists() && detachDir.isDirectory()) {

    if (FileUtil.list(detachDir).length != 0) {
      throw new IOException("Detached directory " + detachDir
          + " is not empty. Please manually move each file under this "
          + "directory to the finalized directory if the finalized "
          + "directory tree does not have the file.");
    } else if (!detachDir.delete()) {
      throw new IOException("Cannot remove directory " + detachDir);
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: FSEditLogLoader.java
private long getAndUpdateLastInodeId(long inodeIdFromOp, int logVersion,
    long lastInodeId) throws IOException {
  long inodeId = inodeIdFromOp;

  if (inodeId == HdfsConstants.GRANDFATHER_INODE_ID) {
    if (NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
      throw new IOException("The layout version " + logVersion
          + " supports inodeId but gave bogus inodeId");
    }
    inodeId = fsNamesys.dir.allocateNewInodeId();
  } else {
    // need to reset lastInodeId. fsnamesys gets lastInodeId firstly from
    // fsimage but editlog captures more recent inodeId allocations
    if (inodeId > lastInodeId) {
      fsNamesys.dir.resetLastInodeId(inodeId);
    }
  }
  return inodeId;
}
Project: aliyun-oss-hadoop-fs    File: NNStorage.java
@Override // Storage
protected void setFieldsFromProperties(
    Properties props, StorageDirectory sd) throws IOException {
  super.setFieldsFromProperties(props, sd);
  if (layoutVersion == 0) {
    throw new IOException("NameNode directory "
                          + sd.getRoot() + " is not formatted.");
  }

  // Set Block pool ID in version with federation support
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FEDERATION, getLayoutVersion())) {
    String sbpid = props.getProperty("blockpoolID");
    setBlockPoolID(sd.getRoot(), sbpid);
  }
  setDeprecatedPropertiesForUpgrade(props);
}
Project: aliyun-oss-hadoop-fs    File: FSEditLogOp.java
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 3) {
      throw new IOException("Incorrect data format. "
          + "Old rename operation.");
    }
  }
  this.src = FSImageSerialization.readString(in);
  this.dst = FSImageSerialization.readString(in);
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.timestamp = FSImageSerialization.readLong(in);
  } else {
    this.timestamp = readLong(in);
  }

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
Project: aliyun-oss-hadoop-fs    File: FSEditLogOp.java
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 2) {
      throw new IOException("Incorrect data format. " + "delete operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.timestamp = FSImageSerialization.readLong(in);
  } else {
    this.timestamp = readLong(in);
  }
  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
Project: aliyun-oss-hadoop-fs    File: FSEditLogOp.java
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (length != 3) {
      throw new IOException("Incorrect data format. " + "times operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.mtime = FSImageSerialization.readLong(in);
    this.atime = FSImageSerialization.readLong(in);
  } else {
    this.mtime = readLong(in);
    this.atime = readLong(in);
  }
}
Project: aliyun-oss-hadoop-fs    File: FSEditLogOp.java
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 3) {
      throw new IOException("Incorrect data format. " + "Rename operation.");
    }
  }
  this.src = FSImageSerialization.readString(in);
  this.dst = FSImageSerialization.readString(in);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.timestamp = FSImageSerialization.readLong(in);
  } else {
    this.timestamp = readLong(in);
  }
  this.options = readRenameOptions(in);

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
Project: aliyun-oss-hadoop-fs    File: FSEditLogOp.java
public static Reader create(DataInputStream in, StreamLimiter limiter,
                            int logVersion) {
  if (logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION) {
    // Use the LengthPrefixedReader on edit logs which are newer than what
    // we can parse.  (Newer layout versions are represented by smaller
    // negative integers, for historical reasons.) Even though we can't
    // parse the Ops contained in them, we should still be able to call
    // scanOp on them.  This is important for the JournalNode during rolling
    // upgrade.
    return new LengthPrefixedReader(in, limiter, logVersion);
  } else if (NameNodeLayoutVersion.supports(
          NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)) {
    return new LengthPrefixedReader(in, limiter, logVersion);
  } else if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHECKSUM, logVersion)) {
    Checksum checksum = DataChecksum.newCrc32();
    return new ChecksummedReader(checksum, in, limiter, logVersion);
  } else {
    return new LegacyReader(in, limiter, logVersion);
  }
}
Project: aliyun-oss-hadoop-fs    File: FSImageFormat.java
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }

  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  final long accessTime = in.readLong();

  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long preferredBlockSize = in.readLong();

  return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
      accessTime, replication, preferredBlockSize, (byte) 0, null, false);
}
Project: aliyun-oss-hadoop-fs    File: FSImageFormat.java
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asDirectory();
  }

  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();

  // Read quotas: quota by storage type does not need to be processed below.
  // It is handled only in protobuf based FsImagePBINode class for newer
  // fsImages. Tools using this class such as legacy-mode of offline image viewer
  // should only load legacy FSImages without newer features.
  final long nsQuota = in.readLong();
  final long dsQuota = in.readLong();

  return nsQuota == -1L && dsQuota == -1L ? new INodeDirectoryAttributes.SnapshotCopy(
      name, permissions, null, modificationTime, null)
    : new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
        null, modificationTime, nsQuota, dsQuota, null, null);
}
Project: aliyun-oss-hadoop-fs    File: BlockPoolSliceStorage.java
/**
 * Cleanup the detachDir.
 * 
 * If the directory is not empty, report an error; otherwise remove the
 * directory.
 *
 * @param detachDir detach directory
 * @throws IOException if the directory is not empty or cannot be removed
 */
private void cleanupDetachDir(File detachDir) throws IOException {
  if (!DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.APPEND_RBW_DIR, layoutVersion)
      && detachDir.exists() && detachDir.isDirectory()) {

    if (FileUtil.list(detachDir).length != 0) {
      throw new IOException("Detached directory " + detachDir
          + " is not empty. Please manually move each file under this "
          + "directory to the finalized directory if the finalized "
          + "directory tree does not have the file.");
    } else if (!detachDir.delete()) {
      throw new IOException("Cannot remove directory " + detachDir);
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: DataStorage.java
@Override
protected void setPropertiesFromFields(Properties props, 
                         StorageDirectory sd 
                         ) throws IOException {
  props.setProperty("storageType", storageType.toString());
  props.setProperty("clusterID", clusterID);
  props.setProperty("cTime", String.valueOf(cTime));
  props.setProperty("layoutVersion", String.valueOf(layoutVersion));
  props.setProperty("storageID", sd.getStorageUuid());

  String datanodeUuid = getDatanodeUuid();
  if (datanodeUuid != null) {
    props.setProperty("datanodeUuid", datanodeUuid);
  }

  // Set NamespaceID in version before federation
  if (!DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.FEDERATION, layoutVersion)) {
    props.setProperty("namespaceID", String.valueOf(namespaceID));
  }
}
Project: aliyun-oss-hadoop-fs    File: DataStorage.java
/**
 * Cleanup the detachDir. 
 * 
 * If the directory is not empty, report an error;
 * otherwise remove the directory.
 *
 * @param detachDir detach directory
 * @throws IOException if the directory is not empty or cannot be removed
 */
private void cleanupDetachDir(File detachDir) throws IOException {
  if (!DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.APPEND_RBW_DIR, layoutVersion)
      && detachDir.exists() && detachDir.isDirectory()) {

    if (FileUtil.list(detachDir).length != 0) {
      throw new IOException("Detached directory " + detachDir
          + " is not empty. Please manually move each file under this "
          + "directory to the finalized directory if the finalized "
          + "directory tree does not have the file.");
    } else if (!detachDir.delete()) {
      throw new IOException("Cannot remove directory " + detachDir);
    }
  }
}
Project: big-c    File: FSEditLogLoader.java
private long getAndUpdateLastInodeId(long inodeIdFromOp, int logVersion,
    long lastInodeId) throws IOException {
  long inodeId = inodeIdFromOp;

  if (inodeId == INodeId.GRANDFATHER_INODE_ID) {
    if (NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
      throw new IOException("The layout version " + logVersion
          + " supports inodeId but gave bogus inodeId");
    }
    inodeId = fsNamesys.dir.allocateNewInodeId();
  } else {
    // need to reset lastInodeId. fsnamesys gets lastInodeId firstly from
    // fsimage but editlog captures more recent inodeId allocations
    if (inodeId > lastInodeId) {
      fsNamesys.dir.resetLastInodeId(inodeId);
    }
  }
  return inodeId;
}
Project: big-c    File: NNStorage.java
@Override // Storage
protected void setFieldsFromProperties(
    Properties props, StorageDirectory sd) throws IOException {
  super.setFieldsFromProperties(props, sd);
  if (layoutVersion == 0) {
    throw new IOException("NameNode directory "
                          + sd.getRoot() + " is not formatted.");
  }

  // Set Block pool ID in version with federation support
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FEDERATION, getLayoutVersion())) {
    String sbpid = props.getProperty("blockpoolID");
    setBlockPoolID(sd.getRoot(), sbpid);
  }
  setDeprecatedPropertiesForUpgrade(props);
}
Project: big-c    File: FSEditLogOp.java
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 3) {
      throw new IOException("Incorrect data format. "
          + "Old rename operation.");
    }
  }
  this.src = FSImageSerialization.readString(in);
  this.dst = FSImageSerialization.readString(in);
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.timestamp = FSImageSerialization.readLong(in);
  } else {
    this.timestamp = readLong(in);
  }

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
Project: big-c    File: FSEditLogOp.java
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 2) {
      throw new IOException("Incorrect data format. " + "delete operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.timestamp = FSImageSerialization.readLong(in);
  } else {
    this.timestamp = readLong(in);
  }
  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
Project: big-c    File: FSEditLogOp.java
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (length != 3) {
      throw new IOException("Incorrect data format. " + "times operation.");
    }
  }
  this.path = FSImageSerialization.readString(in);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.mtime = FSImageSerialization.readLong(in);
    this.atime = FSImageSerialization.readLong(in);
  } else {
    this.mtime = readLong(in);
    this.atime = readLong(in);
  }
}
Project: big-c    File: FSEditLogOp.java
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 3) {
      throw new IOException("Incorrect data format. " + "Rename operation.");
    }
  }
  this.src = FSImageSerialization.readString(in);
  this.dst = FSImageSerialization.readString(in);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.timestamp = FSImageSerialization.readLong(in);
  } else {
    this.timestamp = readLong(in);
  }
  this.options = readRenameOptions(in);

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
Project: big-c    File: FSEditLogOp.java
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = DataChecksum.newCrc32();
  } else {
    this.checksum = null;
  }
  // It is possible that the logVersion is actually a future layoutversion
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layout will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
Project: big-c    File: FSImageFormat.java
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }

  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  final long accessTime = in.readLong();

  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long preferredBlockSize = in.readLong();

  return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
      accessTime, replication, preferredBlockSize, (byte) 0, null);
}
Project: big-c    File: FSImageFormat.java
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asDirectory();
  }

  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();

  // Read quotas: quota by storage type does not need to be processed below.
  // It is handled only in protobuf based FsImagePBINode class for newer
  // fsImages. Tools using this class such as legacy-mode of offline image viewer
  // should only load legacy FSImages without newer features.
  final long nsQuota = in.readLong();
  final long dsQuota = in.readLong();

  return nsQuota == -1L && dsQuota == -1L ? new INodeDirectoryAttributes.SnapshotCopy(
      name, permissions, null, modificationTime, null)
    : new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
        null, modificationTime, nsQuota, dsQuota, null, null);
}
Project: big-c    File: BlockPoolSliceStorage.java
/**
 * Cleanup the detachDir.
 * 
 * If the directory is not empty, report an error; otherwise remove the
 * directory.
 *
 * @param detachDir detach directory
 * @throws IOException if the directory is not empty or cannot be removed
 */
private void cleanupDetachDir(File detachDir) throws IOException {
  if (!DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.APPEND_RBW_DIR, layoutVersion)
      && detachDir.exists() && detachDir.isDirectory()) {

    if (FileUtil.list(detachDir).length != 0) {
      throw new IOException("Detached directory " + detachDir
          + " is not empty. Please manually move each file under this "
          + "directory to the finalized directory if the finalized "
          + "directory tree does not have the file.");
    } else if (!detachDir.delete()) {
      throw new IOException("Cannot remove directory " + detachDir);
    }
  }
}
Project: big-c    File: DataStorage.java
@Override
protected void setPropertiesFromFields(Properties props, 
                         StorageDirectory sd 
                         ) throws IOException {
  props.setProperty("storageType", storageType.toString());
  props.setProperty("clusterID", clusterID);
  props.setProperty("cTime", String.valueOf(cTime));
  props.setProperty("layoutVersion", String.valueOf(layoutVersion));
  props.setProperty("storageID", sd.getStorageUuid());

  String datanodeUuid = getDatanodeUuid();
  if (datanodeUuid != null) {
    props.setProperty("datanodeUuid", datanodeUuid);
  }

  // Set NamespaceID in version before federation
  if (!DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.FEDERATION, layoutVersion)) {
    props.setProperty("namespaceID", String.valueOf(namespaceID));
  }
}
Project: big-c    File: DataStorage.java
/**
 * Cleanup the detachDir. 
 * 
 * If the directory is not empty, report an error;
 * otherwise remove the directory.
 *
 * @param detachDir detach directory
 * @throws IOException if the directory is not empty or cannot be removed
 */
private void cleanupDetachDir(File detachDir) throws IOException {
  if (!DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.APPEND_RBW_DIR, layoutVersion)
      && detachDir.exists() && detachDir.isDirectory()) {

    if (FileUtil.list(detachDir).length != 0) {
      throw new IOException("Detached directory " + detachDir
          + " is not empty. Please manually move each file under this "
          + "directory to the finalized directory if the finalized "
          + "directory tree does not have the file.");
    } else if (!detachDir.delete()) {
      throw new IOException("Cannot remove directory " + detachDir);
    }
  }
}
Project: FlexMap    File: FSEditLogLoader.java
private long getAndUpdateLastInodeId(long inodeIdFromOp, int logVersion,
    long lastInodeId) throws IOException {
  long inodeId = inodeIdFromOp;

  if (inodeId == INodeId.GRANDFATHER_INODE_ID) {
    if (NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
      throw new IOException("The layout version " + logVersion
          + " supports inodeId but gave bogus inodeId");
    }
    inodeId = fsNamesys.allocateNewInodeId();
  } else {
    // need to reset lastInodeId. fsnamesys gets lastInodeId firstly from
    // fsimage but editlog captures more recent inodeId allocations
    if (inodeId > lastInodeId) {
      fsNamesys.resetLastInodeId(inodeId);
    }
  }
  return inodeId;
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSEditLogLoader.java
private long getAndUpdateLastInodeId(long inodeIdFromOp, int logVersion,
    long lastInodeId) throws IOException {
  long inodeId = inodeIdFromOp;

  if (inodeId == INodeId.GRANDFATHER_INODE_ID) {
    if (NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
      throw new IOException("The layout version " + logVersion
          + " supports inodeId but gave bogus inodeId");
    }
    inodeId = fsNamesys.allocateNewInodeId();
  } else {
    // need to reset lastInodeId. fsnamesys gets lastInodeId firstly from
    // fsimage but editlog captures more recent inodeId allocations
    if (inodeId > lastInodeId) {
      fsNamesys.resetLastInodeId(inodeId);
    }
  }
  return inodeId;
}
Project: FlexMap    File: FSImageFormat.java
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();

  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }

  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  final long accessTime = in.readLong();

  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long preferredBlockSize = in.readLong();

  return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
      accessTime, replication, preferredBlockSize, (byte) 0, null);
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSEditLogOp.java
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 3) {
      throw new IOException("Incorrect data format. "
          + "Old rename operation.");
    }
  }
  this.src = FSImageSerialization.readString(in);
  this.dst = FSImageSerialization.readString(in);
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.timestamp = FSImageSerialization.readLong(in);
  } else {
    this.timestamp = readLong(in);
  }

  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}