Java 类 org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary 实例源码

项目:hadoop    文件:FSImageFormatProtobuf.java   
/**
 * Serializes the NameSystemSection (generation stamps, last allocated block
 * id, transaction id, namespace id and, if active, the rolling-upgrade start
 * time) to the current section stream and records it in the summary.
 *
 * @param summary builder that accumulates section name/offset/length entries
 * @throws IOException on a write failure
 */
private void saveNameSystemSection(FileSummary.Builder summary)
    throws IOException {
  final FSNamesystem fsn = context.getSourceNamesystem();
  OutputStream out = sectionOutputStream;
  BlockIdManager blockIdManager = fsn.getBlockIdManager();
  NameSystemSection.Builder b = NameSystemSection.newBuilder()
      .setGenstampV1(blockIdManager.getGenerationStampV1())
      .setGenstampV1Limit(blockIdManager.getGenerationStampV1Limit())
      .setGenstampV2(blockIdManager.getGenerationStampV2())
      .setLastAllocatedBlockId(blockIdManager.getLastAllocatedBlockId())
      .setTransactionId(context.getTxId());

  // We use the non-locked version of getNamespaceInfo here since
  // the coordinating thread of saveNamespace already has read-locked
  // the namespace for us. If we attempt to take another readlock
  // from the actual saver thread, there's a potential of a
  // fairness-related deadlock. See the comments on HDFS-2223.
  b.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID());
  if (fsn.isRollingUpgrade()) {
    // Start time is only recorded while a rolling upgrade is in progress.
    b.setRollingUpgradeStartTime(fsn.getRollingUpgradeInfo().getStartTime());
  }
  NameSystemSection s = b.build();
  // Delimited form: message length prefix followed by the payload.
  s.writeDelimitedTo(out);

  commitSection(summary, SectionName.NS_INFO);
}
项目:hadoop    文件:PBImageTextWriter.java   
/**
 * Prints the text header, then streams every INODE section of the image
 * through {@link #outputINodes}, bounding each read to the section length
 * and decompressing when the summary records a codec.
 */
private void output(Configuration conf, FileSummary summary,
    FileInputStream fin, ArrayList<FileSummary.Section> sections)
    throws IOException {
  final long begin = Time.monotonicNow();
  out.println(getHeader());
  for (FileSummary.Section sec : sections) {
    if (SectionName.fromString(sec.getName()) != SectionName.INODE) {
      continue;
    }
    // Seek to the section start; LimitInputStream stops at its boundary.
    fin.getChannel().position(sec.getOffset());
    InputStream sectionStream = FSImageUtil.wrapInputStreamForCompression(
        conf, summary.getCodec(),
        new BufferedInputStream(new LimitInputStream(fin, sec.getLength())));
    outputINodes(sectionStream);
  }
  LOG.debug("Time to output inodes: {}ms", Time.monotonicNow() - begin);
}
项目:hadoop    文件:PBImageTextWriter.java   
/**
 * Load the directories in the INode section.
 * <p>
 * Scans every INODE section listed in the summary, seeks the channel to the
 * section offset, and feeds a length-bounded (and, if a codec is recorded,
 * decompressed) stream to {@link #loadDirectoriesInINodeSection}.
 */
private void loadDirectories(
    FileInputStream fin, List<FileSummary.Section> sections,
    FileSummary summary, Configuration conf)
    throws IOException {
  LOG.info("Loading directories");
  long startTime = Time.monotonicNow();
  for (FileSummary.Section section : sections) {
    if (SectionName.fromString(section.getName())
        == SectionName.INODE) {
      fin.getChannel().position(section.getOffset());
      // LimitInputStream caps reads at the section boundary; the codec
      // wrapper passes the stream through when the image is uncompressed.
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(new LimitInputStream(
              fin, section.getLength())));
      loadDirectoriesInINodeSection(is);
    }
  }
  long timeTaken = Time.monotonicNow() - startTime;
  LOG.info("Finished loading directories in {}ms", timeTaken);
}
项目:hadoop    文件:PBImageTextWriter.java   
/**
 * Builds the namespace from the INODE_DIR section(s) of the image.
 *
 * @param refIdList inode reference ids forwarded to
 *        {@link #buildNamespace}; NOTE(review): its semantics are not
 *        visible in this block — confirm against buildNamespace.
 */
private void loadINodeDirSection(
    FileInputStream fin, List<FileSummary.Section> sections,
    FileSummary summary, Configuration conf, List<Long> refIdList)
    throws IOException {
  LOG.info("Loading INode directory section.");
  long startTime = Time.monotonicNow();
  for (FileSummary.Section section : sections) {
    if (SectionName.fromString(section.getName())
        == SectionName.INODE_DIR) {
      fin.getChannel().position(section.getOffset());
      // Bound the read to the section and decompress if a codec is set.
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(
              new LimitInputStream(fin, section.getLength())));
      buildNamespace(is, refIdList);
    }
  }
  long timeTaken = Time.monotonicNow() - startTime;
  LOG.info("Finished loading INode directory section in {}ms", timeTaken);
}
项目:hadoop    文件:FileDistributionCalculator.java   
/**
 * Visits the fsimage: validates the file magic, loads the trailing summary,
 * then runs the calculator ({@code run}) over each INODE section and emits
 * the result via {@code output}.
 *
 * @param file the fsimage opened for random access
 * @throws IOException if the image is unrecognized or a read fails
 */
void visit(RandomAccessFile file) throws IOException {
  if (!FSImageUtil.checkFileFormat(file)) {
    throw new IOException("Unrecognized FSImage");
  }

  FileSummary summary = FSImageUtil.loadSummary(file);
  // try-with-resources closes the stream on every exit path.
  try (FileInputStream in = new FileInputStream(file.getFD())) {
    for (FileSummary.Section s : summary.getSectionsList()) {
      // Only INODE sections contribute to the file distribution.
      if (SectionName.fromString(s.getName()) != SectionName.INODE) {
        continue;
      }

      in.getChannel().position(s.getOffset());
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(new LimitInputStream(
              in, s.getLength())));
      run(is);
      output();
    }
  }
}
项目:aliyun-oss-hadoop-fs    文件:PBImageTextWriter.java   
/**
 * Streams every INODE section of the image through {@link #outputINodes},
 * bounding each read to the section length and decompressing when the
 * summary records a codec.
 */
private void output(Configuration conf, FileSummary summary,
    FileInputStream fin, ArrayList<FileSummary.Section> sections)
    throws IOException {
  final long begin = Time.monotonicNow();
  for (FileSummary.Section sec : sections) {
    if (SectionName.fromString(sec.getName()) != SectionName.INODE) {
      continue;
    }
    // Seek to the section start; LimitInputStream stops at its boundary.
    fin.getChannel().position(sec.getOffset());
    InputStream sectionStream = FSImageUtil.wrapInputStreamForCompression(
        conf, summary.getCodec(),
        new BufferedInputStream(new LimitInputStream(fin, sec.getLength())));
    outputINodes(sectionStream);
  }
  LOG.debug("Time to output inodes: {}ms", Time.monotonicNow() - begin);
}
项目:aliyun-oss-hadoop-fs    文件:PBImageTextWriter.java   
/**
 * Load the directories in the INode section.
 * <p>
 * Scans every INODE section listed in the summary, seeks the channel to the
 * section offset, and feeds a length-bounded (and, if a codec is recorded,
 * decompressed) stream to {@link #loadDirectoriesInINodeSection}.
 */
private void loadDirectories(
    FileInputStream fin, List<FileSummary.Section> sections,
    FileSummary summary, Configuration conf)
    throws IOException {
  LOG.info("Loading directories");
  long startTime = Time.monotonicNow();
  for (FileSummary.Section section : sections) {
    if (SectionName.fromString(section.getName())
        == SectionName.INODE) {
      fin.getChannel().position(section.getOffset());
      // LimitInputStream caps reads at the section boundary; the codec
      // wrapper passes the stream through when the image is uncompressed.
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(new LimitInputStream(
              fin, section.getLength())));
      loadDirectoriesInINodeSection(is);
    }
  }
  long timeTaken = Time.monotonicNow() - startTime;
  LOG.info("Finished loading directories in {}ms", timeTaken);
}
项目:aliyun-oss-hadoop-fs    文件:PBImageTextWriter.java   
/**
 * Builds the namespace from the INODE_DIR section(s) of the image by
 * handing each length-bounded, possibly decompressed section stream to
 * {@link #buildNamespace}.
 */
private void loadINodeDirSection(
    FileInputStream fin, List<FileSummary.Section> sections,
    FileSummary summary, Configuration conf)
    throws IOException {
  LOG.info("Loading INode directory section.");
  long startTime = Time.monotonicNow();
  for (FileSummary.Section section : sections) {
    if (SectionName.fromString(section.getName())
        == SectionName.INODE_DIR) {
      fin.getChannel().position(section.getOffset());
      // Bound the read to the section and decompress if a codec is set.
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(
              new LimitInputStream(fin, section.getLength())));
      buildNamespace(is);
    }
  }
  long timeTaken = Time.monotonicNow() - startTime;
  LOG.info("Finished loading INode directory section in {}ms", timeTaken);
}
项目:aliyun-oss-hadoop-fs    文件:FileDistributionCalculator.java   
/**
 * Visits the fsimage: validates the file magic, loads the trailing summary,
 * then runs the calculator ({@code run}) over each INODE section and emits
 * the result via {@code output}.
 *
 * @param file the fsimage opened for random access
 * @throws IOException if the image is unrecognized or a read fails
 */
void visit(RandomAccessFile file) throws IOException {
  if (!FSImageUtil.checkFileFormat(file)) {
    throw new IOException("Unrecognized FSImage");
  }

  FileSummary summary = FSImageUtil.loadSummary(file);
  // try-with-resources closes the stream on every exit path.
  try (FileInputStream in = new FileInputStream(file.getFD())) {
    for (FileSummary.Section s : summary.getSectionsList()) {
      // Only INODE sections contribute to the file distribution.
      if (SectionName.fromString(s.getName()) != SectionName.INODE) {
        continue;
      }

      in.getChannel().position(s.getOffset());
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(new LimitInputStream(
              in, s.getLength())));
      run(is);
      output();
    }
  }
}
项目:big-c    文件:FSImageFormatProtobuf.java   
/**
 * Serializes the NameSystemSection (generation stamps, last allocated block
 * id, transaction id, namespace id and, if active, the rolling-upgrade start
 * time) to the current section stream and records it in the summary.
 *
 * @param summary builder that accumulates section name/offset/length entries
 * @throws IOException on a write failure
 */
private void saveNameSystemSection(FileSummary.Builder summary)
    throws IOException {
  final FSNamesystem fsn = context.getSourceNamesystem();
  OutputStream out = sectionOutputStream;
  BlockIdManager blockIdManager = fsn.getBlockIdManager();
  NameSystemSection.Builder b = NameSystemSection.newBuilder()
      .setGenstampV1(blockIdManager.getGenerationStampV1())
      .setGenstampV1Limit(blockIdManager.getGenerationStampV1Limit())
      .setGenstampV2(blockIdManager.getGenerationStampV2())
      .setLastAllocatedBlockId(blockIdManager.getLastAllocatedBlockId())
      .setTransactionId(context.getTxId());

  // We use the non-locked version of getNamespaceInfo here since
  // the coordinating thread of saveNamespace already has read-locked
  // the namespace for us. If we attempt to take another readlock
  // from the actual saver thread, there's a potential of a
  // fairness-related deadlock. See the comments on HDFS-2223.
  b.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID());
  if (fsn.isRollingUpgrade()) {
    // Start time is only recorded while a rolling upgrade is in progress.
    b.setRollingUpgradeStartTime(fsn.getRollingUpgradeInfo().getStartTime());
  }
  NameSystemSection s = b.build();
  // Delimited form: message length prefix followed by the payload.
  s.writeDelimitedTo(out);

  commitSection(summary, SectionName.NS_INFO);
}
项目:big-c    文件:PBImageTextWriter.java   
/**
 * Streams every INODE section of the image through {@link #outputINodes},
 * bounding each read to the section length and decompressing when the
 * summary records a codec.
 */
private void output(Configuration conf, FileSummary summary,
    FileInputStream fin, ArrayList<FileSummary.Section> sections)
    throws IOException {
  final long begin = Time.monotonicNow();
  for (FileSummary.Section sec : sections) {
    if (SectionName.fromString(sec.getName()) != SectionName.INODE) {
      continue;
    }
    // Seek to the section start; LimitInputStream stops at its boundary.
    fin.getChannel().position(sec.getOffset());
    InputStream sectionStream = FSImageUtil.wrapInputStreamForCompression(
        conf, summary.getCodec(),
        new BufferedInputStream(new LimitInputStream(fin, sec.getLength())));
    outputINodes(sectionStream);
  }
  LOG.debug("Time to output inodes: {}ms", Time.monotonicNow() - begin);
}
项目:big-c    文件:PBImageTextWriter.java   
/**
 * Load the directories in the INode section.
 * <p>
 * Scans every INODE section listed in the summary, seeks the channel to the
 * section offset, and feeds a length-bounded (and, if a codec is recorded,
 * decompressed) stream to {@link #loadDirectoriesInINodeSection}.
 */
private void loadDirectories(
    FileInputStream fin, List<FileSummary.Section> sections,
    FileSummary summary, Configuration conf)
    throws IOException {
  LOG.info("Loading directories");
  long startTime = Time.monotonicNow();
  for (FileSummary.Section section : sections) {
    if (SectionName.fromString(section.getName())
        == SectionName.INODE) {
      fin.getChannel().position(section.getOffset());
      // LimitInputStream caps reads at the section boundary; the codec
      // wrapper passes the stream through when the image is uncompressed.
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(new LimitInputStream(
              fin, section.getLength())));
      loadDirectoriesInINodeSection(is);
    }
  }
  long timeTaken = Time.monotonicNow() - startTime;
  LOG.info("Finished loading directories in {}ms", timeTaken);
}
项目:big-c    文件:PBImageTextWriter.java   
/**
 * Builds the namespace from the INODE_DIR section(s) of the image by
 * handing each length-bounded, possibly decompressed section stream to
 * {@link #buildNamespace}.
 */
private void loadINodeDirSection(
    FileInputStream fin, List<FileSummary.Section> sections,
    FileSummary summary, Configuration conf)
    throws IOException {
  LOG.info("Loading INode directory section.");
  long startTime = Time.monotonicNow();
  for (FileSummary.Section section : sections) {
    if (SectionName.fromString(section.getName())
        == SectionName.INODE_DIR) {
      fin.getChannel().position(section.getOffset());
      // Bound the read to the section and decompress if a codec is set.
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(
              new LimitInputStream(fin, section.getLength())));
      buildNamespace(is);
    }
  }
  long timeTaken = Time.monotonicNow() - startTime;
  LOG.info("Finished loading INode directory section in {}ms", timeTaken);
}
项目:big-c    文件:FileDistributionCalculator.java   
/**
 * Visits the fsimage: validates the file magic, loads the trailing summary,
 * then runs the calculator ({@code run}) over each INODE section and emits
 * the result via {@code output}.
 *
 * @param file the fsimage opened for random access
 * @throws IOException if the image is unrecognized or a read fails
 */
void visit(RandomAccessFile file) throws IOException {
  if (!FSImageUtil.checkFileFormat(file)) {
    throw new IOException("Unrecognized FSImage");
  }

  FileSummary summary = FSImageUtil.loadSummary(file);
  // try-with-resources closes the stream on every exit path.
  try (FileInputStream in = new FileInputStream(file.getFD())) {
    for (FileSummary.Section s : summary.getSectionsList()) {
      // Only INODE sections contribute to the file distribution.
      if (SectionName.fromString(s.getName()) != SectionName.INODE) {
        continue;
      }

      in.getChannel().position(s.getOffset());
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(new LimitInputStream(
              in, s.getLength())));
      run(is);
      output();
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSImageFormatProtobuf.java   
/**
 * Serializes the NameSystemSection (generation stamps, last allocated block
 * id, transaction id, namespace id and, if active, the rolling-upgrade start
 * time) to the current section stream and records it in the summary. In this
 * branch the stamp/id accessors live directly on FSNamesystem.
 *
 * @param summary builder that accumulates section name/offset/length entries
 * @throws IOException on a write failure
 */
private void saveNameSystemSection(FileSummary.Builder summary)
    throws IOException {
  final FSNamesystem fsn = context.getSourceNamesystem();
  OutputStream out = sectionOutputStream;
  NameSystemSection.Builder b = NameSystemSection.newBuilder()
      .setGenstampV1(fsn.getGenerationStampV1())
      .setGenstampV1Limit(fsn.getGenerationStampV1Limit())
      .setGenstampV2(fsn.getGenerationStampV2())
      .setLastAllocatedBlockId(fsn.getLastAllocatedBlockId())
      .setTransactionId(context.getTxId());

  // We use the non-locked version of getNamespaceInfo here since
  // the coordinating thread of saveNamespace already has read-locked
  // the namespace for us. If we attempt to take another readlock
  // from the actual saver thread, there's a potential of a
  // fairness-related deadlock. See the comments on HDFS-2223.
  b.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID());
  if (fsn.isRollingUpgrade()) {
    // Start time is only recorded while a rolling upgrade is in progress.
    b.setRollingUpgradeStartTime(fsn.getRollingUpgradeInfo().getStartTime());
  }
  NameSystemSection s = b.build();
  // Delimited form: message length prefix followed by the payload.
  s.writeDelimitedTo(out);

  commitSection(summary, SectionName.NS_INFO);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:PBImageTextWriter.java   
/**
 * Streams every INODE section of the image through {@link #outputINodes},
 * bounding each read to the section length and decompressing when the
 * summary records a codec.
 */
private void output(Configuration conf, FileSummary summary,
    FileInputStream fin, ArrayList<FileSummary.Section> sections)
    throws IOException {
  final long begin = Time.monotonicNow();
  for (FileSummary.Section sec : sections) {
    if (SectionName.fromString(sec.getName()) != SectionName.INODE) {
      continue;
    }
    // Seek to the section start; LimitInputStream stops at its boundary.
    fin.getChannel().position(sec.getOffset());
    InputStream sectionStream = FSImageUtil.wrapInputStreamForCompression(
        conf, summary.getCodec(),
        new BufferedInputStream(new LimitInputStream(fin, sec.getLength())));
    outputINodes(sectionStream);
  }
  LOG.debug("Time to output inodes: {}ms", Time.monotonicNow() - begin);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:PBImageTextWriter.java   
/**
 * Load the directories in the INode section.
 * <p>
 * Scans every INODE section listed in the summary, seeks the channel to the
 * section offset, and feeds a length-bounded (and, if a codec is recorded,
 * decompressed) stream to {@link #loadDirectoriesInINodeSection}.
 */
private void loadDirectories(
    FileInputStream fin, List<FileSummary.Section> sections,
    FileSummary summary, Configuration conf)
    throws IOException {
  LOG.info("Loading directories");
  long startTime = Time.monotonicNow();
  for (FileSummary.Section section : sections) {
    if (SectionName.fromString(section.getName())
        == SectionName.INODE) {
      fin.getChannel().position(section.getOffset());
      // LimitInputStream caps reads at the section boundary; the codec
      // wrapper passes the stream through when the image is uncompressed.
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(new LimitInputStream(
              fin, section.getLength())));
      loadDirectoriesInINodeSection(is);
    }
  }
  long timeTaken = Time.monotonicNow() - startTime;
  LOG.info("Finished loading directories in {}ms", timeTaken);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:PBImageTextWriter.java   
/**
 * Builds the namespace from the INODE_DIR section(s) of the image by
 * handing each length-bounded, possibly decompressed section stream to
 * {@link #buildNamespace}.
 */
private void loadINodeDirSection(
    FileInputStream fin, List<FileSummary.Section> sections,
    FileSummary summary, Configuration conf)
    throws IOException {
  LOG.info("Loading INode directory section.");
  long startTime = Time.monotonicNow();
  for (FileSummary.Section section : sections) {
    if (SectionName.fromString(section.getName())
        == SectionName.INODE_DIR) {
      fin.getChannel().position(section.getOffset());
      // Bound the read to the section and decompress if a codec is set.
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(
              new LimitInputStream(fin, section.getLength())));
      buildNamespace(is);
    }
  }
  long timeTaken = Time.monotonicNow() - startTime;
  LOG.info("Finished loading INode directory section in {}ms", timeTaken);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FileDistributionCalculator.java   
/**
 * Visits the fsimage: validates the file magic, loads the trailing summary,
 * then runs the calculator ({@code run}) over each INODE section and emits
 * the result via {@code output}.
 *
 * @param file the fsimage opened for random access
 * @throws IOException if the image is unrecognized or a read fails
 */
void visit(RandomAccessFile file) throws IOException {
  if (!FSImageUtil.checkFileFormat(file)) {
    throw new IOException("Unrecognized FSImage");
  }

  FileSummary summary = FSImageUtil.loadSummary(file);
  // try-with-resources replaces the manual try/finally + IOUtils.cleanup:
  // the stream is closed on every exit path without the deprecated helper,
  // matching the other visit() variants in this codebase.
  try (FileInputStream in = new FileInputStream(file.getFD())) {
    for (FileSummary.Section s : summary.getSectionsList()) {
      // Only INODE sections contribute to the file distribution.
      if (SectionName.fromString(s.getName()) != SectionName.INODE) {
        continue;
      }

      // Seek to the section; bound the read to its length and decompress
      // if the image records a codec.
      in.getChannel().position(s.getOffset());
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(new LimitInputStream(
              in, s.getLength())));
      run(is);
      output();
    }
  }
}
项目:FlexMap    文件:FSImageFormatProtobuf.java   
/**
 * Serializes the NameSystemSection (generation stamps, last allocated block
 * id, transaction id, namespace id and, if active, the rolling-upgrade start
 * time) to the current section stream and records it in the summary. In this
 * branch the stamp/id accessors live directly on FSNamesystem.
 *
 * @param summary builder that accumulates section name/offset/length entries
 * @throws IOException on a write failure
 */
private void saveNameSystemSection(FileSummary.Builder summary)
    throws IOException {
  final FSNamesystem fsn = context.getSourceNamesystem();
  OutputStream out = sectionOutputStream;
  NameSystemSection.Builder b = NameSystemSection.newBuilder()
      .setGenstampV1(fsn.getGenerationStampV1())
      .setGenstampV1Limit(fsn.getGenerationStampV1Limit())
      .setGenstampV2(fsn.getGenerationStampV2())
      .setLastAllocatedBlockId(fsn.getLastAllocatedBlockId())
      .setTransactionId(context.getTxId());

  // We use the non-locked version of getNamespaceInfo here since
  // the coordinating thread of saveNamespace already has read-locked
  // the namespace for us. If we attempt to take another readlock
  // from the actual saver thread, there's a potential of a
  // fairness-related deadlock. See the comments on HDFS-2223.
  b.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID());
  if (fsn.isRollingUpgrade()) {
    // Start time is only recorded while a rolling upgrade is in progress.
    b.setRollingUpgradeStartTime(fsn.getRollingUpgradeInfo().getStartTime());
  }
  NameSystemSection s = b.build();
  // Delimited form: message length prefix followed by the payload.
  s.writeDelimitedTo(out);

  commitSection(summary, SectionName.NS_INFO);
}
项目:FlexMap    文件:FileDistributionCalculator.java   
/**
 * Visits the fsimage: validates the file magic, loads the trailing summary,
 * then runs the calculator ({@code run}) over each INODE section and emits
 * the result via {@code output}.
 *
 * @param file the fsimage opened for random access
 * @throws IOException if the image is unrecognized or a read fails
 */
void visit(RandomAccessFile file) throws IOException {
  if (!FSImageUtil.checkFileFormat(file)) {
    throw new IOException("Unrecognized FSImage");
  }

  FileSummary summary = FSImageUtil.loadSummary(file);
  // try-with-resources replaces the manual try/finally + IOUtils.cleanup:
  // the stream is closed on every exit path without the deprecated helper,
  // matching the other visit() variants in this codebase.
  try (FileInputStream in = new FileInputStream(file.getFD())) {
    for (FileSummary.Section s : summary.getSectionsList()) {
      // Only INODE sections contribute to the file distribution.
      if (SectionName.fromString(s.getName()) != SectionName.INODE) {
        continue;
      }

      // Seek to the section; bound the read to its length and decompress
      // if the image records a codec.
      in.getChannel().position(s.getOffset());
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(new LimitInputStream(
              in, s.getLength())));
      run(is);
      output();
    }
  }
}
项目:hadoop-on-lustre2    文件:FSImageFormatProtobuf.java   
/**
 * Serializes the NameSystemSection (generation stamps, last allocated block
 * id, transaction id, namespace id and, if active, the rolling-upgrade start
 * time) to the current section stream and records it in the summary. In this
 * branch the stamp/id accessors live directly on FSNamesystem.
 *
 * @param summary builder that accumulates section name/offset/length entries
 * @throws IOException on a write failure
 */
private void saveNameSystemSection(FileSummary.Builder summary)
    throws IOException {
  final FSNamesystem fsn = context.getSourceNamesystem();
  OutputStream out = sectionOutputStream;
  NameSystemSection.Builder b = NameSystemSection.newBuilder()
      .setGenstampV1(fsn.getGenerationStampV1())
      .setGenstampV1Limit(fsn.getGenerationStampV1Limit())
      .setGenstampV2(fsn.getGenerationStampV2())
      .setLastAllocatedBlockId(fsn.getLastAllocatedBlockId())
      .setTransactionId(context.getTxId());

  // We use the non-locked version of getNamespaceInfo here since
  // the coordinating thread of saveNamespace already has read-locked
  // the namespace for us. If we attempt to take another readlock
  // from the actual saver thread, there's a potential of a
  // fairness-related deadlock. See the comments on HDFS-2223.
  b.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID());
  if (fsn.isRollingUpgrade()) {
    // Start time is only recorded while a rolling upgrade is in progress.
    b.setRollingUpgradeStartTime(fsn.getRollingUpgradeInfo().getStartTime());
  }
  NameSystemSection s = b.build();
  // Delimited form: message length prefix followed by the payload.
  s.writeDelimitedTo(out);

  commitSection(summary, SectionName.NS_INFO);
}
项目:hadoop-on-lustre2    文件:FileDistributionCalculator.java   
/**
 * Visits the fsimage: validates the file magic, loads the trailing summary,
 * then runs the calculator ({@code run}) over each INODE section and emits
 * the result via {@code output}.
 *
 * @param file the fsimage opened for random access
 * @throws IOException if the image is unrecognized or a read fails
 */
void visit(RandomAccessFile file) throws IOException {
  if (!FSImageUtil.checkFileFormat(file)) {
    throw new IOException("Unrecognized FSImage");
  }

  FileSummary summary = FSImageUtil.loadSummary(file);
  // try-with-resources replaces the manual try/finally + IOUtils.cleanup:
  // the stream is closed on every exit path without the deprecated helper,
  // matching the other visit() variants in this codebase.
  try (FileInputStream in = new FileInputStream(file.getFD())) {
    for (FileSummary.Section s : summary.getSectionsList()) {
      // Only INODE sections contribute to the file distribution.
      if (SectionName.fromString(s.getName()) != SectionName.INODE) {
        continue;
      }

      // Seek to the section; bound the read to its length and decompress
      // if the image records a codec.
      in.getChannel().position(s.getOffset());
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(new LimitInputStream(
              in, s.getLength())));
      run(is);
      output();
    }
  }
}
项目:hadoop    文件:FSImageFormatProtobuf.java   
/**
 * Finishes the current section: flushes buffered output, records the
 * section's name, offset and length in the summary, and prepares the
 * (possibly compressed) stream for the next section.
 */
public void commitSection(FileSummary.Builder summary, SectionName name)
    throws IOException {
  long oldOffset = currentOffset;
  flushSectionOutputStream();

  // A fresh compressed stream must wrap the underlying stream for each
  // section; fall back to the raw stream when no codec is configured.
  if (codec != null) {
    sectionOutputStream = codec.createOutputStream(underlyingOutputStream);
  } else {
    sectionOutputStream = underlyingOutputStream;
  }
  // On-disk length of the section that was just flushed.
  long length = fileChannel.position() - oldOffset;
  summary.addSections(FileSummary.Section.newBuilder().setName(name.name)
      .setLength(length).setOffset(currentOffset));
  currentOffset += length;
}
项目:hadoop    文件:FSImageFormatProtobuf.java   
/**
 * Writes the delimited FileSummary followed by a fixed 4-byte big-endian
 * field holding the summary's on-disk size, so a reader can locate the
 * summary by seeking backwards from the end of the file.
 */
private static void saveFileSummary(OutputStream out, FileSummary summary)
    throws IOException {
  summary.writeDelimitedTo(out);
  int length = getOndiskTrunkSize(summary);
  byte[] lengthBytes = new byte[4];
  // Wrapped ByteBuffer defaults to big-endian byte order.
  ByteBuffer.wrap(lengthBytes).asIntBuffer().put(length);
  out.write(lengthBytes);
}
项目:hadoop    文件:FSImageFormatProtobuf.java   
/**
 * Serializes the inode, inode-directory and files-under-construction
 * sections via a single {@code FSImageFormatPBINode.Saver}.
 */
private void saveInodes(FileSummary.Builder summary) throws IOException {
  final FSImageFormatPBINode.Saver inodeSaver =
      new FSImageFormatPBINode.Saver(this, summary);
  // NOTE(review): each serialize* call appears to commit its section,
  // which may replace sectionOutputStream — re-read the field every call
  // rather than caching it in a local.
  inodeSaver.serializeINodeSection(sectionOutputStream);
  inodeSaver.serializeINodeDirectorySection(sectionOutputStream);
  inodeSaver.serializeFilesUCSection(sectionOutputStream);
}
项目:hadoop    文件:FSImageFormatProtobuf.java   
/**
 * Serializes the snapshot, snapshot-diff and inode-reference sections via
 * a single {@code FSImageFormatPBSnapshot.Saver}.
 */
private void saveSnapshots(FileSummary.Builder summary) throws IOException {
  final FSImageFormatPBSnapshot.Saver snapshotSaver =
      new FSImageFormatPBSnapshot.Saver(this, summary, context,
          context.getSourceNamesystem());
  // sectionOutputStream is re-read for each call because committing a
  // section can replace it.
  snapshotSaver.serializeSnapshotSection(sectionOutputStream);
  snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
  snapshotSaver.serializeINodeReferenceSection(sectionOutputStream);
}
项目:hadoop    文件:FSImageFormatProtobuf.java   
/**
 * Writes the delegation-token secret manager state: the section header,
 * then every delegation key and persisted token in delimited form, and
 * finally commits the SECRET_MANAGER section to the summary.
 */
private void saveSecretManagerSection(FileSummary.Builder summary)
    throws IOException {
  final FSNamesystem fsn = context.getSourceNamesystem();
  final DelegationTokenSecretManager.SecretManagerState state =
      fsn.saveSecretManagerState();
  state.section.writeDelimitedTo(sectionOutputStream);
  for (SecretManagerSection.DelegationKey key : state.keys) {
    key.writeDelimitedTo(sectionOutputStream);
  }
  for (SecretManagerSection.PersistToken token : state.tokens) {
    token.writeDelimitedTo(sectionOutputStream);
  }
  commitSection(summary, SectionName.SECRET_MANAGER);
}
项目:hadoop    文件:FSImageFormatProtobuf.java   
/**
 * Writes the string table: a header carrying the entry count followed by
 * one delimited (id, string) entry per map element, then commits the
 * STRING_TABLE section to the summary.
 */
private void saveStringTableSection(FileSummary.Builder summary)
    throws IOException {
  final OutputStream out = sectionOutputStream;
  StringTableSection.newBuilder()
      .setNumEntry(saverContext.stringMap.size())
      .build().writeDelimitedTo(out);
  for (Entry<String, Integer> entry : saverContext.stringMap.entrySet()) {
    StringTableSection.Entry.newBuilder()
        .setId(entry.getValue())
        .setStr(entry.getKey())
        .build().writeDelimitedTo(out);
  }
  commitSection(summary, SectionName.STRING_TABLE);
}
项目:hadoop    文件:FSImageUtil.java   
/**
 * Reads and validates the FileSummary stored at the end of an fsimage.
 * The on-disk layout is [delimited summary][4-byte summary length], so the
 * reader seeks backwards from EOF.
 *
 * @param file the fsimage opened for random access
 * @return the parsed and version-checked summary
 * @throws IOException if the length field, on-disk version or layout
 *         version is invalid
 */
public static FileSummary loadSummary(RandomAccessFile file)
    throws IOException {
  final int FILE_LENGTH_FIELD_SIZE = 4;
  long fileLength = file.length();
  file.seek(fileLength - FILE_LENGTH_FIELD_SIZE);
  int summaryLength = file.readInt();

  if (summaryLength <= 0) {
    // The guard rejects zero as well as negative values, and it is the
    // summary's length being checked — the old message claimed only
    // "Negative length of the file".
    throw new IOException("Negative or zero length of the summary: "
        + summaryLength);
  }
  file.seek(fileLength - FILE_LENGTH_FIELD_SIZE - summaryLength);

  byte[] summaryBytes = new byte[summaryLength];
  file.readFully(summaryBytes);

  FileSummary summary = FileSummary
      .parseDelimitedFrom(new ByteArrayInputStream(summaryBytes));
  if (summary.getOndiskVersion() != FILE_VERSION) {
    throw new IOException("Unsupported file version "
        + summary.getOndiskVersion());
  }

  if (!NameNodeLayoutVersion.supports(Feature.PROTOBUF_FORMAT,
      summary.getLayoutVersion())) {
    throw new IOException("Unsupported layout version "
        + summary.getLayoutVersion());
  }
  return summary;
}
项目:hadoop    文件:FSImageFormatPBSnapshot.java   
/**
 * Creates a snapshot-section saver bound to its parent image saver.
 *
 * @param parent  image-wide saver coordinating section output
 * @param headers summary builder that accumulates section entries
 * @param context save-namespace context supplying the txid and namesystem
 * @param fsn     the namesystem whose snapshot state is serialized
 */
public Saver(FSImageFormatProtobuf.Saver parent,
    FileSummary.Builder headers, SaveNamespaceContext context,
    FSNamesystem fsn) {
  this.parent = parent;
  this.headers = headers;
  this.context = context;
  this.fsn = fsn;
}
项目:aliyun-oss-hadoop-fs    文件:FSImageFormatProtobuf.java   
/**
 * Finishes the current section: flushes buffered output, records the
 * section's name, offset and length in the summary, and prepares the
 * (possibly compressed) stream for the next section.
 */
public void commitSection(FileSummary.Builder summary, SectionName name)
    throws IOException {
  long oldOffset = currentOffset;
  flushSectionOutputStream();

  // A fresh compressed stream must wrap the underlying stream for each
  // section; fall back to the raw stream when no codec is configured.
  if (codec != null) {
    sectionOutputStream = codec.createOutputStream(underlyingOutputStream);
  } else {
    sectionOutputStream = underlyingOutputStream;
  }
  // On-disk length of the section that was just flushed.
  long length = fileChannel.position() - oldOffset;
  summary.addSections(FileSummary.Section.newBuilder().setName(name.name)
      .setLength(length).setOffset(currentOffset));
  currentOffset += length;
}
项目:aliyun-oss-hadoop-fs    文件:FSImageFormatProtobuf.java   
/**
 * Writes the delimited FileSummary followed by a fixed 4-byte big-endian
 * field holding the summary's on-disk size, so a reader can locate the
 * summary by seeking backwards from the end of the file.
 */
private static void saveFileSummary(OutputStream out, FileSummary summary)
    throws IOException {
  summary.writeDelimitedTo(out);
  int length = getOndiskTrunkSize(summary);
  byte[] lengthBytes = new byte[4];
  // Wrapped ByteBuffer defaults to big-endian byte order.
  ByteBuffer.wrap(lengthBytes).asIntBuffer().put(length);
  out.write(lengthBytes);
}
项目:aliyun-oss-hadoop-fs    文件:FSImageFormatProtobuf.java   
/**
 * Serializes the inode, inode-directory and files-under-construction
 * sections via a single {@code FSImageFormatPBINode.Saver}.
 */
private void saveInodes(FileSummary.Builder summary) throws IOException {
  FSImageFormatPBINode.Saver saver = new FSImageFormatPBINode.Saver(this,
      summary);

  // NOTE(review): sectionOutputStream is re-read for each call; committing
  // a section can replace it, so do not cache it in a local.
  saver.serializeINodeSection(sectionOutputStream);
  saver.serializeINodeDirectorySection(sectionOutputStream);
  saver.serializeFilesUCSection(sectionOutputStream);
}
项目:aliyun-oss-hadoop-fs    文件:FSImageFormatProtobuf.java   
/**
 * Serializes the snapshot, snapshot-diff and inode-reference sections via
 * a single {@code FSImageFormatPBSnapshot.Saver}.
 */
private void saveSnapshots(FileSummary.Builder summary) throws IOException {
  FSImageFormatPBSnapshot.Saver snapshotSaver = new FSImageFormatPBSnapshot.Saver(
      this, summary, context, context.getSourceNamesystem());

  // NOTE(review): sectionOutputStream is re-read for each call; committing
  // a section can replace it, so do not cache it in a local.
  snapshotSaver.serializeSnapshotSection(sectionOutputStream);
  snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
  snapshotSaver.serializeINodeReferenceSection(sectionOutputStream);
}
项目:aliyun-oss-hadoop-fs    文件:FSImageFormatProtobuf.java   
/**
 * Writes the delegation-token secret manager state: the section header,
 * then every delegation key and persisted token in delimited form, and
 * finally commits the SECRET_MANAGER section to the summary.
 */
private void saveSecretManagerSection(FileSummary.Builder summary)
    throws IOException {
  final FSNamesystem fsn = context.getSourceNamesystem();
  final DelegationTokenSecretManager.SecretManagerState state =
      fsn.saveSecretManagerState();
  state.section.writeDelimitedTo(sectionOutputStream);
  for (SecretManagerSection.DelegationKey key : state.keys) {
    key.writeDelimitedTo(sectionOutputStream);
  }
  for (SecretManagerSection.PersistToken token : state.tokens) {
    token.writeDelimitedTo(sectionOutputStream);
  }
  commitSection(summary, SectionName.SECRET_MANAGER);
}
项目:aliyun-oss-hadoop-fs    文件:FSImageFormatProtobuf.java   
/**
 * Serializes the NameSystemSection (generation stamps, last allocated
 * contiguous and striped block ids, transaction id, namespace id and, if
 * active, the rolling-upgrade start time) to the current section stream and
 * records it in the summary.
 *
 * @param summary builder that accumulates section name/offset/length entries
 * @throws IOException on a write failure
 */
private void saveNameSystemSection(FileSummary.Builder summary)
    throws IOException {
  final FSNamesystem fsn = context.getSourceNamesystem();
  OutputStream out = sectionOutputStream;
  BlockIdManager blockIdManager = fsn.getBlockIdManager();
  NameSystemSection.Builder b = NameSystemSection.newBuilder()
      .setGenstampV1(blockIdManager.getGenerationStampV1())
      .setGenstampV1Limit(blockIdManager.getGenerationStampV1Limit())
      .setGenstampV2(blockIdManager.getGenerationStampV2())
      .setLastAllocatedBlockId(blockIdManager.getLastAllocatedContiguousBlockId())
      .setLastAllocatedStripedBlockId(blockIdManager.getLastAllocatedStripedBlockId())
      .setTransactionId(context.getTxId());

  // We use the non-locked version of getNamespaceInfo here since
  // the coordinating thread of saveNamespace already has read-locked
  // the namespace for us. If we attempt to take another readlock
  // from the actual saver thread, there's a potential of a
  // fairness-related deadlock. See the comments on HDFS-2223.
  b.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID());
  if (fsn.isRollingUpgrade()) {
    // Start time is only recorded while a rolling upgrade is in progress.
    b.setRollingUpgradeStartTime(fsn.getRollingUpgradeInfo().getStartTime());
  }
  NameSystemSection s = b.build();
  // Delimited form: message length prefix followed by the payload.
  s.writeDelimitedTo(out);

  commitSection(summary, SectionName.NS_INFO);
}
项目:aliyun-oss-hadoop-fs    文件:FSImageFormatProtobuf.java   
/**
 * Writes the string table: a header carrying the entry count followed by
 * one delimited (id, string) entry per map element, then commits the
 * STRING_TABLE section to the summary.
 */
private void saveStringTableSection(FileSummary.Builder summary)
    throws IOException {
  OutputStream out = sectionOutputStream;
  StringTableSection.Builder b = StringTableSection.newBuilder()
      .setNumEntry(saverContext.stringMap.size());
  b.build().writeDelimitedTo(out);
  for (Entry<String, Integer> e : saverContext.stringMap.entrySet()) {
    StringTableSection.Entry.Builder eb = StringTableSection.Entry
        .newBuilder().setId(e.getValue()).setStr(e.getKey());
    eb.build().writeDelimitedTo(out);
  }
  commitSection(summary, SectionName.STRING_TABLE);
}
项目:aliyun-oss-hadoop-fs    文件:FSImageUtil.java   
/**
 * Reads and validates the FileSummary stored at the end of an fsimage.
 * The on-disk layout is [delimited summary][4-byte summary length], so the
 * reader seeks backwards from EOF.
 *
 * @param file the fsimage opened for random access
 * @return the parsed and version-checked summary
 * @throws IOException if the length field, on-disk version or layout
 *         version is invalid
 */
public static FileSummary loadSummary(RandomAccessFile file)
    throws IOException {
  final int FILE_LENGTH_FIELD_SIZE = 4;
  long fileLength = file.length();
  file.seek(fileLength - FILE_LENGTH_FIELD_SIZE);
  int summaryLength = file.readInt();

  if (summaryLength <= 0) {
    // The guard rejects zero as well as negative values, and it is the
    // summary's length being checked — the old message claimed only
    // "Negative length of the file".
    throw new IOException("Negative or zero length of the summary: "
        + summaryLength);
  }
  file.seek(fileLength - FILE_LENGTH_FIELD_SIZE - summaryLength);

  byte[] summaryBytes = new byte[summaryLength];
  file.readFully(summaryBytes);

  FileSummary summary = FileSummary
      .parseDelimitedFrom(new ByteArrayInputStream(summaryBytes));
  if (summary.getOndiskVersion() != FILE_VERSION) {
    throw new IOException("Unsupported file version "
        + summary.getOndiskVersion());
  }

  if (!NameNodeLayoutVersion.supports(Feature.PROTOBUF_FORMAT,
      summary.getLayoutVersion())) {
    throw new IOException("Unsupported layout version "
        + summary.getLayoutVersion());
  }
  return summary;
}
项目:aliyun-oss-hadoop-fs    文件:FSImageFormatPBSnapshot.java   
/**
 * Creates a snapshot-section saver bound to its parent image saver.
 *
 * @param parent  image-wide saver coordinating section output
 * @param headers summary builder that accumulates section entries
 * @param context save-namespace context supplying the txid and namesystem
 * @param fsn     the namesystem whose snapshot state is serialized
 */
public Saver(FSImageFormatProtobuf.Saver parent,
    FileSummary.Builder headers, SaveNamespaceContext context,
    FSNamesystem fsn) {
  this.parent = parent;
  this.headers = headers;
  this.context = context;
  this.fsn = fsn;
}