Example source code for the Java class org.apache.hadoop.hdfs.util.PersistentLongFile

Project: hadoop    File: TestDFSUpgradeWithHA.java
private static void assertEpochFilesCopied(MiniQJMHACluster jnCluster)
    throws IOException {
  for (int i = 0; i < 3; i++) {
    File journalDir = jnCluster.getJournalCluster().getJournalDir(i, "ns1");
    File currDir = new File(journalDir, "current");
    File prevDir = new File(journalDir, "previous");
    for (String fileName : new String[]{ Journal.LAST_PROMISED_FILENAME,
        Journal.LAST_WRITER_EPOCH }) {
      File prevFile = new File(prevDir, fileName);
      // Possible the prev file doesn't exist, e.g. if there has never been a
      // writer before the upgrade.
      if (prevFile.exists()) {
        PersistentLongFile prevLongFile = new PersistentLongFile(prevFile, -10);
        PersistentLongFile currLongFile = new PersistentLongFile(new File(currDir,
            fileName), -11);
        assertTrue("Value in " + fileName + " has decreased on upgrade in "
            + journalDir, prevLongFile.get() <= currLongFile.get());
      }
    }
  }
}
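A minimal sketch (not taken from any of the projects above; the class name and path are hypothetical) of why the sentinel defaults -10 and -11 in the test are safe: PersistentLongFile.get() falls back to the value passed to the constructor when the backing file does not exist.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.util.PersistentLongFile;

public class SentinelDefaultSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical path; nothing exists here.
    File missing = new File("/tmp/no-such-epoch-file");
    PersistentLongFile epoch = new PersistentLongFile(missing, -10);
    // get() lazily reads the file; since it is absent, the constructor
    // default (-10) is returned instead of an exception being thrown.
    System.out.println(epoch.get()); // prints -10
  }
}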
This assertEpochFilesCopied implementation also appears verbatim in TestDFSUpgradeWithHA.java in the aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, FlexMap, and hadoop-on-lustre2 projects.
Project: hadoop    File: NNStorage.java
/**
 * Write the last seen transaction ID into a separate file.
 * @param sd storage directory
 * @throws IOException
 */
void writeTransactionIdFile(StorageDirectory sd, long txid) throws IOException {
  Preconditions.checkArgument(txid >= 0, "bad txid: " + txid);

  File txIdFile = getStorageFile(sd, NameNodeFile.SEEN_TXID);
  PersistentLongFile.writeFile(txIdFile, txid);
}
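A hedged round-trip sketch of the static helpers used here and in readTransactionIdFile below (class name and path are hypothetical). In the Hadoop implementation, writeFile writes through a temp-file-plus-rename stream, so a concurrent reader never observes a torn value.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.util.PersistentLongFile;

public class TxidRoundTripSketch {
  public static void main(String[] args) throws IOException {
    File txIdFile = new File("/tmp/seen_txid");  // hypothetical location
    PersistentLongFile.writeFile(txIdFile, 42L); // atomic, durable write
    // readFile returns the stored value; the supplied default (0 here)
    // is used only if the file does not exist.
    long txid = PersistentLongFile.readFile(txIdFile, 0);
    System.out.println(txid); // 42
  }
}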
Project: hadoop    File: Journal.java
/**
 * Reload any data that may have been cached. This is necessary
 * when we first load the Journal, but also after any formatting
 * operation, since the cached data is no longer relevant.
 */
private synchronized void refreshCachedData() {
  IOUtils.closeStream(committedTxnId);

  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  this.lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  this.lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  this.committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);
}
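The closeStream() call above matters because BestEffortLongFile, unlike PersistentLongFile, keeps an open file channel between operations. A small sketch under that assumption (class name and path are hypothetical; the HdfsConstants import matches the hadoop 2.x snippets on this page, while 3.x code takes INVALID_TXID from HdfsServerConstants):

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.util.BestEffortLongFile;
import org.apache.hadoop.io.IOUtils;

public class CommittedTxidSketch {
  public static void main(String[] args) throws IOException {
    File f = new File("/tmp/committed-txid"); // hypothetical path
    BestEffortLongFile committed =
        new BestEffortLongFile(f, HdfsConstants.INVALID_TXID);
    try {
      committed.set(1234L);     // syncs the raw long to disk
      long v = committed.get(); // 1234; a short or corrupt file yields
                                // the default instead, by design
      System.out.println(v);
    } finally {
      IOUtils.closeStream(committed); // releases the underlying channel
    }
  }
}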
Both methods above recur across the other projects: NNStorage.writeTransactionIdFile is repeated verbatim (up to trivial javadoc differences) in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, hadoop-TCP, hardfs, and hadoop-on-lustre2, and Journal.refreshCachedData is repeated verbatim in big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, hadoop-TCP, hardfs, and hadoop-on-lustre2. The two substantive variants of refreshCachedData follow.
Project: aliyun-oss-hadoop-fs    File: Journal.java (the same as the hadoop version above, except that INVALID_TXID comes from HdfsServerConstants)
/**
 * Reload any data that may have been cached. This is necessary
 * when we first load the Journal, but also after any formatting
 * operation, since the cached data is no longer relevant.
 */
private synchronized void refreshCachedData() {
  IOUtils.closeStream(committedTxnId);

  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  this.lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  this.lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  this.committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsServerConstants.INVALID_TXID);
}
Project: hadoop-EAR    File: Journal.java (a variant: it reads journalStorage rather than storage, declares IOException, and publishes the writer epoch to a metrics gauge)
/**
 * Reload any data that may have been cached. This is necessary
 * when we first load the Journal, but also after any formatting
 * operation, since the cached data is no longer relevant.
 * @throws IOException 
 */
private synchronized void refreshCachedData() throws IOException {
  IOUtils.closeStream(committedTxnId);

  File currentDir = journalStorage.getSingularStorageDir().getCurrentDir();
  this.lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  this.lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  this.committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);
  metrics.lastWriterEpoch.set(lastWriterEpoch.get());
}
Project: hadoop-on-lustre2    File: Journal.java (an earlier doUpgrade variant that does not yet carry committedTxnId forward)
public synchronized void doUpgrade(StorageInfo sInfo) throws IOException {
  long oldCTime = storage.getCTime();
  storage.cTime = sInfo.cTime;
  int oldLV = storage.getLayoutVersion();
  storage.layoutVersion = sInfo.layoutVersion;
  LOG.info("Starting upgrade of edits directory: "
      + ".\n   old LV = " + oldLV
      + "; old CTime = " + oldCTime
      + ".\n   new LV = " + storage.getLayoutVersion()
      + "; new CTime = " + storage.getCTime());
  storage.getJournalManager().doUpgrade(storage);
  storage.createPaxosDir();

  // Copy over the contents of the epoch data files to the new dir.
  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  File previousDir = storage.getSingularStorageDir().getPreviousDir();

  PersistentLongFile prevLastPromisedEpoch = new PersistentLongFile(
      new File(previousDir, LAST_PROMISED_FILENAME), 0);
  PersistentLongFile prevLastWriterEpoch = new PersistentLongFile(
      new File(previousDir, LAST_WRITER_EPOCH), 0);

  lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);

  lastPromisedEpoch.set(prevLastPromisedEpoch.get());
  lastWriterEpoch.set(prevLastWriterEpoch.get());
}
Project: hadoop    File: Journal.java
public synchronized void doUpgrade(StorageInfo sInfo) throws IOException {
  long oldCTime = storage.getCTime();
  storage.cTime = sInfo.cTime;
  int oldLV = storage.getLayoutVersion();
  storage.layoutVersion = sInfo.layoutVersion;
  LOG.info("Starting upgrade of edits directory: "
      + ".\n   old LV = " + oldLV
      + "; old CTime = " + oldCTime
      + ".\n   new LV = " + storage.getLayoutVersion()
      + "; new CTime = " + storage.getCTime());
  storage.getJournalManager().doUpgrade(storage);
  storage.createPaxosDir();

  // Copy over the contents of the epoch data files to the new dir.
  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  File previousDir = storage.getSingularStorageDir().getPreviousDir();

  PersistentLongFile prevLastPromisedEpoch = new PersistentLongFile(
      new File(previousDir, LAST_PROMISED_FILENAME), 0);
  PersistentLongFile prevLastWriterEpoch = new PersistentLongFile(
      new File(previousDir, LAST_WRITER_EPOCH), 0);
  BestEffortLongFile prevCommittedTxnId = new BestEffortLongFile(
      new File(previousDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);

  lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);

  try {
    lastPromisedEpoch.set(prevLastPromisedEpoch.get());
    lastWriterEpoch.set(prevLastWriterEpoch.get());
    committedTxnId.set(prevCommittedTxnId.get());
  } finally {
    IOUtils.cleanup(LOG, prevCommittedTxnId);
  }
}
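The copy-forward steps above reduce to "read the previous value, or a default if it was never written, and persist it into the new directory". A hypothetical helper, not part of Hadoop, capturing just that step:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.util.PersistentLongFile;

public class CarryForwardSketch {
  /** Copy a persisted long from prevDir to currDir (hypothetical helper). */
  static void carryForward(File prevDir, File currDir, String name, long dflt)
      throws IOException {
    PersistentLongFile prev =
        new PersistentLongFile(new File(prevDir, name), dflt);
    PersistentLongFile curr =
        new PersistentLongFile(new File(currDir, name), dflt);
    // get() yields dflt if the previous file never existed (e.g. no writer
    // before the upgrade); set() writes the value durably into currDir.
    curr.set(prev.get());
  }
}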
In aliyun-oss-hadoop-fs the same doUpgrade differs only in taking INVALID_TXID from HdfsServerConstants; big-c, hadoop-2.6.0-cdh5.4.3, and FlexMap repeat the hadoop version verbatim.
Project: hadoop    File: NNStorage.java
/**
 * Determine the last transaction ID noted in this storage directory.
 * This txid is stored in a special seen_txid file since it might not
 * correspond to the latest image or edit log. For example, an image-only
 * directory will have this txid incremented when edits logs roll, even
 * though the edits logs are in a different directory.
 *
 * @param sd StorageDirectory to check
 * @return If file exists and can be read, last recorded txid. If not, 0L.
 * @throws IOException On errors processing file pointed to by sd
 */
static long readTransactionIdFile(StorageDirectory sd) throws IOException {
  File txidFile = getStorageFile(sd, NameNodeFile.SEEN_TXID);
  return PersistentLongFile.readFile(txidFile, 0);
}
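Because seen_txid is stored as a single decimal line, it can be inspected with ordinary text tools. A hedged sketch of the on-disk format (class name and path are hypothetical):

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import org.apache.hadoop.hdfs.util.PersistentLongFile;

public class SeenTxidFormatSketch {
  public static void main(String[] args) throws IOException {
    File seenTxid = new File("/tmp/seen_txid"); // hypothetical path
    PersistentLongFile.writeFile(seenTxid, 7734L);
    // The file now holds the text "7734" followed by a newline; readFile
    // parses it back and returns the default (0 here) only when the file
    // is missing, matching the javadoc contract above.
    String raw = new String(
        Files.readAllBytes(seenTxid.toPath()), StandardCharsets.UTF_8);
    System.out.print(raw);                                        // 7734
    System.out.println(PersistentLongFile.readFile(seenTxid, 0)); // 7734
  }
}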
readTransactionIdFile is repeated verbatim in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, hadoop-TCP, hardfs, and hadoop-on-lustre2.