Example source code for the Java class org.apache.hadoop.hdfs.util.BestEffortLongFile
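
BestEffortLongFile stores a single long value in a file on disk and falls back to a caller-supplied default when the file is missing or unreadable. Every snippet below follows the same lifecycle: construct the object with a target File and a default value, read and update the value with get() and set(), and close the instance (via IOUtils.closeStream or IOUtils.cleanup) before replacing it. The following is a minimal stand-alone sketch of that lifecycle; it is not taken from any of the projects below, the file path and sentinel default are arbitrary, and it assumes only the constructor and the get()/set()/close() methods that appear in the snippets.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdfs.util.BestEffortLongFile;
import org.apache.hadoop.io.IOUtils;

public class BestEffortLongFileExample {
  public static void main(String[] args) throws IOException {
    // Arbitrary example file and sentinel default; real callers point at their
    // storage directory and use a constant such as INVALID_TXID.
    File f = new File("/tmp/committed-txid-example");
    BestEffortLongFile committedTxnId = new BestEffortLongFile(f, -1L);
    try {
      // get() loads the value lazily; if the file does not exist yet,
      // the default passed to the constructor (-1 here) is returned.
      long current = committedTxnId.get();
      System.out.println("current value = " + current);

      // set() writes the new value to the file and keeps it cached in memory.
      committedTxnId.set(current + 1);
    } finally {
      // BestEffortLongFile is Closeable, so release it when done, mirroring the
      // IOUtils.closeStream(committedTxnId) calls in the snippets below.
      IOUtils.closeStream(committedTxnId);
    }
  }
}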

Project: hadoop    File: Journal.java
/**
 * Reload any data that may have been cached. This is necessary
 * when we first load the Journal, but also after any formatting
 * operation, since the cached data is no longer relevant.
 */
private synchronized void refreshCachedData() {
  IOUtils.closeStream(committedTxnId);

  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  this.lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  this.lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  this.committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);
}
Project: hadoop    File: TestDFSUpgradeWithHA.java
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal journal1 = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
      .getInternalState(journal1, "committedTxnId");
  return committedTxnId != null ? committedTxnId.get() :
      HdfsConstants.INVALID_TXID;
}
Project: aliyun-oss-hadoop-fs    File: Journal.java
/**
 * Reload any data that may have been cached. This is necessary
 * when we first load the Journal, but also after any formatting
 * operation, since the cached data is no longer relevant.
 */
private synchronized void refreshCachedData() {
  IOUtils.closeStream(committedTxnId);

  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  this.lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  this.lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  this.committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsServerConstants.INVALID_TXID);
}
Project: aliyun-oss-hadoop-fs    File: TestDFSUpgradeWithHA.java
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal journal1 = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
      .getInternalState(journal1, "committedTxnId");
  return committedTxnId != null ? committedTxnId.get() :
      HdfsServerConstants.INVALID_TXID;
}
Project: big-c    File: Journal.java
/**
 * Reload any data that may have been cached. This is necessary
 * when we first load the Journal, but also after any formatting
 * operation, since the cached data is no longer relevant.
 */
private synchronized void refreshCachedData() {
  IOUtils.closeStream(committedTxnId);

  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  this.lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  this.lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  this.committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);
}
Project: big-c    File: TestDFSUpgradeWithHA.java
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal journal1 = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
      .getInternalState(journal1, "committedTxnId");
  return committedTxnId != null ? committedTxnId.get() :
      HdfsConstants.INVALID_TXID;
}
Project: hadoop-2.6.0-cdh5.4.3    File: Journal.java
/**
 * Reload any data that may have been cached. This is necessary
 * when we first load the Journal, but also after any formatting
 * operation, since the cached data is no longer relevant.
 */
private synchronized void refreshCachedData() {
  IOUtils.closeStream(committedTxnId);

  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  this.lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  this.lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  this.committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDFSUpgradeWithHA.java
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal journal1 = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
      .getInternalState(journal1, "committedTxnId");
  return committedTxnId != null ? committedTxnId.get() :
      HdfsConstants.INVALID_TXID;
}
Project: hadoop-EAR    File: Journal.java
/**
 * Reload any data that may have been cached. This is necessary
 * when we first load the Journal, but also after any formatting
 * operation, since the cached data is no longer relevant.
 * @throws IOException 
 */
private synchronized void refreshCachedData() throws IOException {
  IOUtils.closeStream(committedTxnId);

  File currentDir = journalStorage.getSingularStorageDir().getCurrentDir();
  this.lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  this.lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  this.committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);
  metrics.lastWriterEpoch.set(lastWriterEpoch.get());
}
Project: hadoop-plus    File: Journal.java
/**
 * Reload any data that may have been cached. This is necessary
 * when we first load the Journal, but also after any formatting
 * operation, since the cached data is no longer relevant.
 */
private synchronized void refreshCachedData() {
  IOUtils.closeStream(committedTxnId);

  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  this.lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  this.lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  this.committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);
}
Project: FlexMap    File: Journal.java
/**
 * Reload any data that may have been cached. This is necessary
 * when we first load the Journal, but also after any formatting
 * operation, since the cached data is no longer relevant.
 */
private synchronized void refreshCachedData() {
  IOUtils.closeStream(committedTxnId);

  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  this.lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  this.lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  this.committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);
}
Project: FlexMap    File: TestDFSUpgradeWithHA.java
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal journal1 = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
      .getInternalState(journal1, "committedTxnId");
  return committedTxnId != null ? committedTxnId.get() :
      HdfsConstants.INVALID_TXID;
}
Project: hadoop-TCP    File: Journal.java
/**
 * Reload any data that may have been cached. This is necessary
 * when we first load the Journal, but also after any formatting
 * operation, since the cached data is no longer relevant.
 */
private synchronized void refreshCachedData() {
  IOUtils.closeStream(committedTxnId);

  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  this.lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  this.lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  this.committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);
}
Project: hardfs    File: Journal.java
/**
 * Reload any data that may have been cached. This is necessary
 * when we first load the Journal, but also after any formatting
 * operation, since the cached data is no longer relevant.
 */
private synchronized void refreshCachedData() {
  IOUtils.closeStream(committedTxnId);

  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  this.lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  this.lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  this.committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);
}
Project: hadoop-on-lustre2    File: Journal.java
/**
 * Reload any data that may have been cached. This is necessary
 * when we first load the Journal, but also after any formatting
 * operation, since the cached data is no longer relevant.
 */
private synchronized void refreshCachedData() {
  IOUtils.closeStream(committedTxnId);

  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  this.lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  this.lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  this.committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);
}
Project: hadoop    File: Journal.java
public synchronized void doUpgrade(StorageInfo sInfo) throws IOException {
  long oldCTime = storage.getCTime();
  storage.cTime = sInfo.cTime;
  int oldLV = storage.getLayoutVersion();
  storage.layoutVersion = sInfo.layoutVersion;
  LOG.info("Starting upgrade of edits directory: "
      + ".\n   old LV = " + oldLV
      + "; old CTime = " + oldCTime
      + ".\n   new LV = " + storage.getLayoutVersion()
      + "; new CTime = " + storage.getCTime());
  storage.getJournalManager().doUpgrade(storage);
  storage.createPaxosDir();

  // Copy over the contents of the epoch data files to the new dir.
  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  File previousDir = storage.getSingularStorageDir().getPreviousDir();

  PersistentLongFile prevLastPromisedEpoch = new PersistentLongFile(
      new File(previousDir, LAST_PROMISED_FILENAME), 0);
  PersistentLongFile prevLastWriterEpoch = new PersistentLongFile(
      new File(previousDir, LAST_WRITER_EPOCH), 0);
  BestEffortLongFile prevCommittedTxnId = new BestEffortLongFile(
      new File(previousDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);

  lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);

  try {
    lastPromisedEpoch.set(prevLastPromisedEpoch.get());
    lastWriterEpoch.set(prevLastWriterEpoch.get());
    committedTxnId.set(prevCommittedTxnId.get());
  } finally {
    IOUtils.cleanup(LOG, prevCommittedTxnId);
  }
}
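
The doUpgrade() variants in this and the following snippets all share one idiom: read the three long values recorded under the previous/ storage directory, seed fresh files under current/ with them, and close the previous-directory BestEffortLongFile in a finally block. The sketch below distills that copy-over step in isolation. It is not project code: the directory arguments and file names are illustrative stand-ins for the Journal's filename constants, the -1 default is an arbitrary sentinel, and it assumes only the PersistentLongFile/BestEffortLongFile constructors and get()/set() methods shown above.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdfs.util.BestEffortLongFile;
import org.apache.hadoop.hdfs.util.PersistentLongFile;
import org.apache.hadoop.io.IOUtils;

public class EpochCopyOverSketch {
  public static void copyOver(File previousDir, File currentDir) throws IOException {
    // Illustrative file names standing in for the Journal's LAST_PROMISED_FILENAME
    // and COMMITTED_TXID_FILENAME constants.
    PersistentLongFile prevEpoch =
        new PersistentLongFile(new File(previousDir, "last-promised-epoch"), 0);
    BestEffortLongFile prevTxid =
        new BestEffortLongFile(new File(previousDir, "committed-txid"), -1L);

    PersistentLongFile newEpoch =
        new PersistentLongFile(new File(currentDir, "last-promised-epoch"), 0);
    BestEffortLongFile newTxid =
        new BestEffortLongFile(new File(currentDir, "committed-txid"), -1L);
    try {
      // set() persists the values read from the previous directory into the new files.
      newEpoch.set(prevEpoch.get());
      newTxid.set(prevTxid.get());
    } finally {
      // PersistentLongFile exposes no close() method, so only the Closeable
      // BestEffortLongFile instances are cleaned up here.
      IOUtils.cleanup(null, prevTxid, newTxid);
    }
  }
}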
Project: aliyun-oss-hadoop-fs    File: Journal.java
public synchronized void doUpgrade(StorageInfo sInfo) throws IOException {
  long oldCTime = storage.getCTime();
  storage.cTime = sInfo.cTime;
  int oldLV = storage.getLayoutVersion();
  storage.layoutVersion = sInfo.layoutVersion;
  LOG.info("Starting upgrade of edits directory: "
      + ".\n   old LV = " + oldLV
      + "; old CTime = " + oldCTime
      + ".\n   new LV = " + storage.getLayoutVersion()
      + "; new CTime = " + storage.getCTime());
  storage.getJournalManager().doUpgrade(storage);
  storage.createPaxosDir();

  // Copy over the contents of the epoch data files to the new dir.
  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  File previousDir = storage.getSingularStorageDir().getPreviousDir();

  PersistentLongFile prevLastPromisedEpoch = new PersistentLongFile(
      new File(previousDir, LAST_PROMISED_FILENAME), 0);
  PersistentLongFile prevLastWriterEpoch = new PersistentLongFile(
      new File(previousDir, LAST_WRITER_EPOCH), 0);
  BestEffortLongFile prevCommittedTxnId = new BestEffortLongFile(
      new File(previousDir, COMMITTED_TXID_FILENAME),
      HdfsServerConstants.INVALID_TXID);

  lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsServerConstants.INVALID_TXID);

  try {
    lastPromisedEpoch.set(prevLastPromisedEpoch.get());
    lastWriterEpoch.set(prevLastWriterEpoch.get());
    committedTxnId.set(prevCommittedTxnId.get());
  } finally {
    IOUtils.cleanup(LOG, prevCommittedTxnId);
  }
}
Project: big-c    File: Journal.java
public synchronized void doUpgrade(StorageInfo sInfo) throws IOException {
  long oldCTime = storage.getCTime();
  storage.cTime = sInfo.cTime;
  int oldLV = storage.getLayoutVersion();
  storage.layoutVersion = sInfo.layoutVersion;
  LOG.info("Starting upgrade of edits directory: "
      + ".\n   old LV = " + oldLV
      + "; old CTime = " + oldCTime
      + ".\n   new LV = " + storage.getLayoutVersion()
      + "; new CTime = " + storage.getCTime());
  storage.getJournalManager().doUpgrade(storage);
  storage.createPaxosDir();

  // Copy over the contents of the epoch data files to the new dir.
  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  File previousDir = storage.getSingularStorageDir().getPreviousDir();

  PersistentLongFile prevLastPromisedEpoch = new PersistentLongFile(
      new File(previousDir, LAST_PROMISED_FILENAME), 0);
  PersistentLongFile prevLastWriterEpoch = new PersistentLongFile(
      new File(previousDir, LAST_WRITER_EPOCH), 0);
  BestEffortLongFile prevCommittedTxnId = new BestEffortLongFile(
      new File(previousDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);

  lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);

  try {
    lastPromisedEpoch.set(prevLastPromisedEpoch.get());
    lastWriterEpoch.set(prevLastWriterEpoch.get());
    committedTxnId.set(prevCommittedTxnId.get());
  } finally {
    IOUtils.cleanup(LOG, prevCommittedTxnId);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: Journal.java
public synchronized void doUpgrade(StorageInfo sInfo) throws IOException {
  long oldCTime = storage.getCTime();
  storage.cTime = sInfo.cTime;
  int oldLV = storage.getLayoutVersion();
  storage.layoutVersion = sInfo.layoutVersion;
  LOG.info("Starting upgrade of edits directory: "
      + ".\n   old LV = " + oldLV
      + "; old CTime = " + oldCTime
      + ".\n   new LV = " + storage.getLayoutVersion()
      + "; new CTime = " + storage.getCTime());
  storage.getJournalManager().doUpgrade(storage);
  storage.createPaxosDir();

  // Copy over the contents of the epoch data files to the new dir.
  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  File previousDir = storage.getSingularStorageDir().getPreviousDir();

  PersistentLongFile prevLastPromisedEpoch = new PersistentLongFile(
      new File(previousDir, LAST_PROMISED_FILENAME), 0);
  PersistentLongFile prevLastWriterEpoch = new PersistentLongFile(
      new File(previousDir, LAST_WRITER_EPOCH), 0);
  BestEffortLongFile prevCommittedTxnId = new BestEffortLongFile(
      new File(previousDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);

  lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);

  try {
    lastPromisedEpoch.set(prevLastPromisedEpoch.get());
    lastWriterEpoch.set(prevLastWriterEpoch.get());
    committedTxnId.set(prevCommittedTxnId.get());
  } finally {
    IOUtils.cleanup(LOG, prevCommittedTxnId);
  }
}
Project: FlexMap    File: Journal.java
public synchronized void doUpgrade(StorageInfo sInfo) throws IOException {
  long oldCTime = storage.getCTime();
  storage.cTime = sInfo.cTime;
  int oldLV = storage.getLayoutVersion();
  storage.layoutVersion = sInfo.layoutVersion;
  LOG.info("Starting upgrade of edits directory: "
      + ".\n   old LV = " + oldLV
      + "; old CTime = " + oldCTime
      + ".\n   new LV = " + storage.getLayoutVersion()
      + "; new CTime = " + storage.getCTime());
  storage.getJournalManager().doUpgrade(storage);
  storage.createPaxosDir();

  // Copy over the contents of the epoch data files to the new dir.
  File currentDir = storage.getSingularStorageDir().getCurrentDir();
  File previousDir = storage.getSingularStorageDir().getPreviousDir();

  PersistentLongFile prevLastPromisedEpoch = new PersistentLongFile(
      new File(previousDir, LAST_PROMISED_FILENAME), 0);
  PersistentLongFile prevLastWriterEpoch = new PersistentLongFile(
      new File(previousDir, LAST_WRITER_EPOCH), 0);
  BestEffortLongFile prevCommittedTxnId = new BestEffortLongFile(
      new File(previousDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);

  lastPromisedEpoch = new PersistentLongFile(
      new File(currentDir, LAST_PROMISED_FILENAME), 0);
  lastWriterEpoch = new PersistentLongFile(
      new File(currentDir, LAST_WRITER_EPOCH), 0);
  committedTxnId = new BestEffortLongFile(
      new File(currentDir, COMMITTED_TXID_FILENAME),
      HdfsConstants.INVALID_TXID);

  try {
    lastPromisedEpoch.set(prevLastPromisedEpoch.get());
    lastWriterEpoch.set(prevLastWriterEpoch.get());
    committedTxnId.set(prevCommittedTxnId.get());
  } finally {
    IOUtils.cleanup(LOG, prevCommittedTxnId);
  }
}