Java 类org.apache.hadoop.hdfs.server.namenode.FSEditLog 实例源码

项目:hadoop-EAR    文件:TestAvatarQJMFailures.java   
/**
 * Prepares a single-datanode, QJM-enabled mini avatar cluster for a test,
 * reusing the supplied journal cluster when one is provided.
 */
public void setUp(Configuration confg, MiniJournalCluster jCluster,
    String name)
    throws Exception {
  LOG.info("START TEST : " + name);

  // Install the injection handler before any cluster code runs.
  handler = new TestAvatarQJMFailuresHandler();
  InjectionHandler.set(handler);

  // Use the real runtime (other tests may have installed a spy).
  FSEditLog.setRuntimeForTesting(Runtime.getRuntime());
  conf = confg;

  MiniAvatarCluster.Builder builder =
      new MiniAvatarCluster.Builder(conf).numDataNodes(1).enableQJM(true);
  if (jCluster == null) {
    cluster = builder.build();
  } else {
    cluster = builder.setJournalCluster(jCluster).build();
  }
  fs = cluster.getFileSystem();
  journalCluster = cluster.getJournalCluster();

}
项目:hadoop-EAR    文件:TestBookKeeperEditLogInputStream.java   
@After
public void tearDown() throws Exception {
  try {
    super.tearDown();
    // Best-effort removal of the temp edits file; log on failure
    // instead of failing the test.
    if (tempEditsFile != null && !tempEditsFile.delete()) {
      LOG.warn("Unable to delete temporary edits file: " +
          tempEditsFile.getAbsolutePath());
    }
  } finally {
    // Always restore sizeFlushBuffer, since individual tests may have
    // altered it to trigger a boundary condition.
    int previousSize = FSEditLog.sizeFlushBuffer;
    FSEditLog.sizeFlushBuffer = origSizeFlushBuffer;

    if (previousSize != origSizeFlushBuffer) {
      LOG.info("Setting FSEditLog.sizeFlushBuffer back to " +
          origSizeFlushBuffer + " was " + previousSize +
          " after last test.");
    }
  }
}
项目:hadoop-EAR    文件:Journal.java   
/**
 * Purges journal (and, when formatted, image) data older than the given
 * transaction id.
 *
 * @see JournalManager#purgeLogsOlderThan(long)
 */
public synchronized void purgeLogsOlderThan(RequestInfo reqInfo,
    long minTxIdToKeep) throws IOException {
  checkJournalStorageFormatted();
  checkRequest(reqInfo);

  journalStorage.purgeDataOlderThan(minTxIdToKeep);
  boolean purgingEverything = (minTxIdToKeep == FSEditLog.PURGE_ALL_TXID);
  if (purgingEverything) {
    // Removing all segments: the committed transaction id is reset too.
    committedTxnId.set(0, true);
  }
  minTxid = purgingEverything ? 0 : minTxIdToKeep;
  if (imageStorage.isFormatted()) {
    imageStorage.purgeDataOlderThan(minTxIdToKeep == 0 ? -1 : minTxIdToKeep);
  }
}
项目:hadoop    文件:TestFailureToReadEdits.java   
/** Installs a spy on nn1's edit log whose selectInputStreams is limited. */
private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
  LimitedEditLogAnswer answer = new LimitedEditLogAnswer();
  FSEditLog spyEditLog = NameNodeAdapter.spyOnEditLog(nn1);
  doAnswer(answer).when(spyEditLog).selectInputStreams(
      anyLong(), anyLong(), (MetaRecoveryContext) anyObject(), anyBoolean());
  return answer;
}
项目:aliyun-oss-hadoop-fs    文件:TestFailureToReadEdits.java   
/** Installs a spy on nn1's edit log whose selectInputStreams is limited. */
private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
  LimitedEditLogAnswer answer = new LimitedEditLogAnswer();
  FSEditLog spyEditLog = NameNodeAdapter.spyOnEditLog(nn1);
  doAnswer(answer).when(spyEditLog).selectInputStreams(
      anyLong(), anyLong(), (MetaRecoveryContext) anyObject(), anyBoolean());
  return answer;
}
项目:big-c    文件:TestFailureToReadEdits.java   
/** Installs a spy on nn1's edit log whose selectInputStreams is limited. */
private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
  LimitedEditLogAnswer answer = new LimitedEditLogAnswer();
  FSEditLog spyEditLog = NameNodeAdapter.spyOnEditLog(nn1);
  doAnswer(answer).when(spyEditLog).selectInputStreams(
      anyLong(), anyLong(), (MetaRecoveryContext) anyObject(), anyBoolean());
  return answer;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestFailureToReadEdits.java   
/** Installs a spy on nn1's edit log whose selectInputStreams is limited. */
private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
  LimitedEditLogAnswer answer = new LimitedEditLogAnswer();
  FSEditLog spyEditLog = NameNodeAdapter.spyOnEditLog(nn1);
  doAnswer(answer).when(spyEditLog).selectInputStreams(
      anyLong(), anyLong(), (MetaRecoveryContext) anyObject(), anyBoolean());
  return answer;
}
项目:hadoop-EAR    文件:MiniDFSCluster.java   
/**
 * Shuts down every namenode on its own thread, adding each thread to
 * {@code threads}. Runtime.exit() is stubbed out so shutdown cannot kill
 * the JVM.
 */
private void processNamenodesForShutdown(Collection<Thread> threads) {
  Runtime spiedRuntime = spy(Runtime.getRuntime());
  doNothing().when(spiedRuntime).exit(anyInt());
  FSEditLog.setRuntimeForTesting(spiedRuntime);
  for (NameNodeInfo nnInfo : nameNodes) {
    Thread shutdownThread = new Thread(new ShutDownUtil(nnInfo));
    shutdownThread.start();
    threads.add(shutdownThread);
  }
}
项目:hadoop-EAR    文件:Ingest.java   
/**
 * Stops ingestion on the given edit log: ends the current log segment,
 * records the last written transaction id, and resets the standby's
 * ingest state to expect the following transaction.
 */
private void tearDown(FSEditLog localEditLog,
    boolean writeEndTxn, boolean updateLastCorrectTxn) throws IOException {
  // NOTE(review): the segment is closed before reading the last written
  // txid — the ordering presumably matters; confirm before reordering.
  localEditLog.endCurrentLogSegment(writeEndTxn);
  endTxId = localEditLog.getLastWrittenTxId();
  running = false;
  lastScan = true;
  if (updateLastCorrectTxn) {
    standby.setLastCorrectTxId(endTxId);
  }
  // Next expected transaction is the one after the closed segment's end.
  standby.clearIngestState(endTxId + 1);
}
项目:hadoop-EAR    文件:TestAvatarQJMFailures.java   
/**
 * Tests that if the journal cluster is marked as required, we cannot
 * tolerate its failure.
 */
@Test
public void testJournalClusterFailureWhenRequired() throws Exception {
  Configuration conf = new Configuration();
  journalCluster =
      new MiniJournalCluster.Builder(conf).numJournalNodes(3).build();
  String journalURI = journalCluster.getQuorumJournalURI(
      MiniAvatarCluster.JID).toString();
  // Mark two journal directories as required edits directories.
  conf.set("dfs.name.edits.dir.required", journalURI + "/zero," + journalURI
      + "/one");

  setUp(conf, journalCluster, "testJournalClusterFailureWhenRequired");

  // Spy the runtime so exit() calls are recorded but not executed.
  Runtime runtime = spy(Runtime.getRuntime());
  doNothing().when(runtime).exit(anyInt());
  FSEditLog.setRuntimeForTesting(runtime);


  // Kill standby to ensure only 1 runtime.exit();
  cluster.killStandby();
  journalCluster.shutdown();
  fs.create(new Path("/test1"));

  // With the required journal gone, the write must trigger exactly one
  // attempted exit.
  verify(runtime, times(1)).exit(anyInt());
}
项目:hadoop-EAR    文件:TestAvatarQJMFailures2.java   
/** Boots a one-datanode, QJM-enabled cluster for the named test. */
public void setUp(String name) throws Exception {
  LOG.info("START TEST : " + name);

  // Route injection points through the test handler.
  handler = new TestAvatarQJMFailuresHandler();
  InjectionHandler.set(handler);

  // Use the real runtime (other tests may have installed a spy).
  FSEditLog.setRuntimeForTesting(Runtime.getRuntime());
  conf = new Configuration();
  cluster = new MiniAvatarCluster.Builder(conf)
      .numDataNodes(1)
      .enableQJM(true)
      .build();
  fs = cluster.getFileSystem();
  journalCluster = cluster.getJournalCluster();
}
项目:hadoop-EAR    文件:TestAvatarCheckpointingFailure.java   
/** Starts a two-node avatar cluster with a one-hour checkpoint period. */
private void setUp(String name) throws Exception {
  LOG.info("------------------- test: " + name + " START ----------------");
  conf = new Configuration();
  // One-hour checkpoint period.
  conf.setLong("fs.checkpoint.period", 3600);

  cluster = new MiniAvatarCluster(conf, 2, true, null, null);
  fs = cluster.getFileSystem();

  // Spy the runtime so exit() calls are recorded but not executed.
  runtime = spy(Runtime.getRuntime());
  doNothing().when(runtime).exit(anyInt());
  FSEditLog.setRuntimeForTesting(runtime);
}
项目:hadoop-EAR    文件:TestBookKeeperEditLogInputStream.java   
@Before
public void setUp() throws Exception {
  super.setUp();

  // Snapshot the flush buffer size so tearDown() can restore it if a
  // test alters it.
  origSizeFlushBuffer = FSEditLog.sizeFlushBuffer;

  // Temp file used to compare the BookKeeper-backed input stream
  // against the file-based input stream.
  tempEditsFile = FSEditLogTestUtil.createTempEditsFile(
      "testBookKeeperEditLogInputStream");
}
项目:hadoop-EAR    文件:QuorumJournalManager.java   
/**
 * Opens a new log segment at {@code txId} on a write quorum of loggers
 * and returns a quorum-backed output stream for it.
 */
@Override
public EditLogOutputStream startLogSegment(long txId) throws IOException {
  Preconditions.checkState(isActiveWriter,
      "must recover segments before starting a new one");
  QuorumCall<AsyncLogger, Void> call = loggers.startLogSegment(txId);
  loggers.waitForWriteQuorum(call, startSegmentTimeoutMs,
      "startLogSegment(" + txId + ")");
  return new QuorumOutputStream(loggers, txId, FSEditLog.sizeFlushBuffer,
      writeTxnsTimeoutMs, metrics, journalId);
}
项目:hadoop-plus    文件:TestFailureToReadEdits.java   
/** Wraps the tailer's edit log in a spy that limits selectInputStreams. */
private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
  LimitedEditLogAnswer answer = new LimitedEditLogAnswer();
  FSEditLog spyEditLog = spy(nn1.getNamesystem().getEditLogTailer()
      .getEditLog());
  doAnswer(answer).when(spyEditLog).selectInputStreams(
      anyLong(), anyLong(), (MetaRecoveryContext) anyObject(), anyBoolean());
  // Install the spy so the tailer uses it for subsequent reads.
  nn1.getNamesystem().getEditLogTailer().setEditLog(spyEditLog);

  return answer;
}
项目:FlexMap    文件:TestFailureToReadEdits.java   
/** Installs a spy on nn1's edit log whose selectInputStreams is limited. */
private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
  LimitedEditLogAnswer answer = new LimitedEditLogAnswer();
  FSEditLog spyEditLog = NameNodeAdapter.spyOnEditLog(nn1);
  doAnswer(answer).when(spyEditLog).selectInputStreams(
      anyLong(), anyLong(), (MetaRecoveryContext) anyObject(), anyBoolean());
  return answer;
}
项目:hadoop-TCP    文件:TestFailureToReadEdits.java   
/** Wraps the tailer's edit log in a spy that limits selectInputStreams. */
private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
  LimitedEditLogAnswer answer = new LimitedEditLogAnswer();
  FSEditLog spyEditLog = spy(nn1.getNamesystem().getEditLogTailer()
      .getEditLog());
  doAnswer(answer).when(spyEditLog).selectInputStreams(
      anyLong(), anyLong(), (MetaRecoveryContext) anyObject(), anyBoolean());
  // Install the spy so the tailer uses it for subsequent reads.
  nn1.getNamesystem().getEditLogTailer().setEditLog(spyEditLog);

  return answer;
}
项目:hardfs    文件:TestFailureToReadEdits.java   
/** Wraps the tailer's edit log in a spy that limits selectInputStreams. */
private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
  LimitedEditLogAnswer answer = new LimitedEditLogAnswer();
  FSEditLog spyEditLog = spy(nn1.getNamesystem().getEditLogTailer()
      .getEditLog());
  doAnswer(answer).when(spyEditLog).selectInputStreams(
      anyLong(), anyLong(), (MetaRecoveryContext) anyObject(), anyBoolean());
  // Install the spy so the tailer uses it for subsequent reads.
  nn1.getNamesystem().getEditLogTailer().setEditLog(spyEditLog);

  return answer;
}
项目:hadoop-on-lustre2    文件:TestFailureToReadEdits.java   
/** Installs a spy on nn1's edit log whose selectInputStreams is limited. */
private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
  LimitedEditLogAnswer answer = new LimitedEditLogAnswer();
  FSEditLog spyEditLog = NameNodeAdapter.spyOnEditLog(nn1);
  doAnswer(answer).when(spyEditLog).selectInputStreams(
      anyLong(), anyLong(), (MetaRecoveryContext) anyObject(), anyBoolean());
  return answer;
}
项目:hadoop    文件:EditLogTailer.java   
/** Exposes the tailed edit log for tests. */
@VisibleForTesting
FSEditLog getEditLog() {
  return this.editLog;
}
项目:hadoop    文件:EditLogTailer.java   
/** Swaps in a replacement edit log (e.g. a Mockito spy) for tests. */
@VisibleForTesting
public void setEditLog(FSEditLog newLog) {
  this.editLog = newLog;
}
项目:hadoop    文件:DFSTestUtil.java   
/** Installs {@code newLog} as the edit log of both the FSImage and the
 *  FSDirectory owned by {@code fsn}. */
public static void setEditLogForTesting(FSNamesystem fsn, FSEditLog newLog) {
  // Each owner holds its own reference, so patch both.
  Whitebox.setInternalState(fsn.getFSDirectory(), "editLog", newLog);
  Whitebox.setInternalState(fsn.getFSImage(), "editLog", newLog);
}
项目:aliyun-oss-hadoop-fs    文件:EditLogTailer.java   
/** Exposes the tailed edit log for tests. */
@VisibleForTesting
FSEditLog getEditLog() {
  return this.editLog;
}
项目:aliyun-oss-hadoop-fs    文件:EditLogTailer.java   
/** Swaps in a replacement edit log (e.g. a Mockito spy) for tests. */
@VisibleForTesting
public void setEditLog(FSEditLog newLog) {
  this.editLog = newLog;
}
项目:aliyun-oss-hadoop-fs    文件:DFSTestUtil.java   
/** Installs {@code newLog} as the edit log of both the FSImage and the
 *  FSDirectory owned by {@code fsn}. */
public static void setEditLogForTesting(FSNamesystem fsn, FSEditLog newLog) {
  // Each owner holds its own reference, so patch both.
  Whitebox.setInternalState(fsn.getFSDirectory(), "editLog", newLog);
  Whitebox.setInternalState(fsn.getFSImage(), "editLog", newLog);
}
项目:big-c    文件:EditLogTailer.java   
/** Exposes the tailed edit log for tests. */
@VisibleForTesting
FSEditLog getEditLog() {
  return this.editLog;
}
项目:big-c    文件:EditLogTailer.java   
/** Swaps in a replacement edit log (e.g. a Mockito spy) for tests. */
@VisibleForTesting
public void setEditLog(FSEditLog newLog) {
  this.editLog = newLog;
}
项目:big-c    文件:DFSTestUtil.java   
/** Installs {@code newLog} as the edit log of both the FSImage and the
 *  FSDirectory owned by {@code fsn}. */
public static void setEditLogForTesting(FSNamesystem fsn, FSEditLog newLog) {
  // Each owner holds its own reference, so patch both.
  Whitebox.setInternalState(fsn.getFSDirectory(), "editLog", newLog);
  Whitebox.setInternalState(fsn.getFSImage(), "editLog", newLog);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:EditLogTailer.java   
/** Exposes the tailed edit log for tests. */
@VisibleForTesting
FSEditLog getEditLog() {
  return this.editLog;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:EditLogTailer.java   
/** Swaps in a replacement edit log (e.g. a Mockito spy) for tests. */
@VisibleForTesting
public void setEditLog(FSEditLog newLog) {
  this.editLog = newLog;
}
项目:hadoop-EAR    文件:TestFileAppend4.java   
/**
 * Test for a race in appendFile where the file might get removed in between
 * the two synchronized sections. The append is parked inside logSync() via
 * a DelayAnswer, the file is deleted, and then the append is released; the
 * only acceptable outcome is success or a "File does not exist." error.
 */
public void testAppendFileRace() throws Throwable {
  LOG.info("START");
  cluster = new MiniDFSCluster(conf, 1, true, null);
  final FileSystem fs1 = cluster.getFileSystem(); // fixed stray ';;'

  try {
    createFile(fs1, "/testAppendFileRace", 1, BBW_SIZE);
    stm.close();

    // Spy on the namesystem's edit log so logSync() can be stalled.
    NameNode nn = cluster.getNameNode();
    FSEditLog editLogSpy = FSImageAdapter.injectEditLogSpy(nn.getNamesystem());
    DelayAnswer delayer = new DelayAnswer();
    doAnswer(delayer).when(editLogSpy).logSync();

    final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
    Thread appender = new Thread() {
        public void run() {
          try {
            stm = fs1.append(file1);
          } catch (Throwable t) {
            err.set(t);
          }
        }
      };
    LOG.info("Triggering append in other thread");
    appender.start();

    LOG.info("Waiting for logsync");
    delayer.waitForCall();

    LOG.info("Resetting spy");
    reset(editLogSpy);

    // Delete the file while the append is blocked inside logSync().
    LOG.info("Deleting file");
    fs1.delete(file1, true);

    LOG.info("Allowing append to proceed");
    delayer.proceed();

    LOG.info("Waiting for append to finish");

    appender.join();

    if (err.get() != null) {
      // Guard against a null exception message — the original code could
      // throw a confusing NPE here instead of surfacing the real error.
      String msg = err.get().getMessage();
      if (msg != null && msg.contains("File does not exist.")) {
        LOG.info("Got expected exception", err.get());
      } else {
        throw err.get();
      }
    }
    LOG.info("Closing stream");
    stm.close();
  } finally {
    fs1.close();
    cluster.shutdown();
  }
}
项目:hadoop-EAR    文件:AvatarSetupUtil.java   
/** Sets up the avatar cluster with a short (500ms) edit log roll timeout. */
public void setUp(boolean federation, String name, Configuration conf)
    throws Exception {
  final long rollTimeoutMsec = 500; // keep log rolls fast in tests
  conf.setLong(FSEditLog.CONF_ROLL_TIMEOUT_MSEC, rollTimeoutMsec);
  setUp(federation, conf, name);
}
项目:hadoop-EAR    文件:TestBookKeeperEditLogInputStream.java   
/**
 * Writes {@code numEdits} transactions to both a BookKeeper ledger and a
 * local file, then reads them back one transaction at a time — refreshing
 * both input streams before each read — verifying positions and operations
 * stay in lock step.
 */
private void testReadAndRefreshAfterEachTransactionInner(int numEdits)
  throws Exception {
  // Small flush buffer so edits are flushed frequently.
  FSEditLog.sizeFlushBuffer = 100;
  LedgerHandle ledgerOut = createLedger();
  long ledgerId = ledgerOut.getId();
  BookKeeperEditLogOutputStream bkEditsOut =
      new BookKeeperEditLogOutputStream(ledgerOut);
  EditLogFileOutputStream fileEditsOut =
      new EditLogFileOutputStream(tempEditsFile, null);

  // Populate both sinks with identical transactions 1..numEdits.
  FSEditLogTestUtil.createAndPopulateStreams(1,
      numEdits, bkEditsOut, fileEditsOut);

  BookKeeperEditLogInputStream bkEditsIn =
      new BookKeeperEditLogInputStream(ledgerProvider,
          ledgerId,
          0,
          1,
          numEdits,
          false);

  EditLogFileInputStream fileEditsIn =
      new EditLogFileInputStream(tempEditsFile);

  assertEquals("Length in bytes must be equal!",
      bkEditsIn.length(), fileEditsIn.length());

  long prevBkPos = bkEditsIn.getPosition();
  long prevFilePos = fileEditsIn.getPosition();
  for (int txId = 1; txId <= numEdits; txId++) {
    assertEquals("Position in file must be equal position in bk",
        prevBkPos, prevFilePos);
    bkEditsIn.refresh(prevBkPos, -1);
    fileEditsIn.refresh(prevFilePos, -1);
    FSEditLogOp opFromBk = bkEditsIn.readOp();
    FSEditLogOp opFromFile = fileEditsIn.readOp();
    if (LOG.isDebugEnabled()) {
      LOG.debug("txId = " + txId + ", " + "opFromBk = " + opFromBk +
          ", opFromFile = " + opFromFile);
    }
    assertEquals(
        "Operation read from file and BookKeeper must be same after refresh",
        opFromBk, opFromFile);
    prevBkPos = bkEditsIn.getPosition();
    prevFilePos = fileEditsIn.getPosition();
  }
  assertNull("BookKeeper edit log must end at last txId", bkEditsIn.readOp());
}
项目:hadoop-EAR    文件:TestBookKeeperEditLogInputStream.java   
/**
 * Verifies that a BookKeeper-backed edit log stream can be read when the
 * write-side flush buffer is smaller than the read-side buffer: writes
 * {@code numEdits} transactions, retries refreshing to the start of the
 * ledger, then compares every operation against a file-based reference.
 */
private void testReadBufferGreaterThanLedgerSizeInner(int numEdits)
    throws Exception {
  LedgerHandle ledgerOut = createLedger();
  long ledgerId = ledgerOut.getId();
  BookKeeperEditLogInputStream bkEditsIn =
      new BookKeeperEditLogInputStream(ledgerProvider,
          ledgerId,
          0,
          1,
          -1,
          true);
  EditLogFileOutputStream fileEditsOut =
      new EditLogFileOutputStream(tempEditsFile, null);
  bkEditsIn.init();
  // Set the edit log buffer flush size smaller than the size of the
  // buffer in BufferedInputStream in BookKeeperJournalInputStream
  FSEditLog.sizeFlushBuffer = bkEditsIn.bin.available() / 3;
  LOG.info("Set flush buffer size to " + FSEditLog.sizeFlushBuffer);

  BookKeeperEditLogOutputStream bkEditsOut =
      new BookKeeperEditLogOutputStream(ledgerOut);

  FSEditLogTestUtil.createAndPopulateStreams(1, numEdits, bkEditsOut,
      fileEditsOut);

  // Re-try refreshing up to ten times until we are able to refresh
  // successfully to the beginning of the ledger and read the edit log
  // layout version
  int maxTries = 10;
  for (int i = 0; i < maxTries; i++) {
    try {
      bkEditsIn.refresh(0, -1);
      assertEquals("refresh succeeded", bkEditsIn.logVersion,
          FSConstants.LAYOUT_VERSION);
      break; // refresh succeeded; stream is positioned at the start
    } catch (AssertionFailedError e) {
      // BUG FIX: `i` never equals maxTries inside this loop, so the
      // original condition (i == maxTries) silently swallowed the final
      // failure. Rethrow on the last attempt instead.
      if (i == maxTries - 1) {
        throw e;
      }
    }
  }
  EditLogFileInputStream fileEditsIn =
      new EditLogFileInputStream(tempEditsFile);
  // NOTE(review): this reads numEdits + 1 operations (i runs 0..numEdits
  // inclusive) while the sibling test reads 1..numEdits — confirm the
  // extra leading read is intentional.
  for (int i = 0; i <= numEdits; i++) {
    FSEditLogOp opFromBk = bkEditsIn.readOp();
    FSEditLogOp opFromFile = fileEditsIn.readOp();
    if (LOG.isDebugEnabled()) {
      LOG.debug("txId = " + i + ", " + "opFromBk = " + opFromBk +
          ", opFromFile = " + opFromFile);
    }
    assertEquals(
        "Operation read from file and BookKeeper must be same after refresh",
        opFromBk, opFromFile);
  }
  // Fixed message: the stream ends at numEdits, not a hard-coded 1000.
  assertNull("BookKeeper edit log must end at txId " + numEdits,
      bkEditsIn.readOp());
}
项目:hadoop-plus    文件:EditLogTailer.java   
/** Exposes the tailed edit log for tests. */
@VisibleForTesting
FSEditLog getEditLog() {
  return this.editLog;
}
项目:hadoop-plus    文件:EditLogTailer.java   
/** Swaps in a replacement edit log (e.g. a Mockito spy) for tests. */
@VisibleForTesting
void setEditLog(FSEditLog newLog) {
  this.editLog = newLog;
}
项目:FlexMap    文件:EditLogTailer.java   
/** Exposes the tailed edit log for tests. */
@VisibleForTesting
FSEditLog getEditLog() {
  return this.editLog;
}
项目:FlexMap    文件:EditLogTailer.java   
/** Swaps in a replacement edit log (e.g. a Mockito spy) for tests. */
@VisibleForTesting
public void setEditLog(FSEditLog newLog) {
  this.editLog = newLog;
}
项目:hadoop-TCP    文件:EditLogTailer.java   
/** Exposes the tailed edit log for tests. */
@VisibleForTesting
FSEditLog getEditLog() {
  return this.editLog;
}
项目:hadoop-TCP    文件:EditLogTailer.java   
/** Swaps in a replacement edit log (e.g. a Mockito spy) for tests. */
@VisibleForTesting
void setEditLog(FSEditLog newLog) {
  this.editLog = newLog;
}