Java class org.apache.hadoop.hdfs.qjournal.server.Journal: example source code

The snippets below, collected from several Hadoop distributions and forks, show how tests use the server-side Journal class of the Quorum Journal Manager (QJM). A JournalNode keeps one Journal instance per nameservice to manage that nameservice's edit log and the epoch bookkeeping used for write fencing.

Project: hadoop    File: TestDFSUpgradeWithHA.java
private static void assertEpochFilesCopied(MiniQJMHACluster jnCluster)
    throws IOException {
  for (int i = 0; i < 3; i++) {
    File journalDir = jnCluster.getJournalCluster().getJournalDir(i, "ns1");
    File currDir = new File(journalDir, "current");
    File prevDir = new File(journalDir, "previous");
    for (String fileName : new String[]{ Journal.LAST_PROMISED_FILENAME,
        Journal.LAST_WRITER_EPOCH }) {
      File prevFile = new File(prevDir, fileName);
      // It's possible the prev file doesn't exist, e.g. if there has never
      // been a writer before the upgrade.
      if (prevFile.exists()) {
        PersistentLongFile prevLongFile = new PersistentLongFile(prevFile, -10);
        PersistentLongFile currLongFile = new PersistentLongFile(new File(currDir,
            fileName), -11);
        assertTrue("Value in " + fileName + " has decreased on upgrade in "
            + journalDir, prevLongFile.get() <= currLongFile.get());
      }
    }
  }
}
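
Both files checked here (Journal.LAST_PROMISED_FILENAME and Journal.LAST_WRITER_EPOCH) are single-value files handled by org.apache.hadoop.hdfs.util.PersistentLongFile: it stores one long durably and hands back a caller-supplied default when the file is absent (the -10/-11 defaults above are just sentinels that can never be real epochs). A minimal standalone sketch of that round trip, assuming the PersistentLongFile API as in Apache Hadoop; the path is made up for illustration:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.util.PersistentLongFile;

public class EpochFileDemo {
  public static void main(String[] args) throws IOException {
    File f = new File("/tmp/last-promised-epoch");       // hypothetical location
    PersistentLongFile epoch = new PersistentLongFile(f, -1);
    System.out.println(epoch.get());   // -1: no file yet, so the default is returned
    epoch.set(42);                     // persisted atomically (temp file + rename)
    System.out.println(epoch.get());   // 42; the value is also cached in memory
  }
}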
Project: aliyun-oss-hadoop-fs    File: TestDFSUpgradeWithHA.java
private static void assertEpochFilesCopied(MiniQJMHACluster jnCluster)
    throws IOException {
  for (int i = 0; i < 3; i++) {
    File journalDir = jnCluster.getJournalCluster().getJournalDir(i, "ns1");
    File currDir = new File(journalDir, "current");
    File prevDir = new File(journalDir, "previous");
    for (String fileName : new String[]{ Journal.LAST_PROMISED_FILENAME,
        Journal.LAST_WRITER_EPOCH }) {
      File prevFile = new File(prevDir, fileName);
      // It's possible the prev file doesn't exist, e.g. if there has never
      // been a writer before the upgrade.
      if (prevFile.exists()) {
        PersistentLongFile prevLongFile = new PersistentLongFile(prevFile, -10);
        PersistentLongFile currLongFile = new PersistentLongFile(new File(currDir,
            fileName), -11);
        assertTrue("Value in " + fileName + " has decreased on upgrade in "
            + journalDir, prevLongFile.get() <= currLongFile.get());
      }
    }
  }
}
Project: big-c    File: TestDFSUpgradeWithHA.java
private static void assertEpochFilesCopied(MiniQJMHACluster jnCluster)
    throws IOException {
  for (int i = 0; i < 3; i++) {
    File journalDir = jnCluster.getJournalCluster().getJournalDir(i, "ns1");
    File currDir = new File(journalDir, "current");
    File prevDir = new File(journalDir, "previous");
    for (String fileName : new String[]{ Journal.LAST_PROMISED_FILENAME,
        Journal.LAST_WRITER_EPOCH }) {
      File prevFile = new File(prevDir, fileName);
      // It's possible the prev file doesn't exist, e.g. if there has never
      // been a writer before the upgrade.
      if (prevFile.exists()) {
        PersistentLongFile prevLongFile = new PersistentLongFile(prevFile, -10);
        PersistentLongFile currLongFile = new PersistentLongFile(new File(currDir,
            fileName), -11);
        assertTrue("Value in " + fileName + " has decreased on upgrade in "
            + journalDir, prevLongFile.get() <= currLongFile.get());
      }
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDFSUpgradeWithHA.java
private static void assertEpochFilesCopied(MiniQJMHACluster jnCluster)
    throws IOException {
  for (int i = 0; i < 3; i++) {
    File journalDir = jnCluster.getJournalCluster().getJournalDir(i, "ns1");
    File currDir = new File(journalDir, "current");
    File prevDir = new File(journalDir, "previous");
    for (String fileName : new String[]{ Journal.LAST_PROMISED_FILENAME,
        Journal.LAST_WRITER_EPOCH }) {
      File prevFile = new File(prevDir, fileName);
      // It's possible the prev file doesn't exist, e.g. if there has never
      // been a writer before the upgrade.
      if (prevFile.exists()) {
        PersistentLongFile prevLongFile = new PersistentLongFile(prevFile, -10);
        PersistentLongFile currLongFile = new PersistentLongFile(new File(currDir,
            fileName), -11);
        assertTrue("Value in " + fileName + " has decreased on upgrade in "
            + journalDir, prevLongFile.get() <= currLongFile.get());
      }
    }
  }
}
Project: hadoop-EAR    File: TestImageUploadStream.java
/**
 * Assert contents for a single journal.
 */
private void assertContentsForJournal(Journal journal, byte[] written,
    long txid) throws IOException {
  LOG.info("---- validating contents ---- for txid: " + txid);
  InputStream is = null;
  try {
    File uploaded = journal.getImageStorage().getCheckpointImageFile(txid);
    assertTrue(uploaded.exists());
    assertEquals(written.length, uploaded.length());

    // assert contents of the uploaded file
    is = new FileInputStream(uploaded);
    byte[] contents = new byte[written.length];
    // a bare is.read(contents) may legally return before filling the buffer;
    // org.apache.hadoop.io.IOUtils.readFully loops until it has all the bytes
    IOUtils.readFully(is, contents, 0, contents.length);

    assertTrue(Arrays.equals(written, contents));
  } finally {
    if (is != null)
      is.close();
  }
}
Project: hadoop-EAR    File: TestJournal.java
@Test
public void testJournalLocking() throws Exception {
  Assume.assumeTrue(journal.getJournalStorage().isLockSupported(0));
  StorageDirectory sd = journal.getJournalStorage().getStorageDir(0);
  File lockFile = new File(sd.getRoot(), STORAGE_FILE_LOCK);

  // Journal should be locked, since the format() call locks it.
  GenericTestUtils.assertExists(lockFile);

  journal.newEpoch(FAKE_NSINFO, 1);
  try {
    new Journal(TEST_LOG_DIR, TEST_IMG_DIR, JID, mockErrorReporter, mockJournalNode);
    fail("Did not fail to create another journal in same dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Cannot lock storage", ioe);
  }

  journal.close();

  // Journal should no longer be locked after the close() call.
  // Hence, should be able to create a new Journal in the same dir.
  Journal journal2 = new Journal(TEST_LOG_DIR, TEST_IMG_DIR, JID,
      mockErrorReporter, mockJournalNode);
  journal2.newEpoch(FAKE_NSINFO, 2);
  journal2.close();
}
Project: hadoop-plus    File: TestJournal.java
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
  Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
  StorageDirectory sd = journal.getStorage().getStorageDir(0);
  File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);

  // Journal should be locked, since the format() call locks it.
  GenericTestUtils.assertExists(lockFile);

  journal.newEpoch(FAKE_NSINFO, 1);
  try {
    new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
    fail("Did not fail to create another journal in same dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Cannot lock storage", ioe);
  }

  journal.close();

  // Journal should no longer be locked after the close() call.
  // Hence, should be able to create a new Journal in the same dir.
  Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
  journal2.newEpoch(FAKE_NSINFO, 2);
  journal2.close();
}
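
Beyond locking, these tests exercise newEpoch(), the QJM fencing primitive: a writer must propose a strictly higher epoch than any epoch this Journal has already promised, otherwise the call is refused. A hedged sketch of that rule in the same test style, reusing the journal and FAKE_NSINFO fixture of the surrounding test class (the message asserted is an assumption based on upstream Journal.newEpoch()):

@Test (timeout = 10000)
public void testEpochMustStrictlyIncrease() throws Exception {
  journal.newEpoch(FAKE_NSINFO, 5);
  try {
    // re-proposing an epoch <= the last promise must be rejected
    journal.newEpoch(FAKE_NSINFO, 5);
    fail("Journal accepted a non-increasing epoch");
  } catch (IOException ioe) {
    // assumed text; upstream throws "Proposed epoch N <= last promise M"
    GenericTestUtils.assertExceptionContains("Proposed epoch", ioe);
  }
}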
Project: FlexMap    File: TestDFSUpgradeWithHA.java
private static void assertEpochFilesCopied(MiniQJMHACluster jnCluster)
    throws IOException {
  for (int i = 0; i < 3; i++) {
    File journalDir = jnCluster.getJournalCluster().getJournalDir(i, "ns1");
    File currDir = new File(journalDir, "current");
    File prevDir = new File(journalDir, "previous");
    for (String fileName : new String[]{ Journal.LAST_PROMISED_FILENAME,
        Journal.LAST_WRITER_EPOCH }) {
      File prevFile = new File(prevDir, fileName);
      // It's possible the prev file doesn't exist, e.g. if there has never
      // been a writer before the upgrade.
      if (prevFile.exists()) {
        PersistentLongFile prevLongFile = new PersistentLongFile(prevFile, -10);
        PersistentLongFile currLongFile = new PersistentLongFile(new File(currDir,
            fileName), -11);
        assertTrue("Value in " + fileName + " has decreased on upgrade in "
            + journalDir, prevLongFile.get() <= currLongFile.get());
      }
    }
  }
}
Project: hadoop-TCP    File: TestJournal.java
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
  Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
  StorageDirectory sd = journal.getStorage().getStorageDir(0);
  File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);

  // Journal should be locked, since the format() call locks it.
  GenericTestUtils.assertExists(lockFile);

  journal.newEpoch(FAKE_NSINFO, 1);
  try {
    new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
    fail("Did not fail to create another journal in same dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Cannot lock storage", ioe);
  }

  journal.close();

  // Journal should no longer be locked after the close() call.
  // Hence, should be able to create a new Journal in the same dir.
  Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
  journal2.newEpoch(FAKE_NSINFO, 2);
  journal2.close();
}
Project: hardfs    File: TestJournal.java
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
  Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
  StorageDirectory sd = journal.getStorage().getStorageDir(0);
  File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);

  // Journal should be locked, since the format() call locks it.
  GenericTestUtils.assertExists(lockFile);

  journal.newEpoch(FAKE_NSINFO, 1);
  try {
    new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
    fail("Did not fail to create another journal in same dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Cannot lock storage", ioe);
  }

  journal.close();

  // Journal should no longer be locked after the close() call.
  // Hence, should be able to create a new Journal in the same dir.
  Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
  journal2.newEpoch(FAKE_NSINFO, 2);
  journal2.close();
}
Project: hadoop-on-lustre2    File: TestDFSUpgradeWithHA.java
private static void assertEpochFilesCopied(MiniQJMHACluster jnCluster)
    throws IOException {
  for (int i = 0; i < 3; i++) {
    File journalDir = jnCluster.getJournalCluster().getJournalDir(i, "ns1");
    File currDir = new File(journalDir, "current");
    File prevDir = new File(journalDir, "previous");
    for (String fileName : new String[]{ Journal.LAST_PROMISED_FILENAME,
        Journal.LAST_WRITER_EPOCH }) {
      File prevFile = new File(prevDir, fileName);
      // It's possible the prev file doesn't exist, e.g. if there has never
      // been a writer before the upgrade.
      if (prevFile.exists()) {
        PersistentLongFile prevLongFile = new PersistentLongFile(prevFile, -10);
        PersistentLongFile currLongFile = new PersistentLongFile(new File(currDir,
            fileName), -11);
        assertTrue("Value in " + fileName + " has decreased on upgrade in "
            + journalDir, prevLongFile.get() <= currLongFile.get());
      }
    }
  }
}
Project: hadoop    File: TestDFSUpgradeWithHA.java
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal journal1 = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
      .getInternalState(journal1, "committedTxnId");
  return committedTxnId != null ? committedTxnId.get() :
      HdfsConstants.INVALID_TXID;
}
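
The committedTxnId pulled out via Whitebox is a BestEffortLongFile (org.apache.hadoop.hdfs.util), the low-overhead counterpart of PersistentLongFile: it is updated frequently, is not forced to disk on each write, and simply falls back to its default if the file is missing or truncated, which is why the test tolerates an absent value with INVALID_TXID. A minimal sketch of that API, with an illustrative path and default:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.util.BestEffortLongFile;

public class CommittedTxnIdDemo {
  public static void main(String[] args) throws IOException {
    File f = new File("/tmp/committed-txid");  // hypothetical location
    BestEffortLongFile committed = new BestEffortLongFile(f, -12345 /* sentinel default */);
    try {
      committed.set(100L);                  // cheap, frequent updates; no fsync per call
      System.out.println(committed.get());  // 100
    } finally {
      committed.close();                    // releases the underlying file channel
    }
  }
}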
Project: aliyun-oss-hadoop-fs    File: TestDFSUpgradeWithHA.java
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal journal1 = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
      .getInternalState(journal1, "committedTxnId");
  return committedTxnId != null ? committedTxnId.get() :
      HdfsServerConstants.INVALID_TXID;
}
Project: big-c    File: TestDFSUpgradeWithHA.java
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal journal1 = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
      .getInternalState(journal1, "committedTxnId");
  return committedTxnId != null ? committedTxnId.get() :
      HdfsConstants.INVALID_TXID;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDFSUpgradeWithHA.java
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal journal1 = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
      .getInternalState(journal1, "committedTxnId");
  return committedTxnId != null ? committedTxnId.get() :
      HdfsConstants.INVALID_TXID;
}
Project: hadoop-EAR    File: TestImageUploadStream.java
/**
 * Assert contents and hashes for every journal.
 */
private void assertContents(MiniJournalCluster cluster, byte[] written,
    long txid, MD5Hash writtenHash, TestImageUploadStreamInjectionHandler h)
    throws IOException {
  int numJournals = cluster.getNumNodes();

  // assert that each file contains what it should
  for (int i = 0; i < numJournals; i++) {
    if (h.failOn[i] != null) {
      continue;
    }
    Journal j = cluster.getJournalNode(i).getOrCreateJournal(JID.getBytes());
    assertContentsForJournal(j, written, txid);
  }

  // for failures assert the number of exceptions
  int expectedExceptionCount = 0;
  for (InjectionEventI e : h.failOn) {
    expectedExceptionCount += (e == null ? 0 : 1);
  }
  assertEquals(expectedExceptionCount, h.getExceptions());

  // assert hashes
  assertEquals(numJournals - expectedExceptionCount, h.uploadHashes.size());
  for (MD5Hash hash : h.uploadHashes) {
    assertEquals(writtenHash, hash);
  }
}
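
The writtenHash compared above is an org.apache.hadoop.io.MD5Hash. A small sketch of how such a digest is produced and compared (the payload bytes are a stand-in):

import org.apache.hadoop.io.MD5Hash;

public class Md5Demo {
  public static void main(String[] args) {
    byte[] written = "image-bytes".getBytes();       // stand-in payload
    MD5Hash writtenHash = MD5Hash.digest(written);   // one-shot MD5 of the buffer
    // MD5Hash implements equals(), so digests compare directly,
    // exactly as assertEquals(writtenHash, hash) does in the test above
    System.out.println(writtenHash.equals(MD5Hash.digest(written)));  // true
  }
}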
Project: hadoop-EAR    File: TestJournal.java
@Before
public void setup() throws Exception {
  FileUtil.fullyDelete(TEST_LOG_DIR);
  FileUtil.fullyDelete(TEST_IMG_DIR);
  journal = new Journal(TEST_LOG_DIR, TEST_IMG_DIR, JID, mockErrorReporter,
      mockJournalNode);
  journal.transitionJournal(FAKE_NSINFO, Transition.FORMAT, null);
  journal.transitionImage(FAKE_NSINFO, Transition.FORMAT, null);
}
Project: hadoop-plus    File: TestJournal.java
@Before
public void setup() throws Exception {
  FileUtil.fullyDelete(TEST_LOG_DIR);
  conf = new Configuration();
  journal = new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
  journal.format(FAKE_NSINFO);
}
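
All of the setup() variants rely on the same fixture fields. A sketch of what they typically look like, modeled on the upstream TestJournal (values and the log directory are illustrative, not authoritative):

// assumed imports: org.apache.hadoop.hdfs.server.protocol.NamespaceInfo,
// org.apache.hadoop.hdfs.server.common.StorageErrorReporter, org.mockito.Mockito
private static final String JID = "test-journal";
private static final File TEST_LOG_DIR = new File("/tmp/TestJournal");  // illustrative
private static final NamespaceInfo FAKE_NSINFO =
    new NamespaceInfo(12345, "mycluster", "my-bp", 0L);
private final StorageErrorReporter mockErrorReporter =
    Mockito.mock(StorageErrorReporter.class);
private Configuration conf;
private Journal journal;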
Project: FlexMap    File: TestDFSUpgradeWithHA.java
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal journal1 = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
      .getInternalState(journal1, "committedTxnId");
  return committedTxnId != null ? committedTxnId.get() :
      HdfsConstants.INVALID_TXID;
}
Project: hadoop-TCP    File: TestJournal.java
@Before
public void setup() throws Exception {
  FileUtil.fullyDelete(TEST_LOG_DIR);
  conf = new Configuration();
  journal = new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
  journal.format(FAKE_NSINFO);
}
Project: hardfs    File: TestJournal.java
@Before
public void setup() throws Exception {
  FileUtil.fullyDelete(TEST_LOG_DIR);
  conf = new Configuration();
  journal = new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
  journal.format(FAKE_NSINFO);
}