Example source code for the Java class org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream
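The snippets below, collected from open-source projects, show how test code drives this class. As a quick orientation, here is a minimal sketch of the write lifecycle that recurs throughout the examples. It is a hedged illustration, not code from any project listed: the file path is made up, and it follows the two-argument constructor and no-argument create() used by the hadoop-EAR snippets (newer Hadoop versions pass a Configuration and a layout version instead).

import java.io.File;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;

public class EditLogLifecycleSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative path; real tests use files inside a storage directory.
    File edits = new File("/tmp/edits_inprogress_0000000000000000001");
    EditLogFileOutputStream out = new EditLogFileOutputStream(edits, null);
    out.create();          // stamps the edit-log version header
    // out.write(op);      // append FSEditLogOp records here
    out.setReadyToFlush(); // swap the in-memory double buffers
    out.flush();           // push the ready buffer to disk
    out.close();
  }
}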

Project: hadoop    File: TestHAStateTransitions.java   (the same method appears verbatim in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, FlexMap, and hadoop-on-lustre2)
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster,
    NameNode nn, boolean writeHeader) throws IOException {
  long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
  URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
  File sharedEditsDir = new File(sharedEditsUri.getPath());
  StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
  File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir,
      txid + 1);
  assertTrue("Failed to create in-progress edits file",
      inProgressFile.createNewFile());

  if (writeHeader) {
    DataOutputStream out = new DataOutputStream(new FileOutputStream(
        inProgressFile));
    EditLogFileOutputStream.writeHeader(
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
    out.close();
  }
}
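EditLogFileOutputStream.writeHeader only stamps the layout version onto the otherwise empty in-progress file; note that older forks below (hadoop-plus, hadoop-TCP, hardfs) call writeHeader(out) without an explicit layout-version argument. As a hedged illustration, not taken from the original test, such a header-only file should yield no operations when read back with the EditLogFileInputStream used elsewhere on this page:

// Hedged read-back check: readOp() returning null means no edits
// follow the header. Assumes the File-based constructor and the
// public readOp() shown in later snippets.
EditLogInputStream in = new EditLogFileInputStream(inProgressFile);
assertNull("Header-only log should contain no operations", in.readOp());
in.close();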
Project: hadoop-EAR    File: TestPreTransactionalServerLogReader.java
@Test
public void testOneOperation() throws Exception {
  File editsDir = createEditsDir();
  DummyServerCore core = new DummyServerCore();
  EditLogFileOutputStream out = initEdits(editsDir);
  ServerLogReaderPreTransactional logReader = new ServerLogReaderPreTransactional(core,
      Util.stringAsURI(editsDir.getAbsolutePath()));
  core.logReader = logReader;
  Thread coreThread, logReaderThread;

  coreThread = new Thread(core);
  logReaderThread = new Thread(logReader);

  logReaderThread.start();
  coreThread.start();
  writeOperation(out, 1000, true);
  Thread.sleep(500);
  core.shutdown();
  logReaderThread.join();
  coreThread.join();

  Assert.assertEquals(1, core.notifications.size());
  Assert.assertEquals(1000, core.notifications.poll().txId);
}
Project: hadoop-EAR    File: TestBookKeeperJournalManager.java
@Test
public void testGetInputStreamNoValidationNoCheckLastTxId() throws Exception {
  setupTest("test-get-input-stream-no-validation-no-check-last-txid");
  File tempEditsFile = FSEditLogTestUtil.createTempEditsFile(
      "test-get-input-stream-with-validation");
  try {
    EditLogOutputStream bkeos = bkjm.startLogSegment(1);
    EditLogOutputStream elfos =
        new EditLogFileOutputStream(tempEditsFile, null);
    elfos.create();
    FSEditLogTestUtil.populateStreams(1, 100, bkeos, elfos);
    EditLogInputStream bkeis =
        getJournalInputStreamDontCheckLastTxId(bkjm, 1);
    EditLogInputStream elfis = new EditLogFileInputStream(tempEditsFile);
    Map<String, EditLogInputStream> streamByName =
        ImmutableMap.of("BookKeeper", bkeis, "File", elfis);
    FSEditLogTestUtil.assertStreamsAreEquivalent(100, streamByName);
  } finally {
    if (!tempEditsFile.delete()) {
      LOG.warn("Unable to delete edits file: " +
          tempEditsFile.getAbsolutePath());
    }
  }
}
Project: hadoop-plus    File: TestHAStateTransitions.java   (the same method, using the older writeHeader(out) signature, appears verbatim in hadoop-TCP and hardfs)
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster,
    NameNode nn, boolean writeHeader) throws IOException {
  long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
  URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
  File sharedEditsDir = new File(sharedEditsUri.getPath());
  StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
  File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir,
      txid + 1);
  assertTrue("Failed to create in-progress edits file",
      inProgressFile.createNewFile());

  if (writeHeader) {
    DataOutputStream out = new DataOutputStream(new FileOutputStream(
        inProgressFile));
    EditLogFileOutputStream.writeHeader(out);
    out.close();
  }
}
Project: datacollector    File: HdfsDestinationPipelineRunIT.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  // setting some dummy kerberos settings to be able to test a mis-setting
  System.setProperty("java.security.krb5.realm", "foo");
  System.setProperty("java.security.krb5.kdc", "localhost:0");

  File minidfsDir = new File("target/minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  UserGroupInformation.createUserForTesting("foo", new String[]{"all", "supergroup"});
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  miniDFS = new MiniDFSCluster.Builder(conf).build();
}
Project: datacollector    File: HdfsDestinationPipelineOperationsIT.java
@BeforeClass
public static void beforeClass() throws Exception {
  // setting some dummy kerberos settings to be able to test a mis-setting
  System.setProperty("java.security.krb5.realm", "foo");
  System.setProperty("java.security.krb5.kdc", "localhost:0");

  File minidfsDir = new File("target/minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  UserGroupInformation.createUserForTesting("foo", new String[]{ "all", "supergroup"});
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  miniDFS = new MiniDFSCluster.Builder(conf).build();
  PipelineOperationsStandaloneIT.beforeClass(getPipelineJson());
}
Project: datacollector    File: BaseHdfsTargetIT.java
@BeforeClass
public static void setUpClass() throws Exception {
  //setting some dummy kerberos settings to be able to test a mis-setting
  System.setProperty("java.security.krb5.realm", "foo");
  System.setProperty("java.security.krb5.kdc", "localhost:0");

  File minidfsDir = new File("target/minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  Set<PosixFilePermission> set = new HashSet<PosixFilePermission>();
  set.add(PosixFilePermission.OWNER_EXECUTE);
  set.add(PosixFilePermission.OWNER_READ);
  set.add(PosixFilePermission.OWNER_WRITE);
  set.add(PosixFilePermission.OTHERS_READ);
  java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  fooUgi = UserGroupInformation.createUserForTesting("foo", new String[]{ "all"});
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  FileSystem.closeAll();
  miniDFS = new MiniDFSCluster.Builder(conf).build();
  miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short)0777));
}
Project: datacollector    File: ClusterHDFSSourceIT.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  minidfsDir = new File("target/minidfs-" + UUID.randomUUID()).getAbsoluteFile();
  assertTrue(minidfsDir.mkdirs());
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("dfs.namenode.fs-limits.min-block-size", String.valueOf(32));
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  miniDFS = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  dir = new Path(miniDFS.getURI()+"/dir");
  FileSystem fs = miniDFS.getFileSystem();
  fs.mkdirs(dir);
  writeFile(fs, new Path(dir + "/forAllTests/" + "path"), 1000);
  dummyEtc = new File(minidfsDir, "dummy-etc");
  assertTrue(dummyEtc.mkdirs());
  Configuration dummyConf = new Configuration(false);
  for (String file : new String[]{"core", "hdfs", "mapred", "yarn"}) {
    File siteXml = new File(dummyEtc, file + "-site.xml");
    FileOutputStream out = new FileOutputStream(siteXml);
    dummyConf.writeXml(out);
    out.close();
  }
  resourcesDir = minidfsDir.getAbsolutePath();
  hadoopConfDir = dummyEtc.getName();
  System.setProperty("sdc.resources.dir", resourcesDir);;
}
Project: incubator-sentry    File: TestSentryAuthorizationProvider.java
@Before
public void setUp() throws Exception {
  admin = UserGroupInformation.createUserForTesting(
      System.getProperty("user.name"), new String[] { "supergroup" });
  admin.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
      Configuration conf = new HdfsConfiguration();
      conf.setBoolean("sentry.authorization-provider.include-hdfs-authz-as-acl", true);
      conf.set(DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY,
          MockSentryAuthorizationProvider.class.getName());
      conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
      EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
      miniDFS = new MiniDFSCluster.Builder(conf).build();
      return null;
    }
  });
}
Project: hadoop    File: TestCachingStrategy.java   (the same method appears verbatim in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, FlexMap, and hadoop-on-lustre2)
@BeforeClass
public static void setupTest() {
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Track calls to posix_fadvise.
  NativeIO.POSIX.setCacheManipulator(tracker);

  // Normally, we wait for a few megabytes of data to be read or written 
  // before dropping the cache.  This is to avoid an excessive number of
  // JNI calls to the posix_fadvise function.  However, for the purpose
  // of this test, we want to use small files and see all fadvise calls
  // happen.
  BlockSender.CACHE_DROP_INTERVAL_BYTES = 4096;
  BlockReceiver.CACHE_DROP_LAG_BYTES = 4096;
}
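Across these projects the most common usage is the static setShouldSkipFsyncForTesting(true) call, which disables fsync on edit-log writes so MiniDFSCluster-based tests run faster. The flag is a JVM-wide static switch, so a suite that turns it on may want to turn it back off; a small hedged sketch, not part of the original test (the method name is made up):

// Restore fsync behaviour after the suite; the flag is static,
// so it would otherwise leak into later tests in the same JVM.
@AfterClass
public static void teardownTest() {
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(false);
}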
Project: ditb    File: HBaseTestingUtility.java
/**
 * Start a minidfscluster.
 * Can only create one.
 * @param servers How many DNs to start.
 * @param hosts hostnames DNs to run on.
 * @return The mini dfs cluster created.
 * @throws Exception
 * @see #shutdownMiniDFSCluster()
 */
public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
throws Exception {
  createDirsAndSetProperties();
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Error level to skip some warnings specific to the minicluster. See HBASE-4709
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
      setLevel(org.apache.log4j.Level.ERROR);
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
      setLevel(org.apache.log4j.Level.ERROR);


  this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
    true, null, null, hosts, null);

  // Set this just-started cluster as our filesystem.
  setFs();

  // Wait for the cluster to be totally up
  this.dfsCluster.waitClusterUp();

  //reset the test directory for test file system
  dataTestDirOnTestFS = null;

  return this.dfsCluster;
}
Project: hadoop-EAR    File: TestPreTransactionalServerLogReader.java
public static EditLogFileOutputStream initEdits(File editsDir) throws IOException {
  File edits = getFileWithCurrent(editsDir, "edits");
  File fstime = getFileWithCurrent(editsDir, "fstime");

  if (!edits.createNewFile())
    throw new IOException("Failed to create edits file");
  EditLogFileOutputStream out = new EditLogFileOutputStream(edits, null);
  out.create();
  if (!fstime.createNewFile())
    throw new IOException("Failed to create fstime file");

  return out;
}
Project: hadoop-EAR    File: TestPreTransactionalServerLogReader.java
private EditLogFileOutputStream beginRoll(File editsDir,
    EditLogFileOutputStream editsOutput)
        throws IOException {
  File editsNew = getFileWithCurrent(editsDir, "edits.new");

  editsOutput.close();
  if (!editsNew.createNewFile())
    throw new IOException("Failed to create edits.new file");
  EditLogFileOutputStream out = new EditLogFileOutputStream(editsNew, null);
  out.create();
  Assert.assertTrue(editsNew.exists());

  return out;
}
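Together, initEdits and beginRoll mirror the pre-transactional (pre-HDFS-1073) roll protocol: writing goes to the current edits file until a roll begins, at which point the stream is closed and a fresh edits.new file takes over; the endRoll helper invoked in testTwoOperationsRoll below (not shown in these snippets) corresponds to the step that folds edits.new back into edits.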
Project: hadoop-EAR    File: TestPreTransactionalServerLogReader.java
private void writeOperation(EditLogFileOutputStream out,
    long txId, boolean forceSync) throws IOException {
  FSEditLogOp.AddOp op = FSEditLogOp.AddOp.getUniqueInstance();
  op.setTransactionId(txId);
  op.set(INodeId.GRANDFATHER_INODE_ID, "/a/b", (short)3, 100L, 100L, 100L, new BlockInfo[0],
      PermissionStatus.createImmutable("x", "y", FsPermission.getDefault()),
      "x", "y");
  out.write(op);
  LOG.info("Wrote operation " + txId);
  if (txId % 10 == 0 || forceSync) {
    out.setReadyToFlush();
    out.flush();
    LOG.info("Flushed operation " + txId);
  }  
}
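A note on the helper above: writeOperation syncs in batches, flushing either every tenth transaction or whenever forceSync is set; the tests that follow exercise both paths (bulk writes with a final explicit flush, and single forced writes).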
Project: hadoop-EAR    File: TestPreTransactionalServerLogReader.java
@Test
public void testMultipleOperations() throws Exception {
  File editsDir = createEditsDir();
  DummyServerCore core = new DummyServerCore();
  EditLogFileOutputStream out = initEdits(editsDir);
  ServerLogReaderPreTransactional logReader = new ServerLogReaderPreTransactional(core,
      Util.stringAsURI(editsDir.getAbsolutePath()));
  core.logReader = logReader;
  Thread coreThread, logReaderThread;
  long txCount = 1000;

  coreThread = new Thread(core);
  logReaderThread = new Thread(logReader);

  logReaderThread.start();
  coreThread.start();
  for (long txId = 0; txId < txCount; txId ++) {
    writeOperation(out, txId, false);
  }

  // flush
  out.setReadyToFlush();
  out.flush();

  Thread.sleep(500);
  core.shutdown();
  logReaderThread.join();
  coreThread.join();

  Assert.assertEquals(1000, core.notifications.size());
  for (long txId = 0; txId < txCount; txId ++)
    Assert.assertEquals(txId, core.notifications.poll().txId);
}
Project: hadoop-EAR    File: TestPreTransactionalServerLogReader.java
@Test
public void testTwoOperationsRoll() throws Exception {
  File editsDir = createEditsDir();
  DummyServerCore core = new DummyServerCore();
  EditLogFileOutputStream out = initEdits(editsDir);
  ServerLogReaderPreTransactional logReader = new ServerLogReaderPreTransactional(core,
      Util.stringAsURI(editsDir.getAbsolutePath()));
  core.logReader = logReader;
  Thread coreThread, logReaderThread;

  coreThread = new Thread(core);
  logReaderThread = new Thread(logReader);

  coreThread.start();
  Thread.sleep(1000);
  logReaderThread.start();
  writeOperation(out, 1000, true);
  out = beginRoll(editsDir, out);
  writeOperation(out, 1001, true);    
  Thread.sleep(500);
  endRoll(editsDir);
  Thread.sleep(500);
  core.shutdown();
  logReaderThread.join();
  coreThread.join();

  Assert.assertEquals(2, core.notifications.size());
  Assert.assertEquals(1000, core.notifications.poll().txId);
  Assert.assertEquals(1001, core.notifications.poll().txId);
}
Project: hadoop-EAR    File: TestServerLogReaderVersion.java
public static EditLogFileOutputStream initEdits(File editsDir)
    throws IOException {
  File edits = TestPreTransactionalServerLogReader.getFileWithCurrent(
      editsDir, "edits_inprogress_0000000000000000000");

  if (!edits.createNewFile())
    throw new IOException("Failed to create edits file");
  EditLogFileOutputStream out = new EditLogFileOutputStream(edits, null);
  out.create();
  return out;
}
Project: hadoop-EAR    File: TestBookKeeperEditLogInputStream.java
private void testReadFromClosedLedgerAfterWriteInner(int numEdits)
  throws Exception {
  LedgerHandle ledgerOut = createLedger();
  long ledgerId = ledgerOut.getId();
  BookKeeperEditLogOutputStream bkEditsOut =
      new BookKeeperEditLogOutputStream(ledgerOut);
  EditLogFileOutputStream fileEditsOut =
      new EditLogFileOutputStream(tempEditsFile, null);

  FSEditLogTestUtil.createAndPopulateStreams(1,
      numEdits, bkEditsOut, fileEditsOut);

  BookKeeperEditLogInputStream bkEditsIn =
      new BookKeeperEditLogInputStream(ledgerProvider,
          ledgerId,
          0,
          1,
          numEdits,
          false);
  EditLogFileInputStream fileEditsIn =
      new EditLogFileInputStream(tempEditsFile);

  assertEquals("Length in bytes must be equal!",
      bkEditsIn.length(), fileEditsIn.length());

  FSEditLogTestUtil.assertStreamsAreEquivalent(numEdits,
      ImmutableMap.of("BookKeeper", bkEditsIn, "File", fileEditsIn));
  assertNull("BookKeeper edit log must end at txid 100", bkEditsIn.readOp());
}
Project: hadoop-EAR    File: TestBookKeeperJournalManager.java
@Test
public void testGetInputStreamWithValidation() throws Exception {
  setupTest("test-get-input-stream-with-validation");
  File tempEditsFile = FSEditLogTestUtil.createTempEditsFile(
      "test-get-input-stream-with-validation");
  try {
    TestBKJMInjectionHandler h = new TestBKJMInjectionHandler();
    InjectionHandler.set(h);
    EditLogOutputStream bkeos = bkjm.startLogSegment(1);
    EditLogOutputStream elfos =
        new EditLogFileOutputStream(tempEditsFile, null);
    elfos.create();
    FSEditLogTestUtil.populateStreams(1, 100, bkeos, elfos);
    EditLogInputStream bkeis =
        FSEditLogTestUtil.getJournalInputStream(bkjm, 1, true);
    EditLogInputStream elfis = new EditLogFileInputStream(tempEditsFile);
    Map<String, EditLogInputStream> streamByName =
        ImmutableMap.of("BookKeeper", bkeis, "File", elfis);
    FSEditLogTestUtil.assertStreamsAreEquivalent(100, streamByName);
    assertNotNull("Log was validated", h.logValidation);
    assertEquals("numTrasactions validated correctly",
        100, h.logValidation.getNumTransactions());
    assertEquals("endTxId validated correctly",
        100, h.logValidation.getEndTxId());
  } finally {
    if (!tempEditsFile.delete()) {
      LOG.warn("Unable to delete edits file: " +
          tempEditsFile.getAbsolutePath());
    }
  }
}
Project: hadoop-EAR    File: TestBookKeeperEditLogOutputStream.java
/**
 * Writes a workload to both a BookKeeperEditLogOutputStream and
 * an EditLogFileOutputStream, then verifies that they function
 * (nearly) identically.
 */
@Test
public void testWrite() throws Exception {
  LedgerHandle ledger = createLedger();
  BookKeeperEditLogOutputStream bkEdits =
      new BookKeeperEditLogOutputStream(ledger);
  EditLogFileOutputStream fileEdits =
      new EditLogFileOutputStream(tempEditsFile, null);

  FSEditLogTestUtil.createAndPopulateStreams(1, 100, bkEdits, fileEdits);

  // Test that after closing both, the EditLogFileOutputStream
  // and the BookKeeperEditLogOutputStream report identical
  // lengths.
  assertEquals("Lengths must match", tempEditsFile.length(),
      ledger.getLength());

  long tempFileCrc32 = IOUtils.copyBytesAndGenerateCRC(
      new FileInputStream(tempEditsFile),
      new NullOutputStream(),
      (int) tempEditsFile.length(),
      false);
  long ledgerCrc32 = IOUtils.copyBytesAndGenerateCRC(
      new LedgerInputStream(ledger),
      new NullOutputStream(),
      (int) ledger.getLength(),
      false);

  // Test that the same data (including a log version) has been written
  // to both BookKeeperEditLogOutputStream and EditLogFileOutputStream
  // by comparing their Crc-32 checksums.
  assertEquals("Crc32 of data in file and in BookKeeper ledger must match",
      tempFileCrc32, ledgerCrc32);
}
Project: pbase    File: HBaseTestingUtility.java
/**
 * Start a minidfscluster.
 * Can only create one.
 * @param servers How many DNs to start.
 * @param hosts hostnames DNs to run on.
 * @return The mini dfs cluster created.
 * @throws Exception
 * @see #shutdownMiniDFSCluster()
 */
public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
throws Exception {
  createDirsAndSetProperties();
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Error level to skip some warnings specific to the minicluster. See HBASE-4709
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
      setLevel(org.apache.log4j.Level.ERROR);
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
      setLevel(org.apache.log4j.Level.ERROR);


  this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
    true, null, null, hosts, null);

  // Set this just-started cluster as our filesystem.
  FileSystem fs = this.dfsCluster.getFileSystem();
  FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

  // Wait for the cluster to be totally up
  this.dfsCluster.waitClusterUp();

  //reset the test directory for test file system
  dataTestDirOnTestFS = null;

  return this.dfsCluster;
}
Project: datacollector    File: KafkaToHDFSIT.java
@BeforeClass
public static void beforeClass() throws Exception {
  //setup kafka to read from
  KafkaTestUtil.startZookeeper();
  KafkaTestUtil.startKafkaBrokers(1);
  KafkaTestUtil.createTopic(TOPIC, 1, 1);
  producer = KafkaTestUtil.createProducer(KafkaTestUtil.getMetadataBrokerURI(), true);
  produceRecords(RECORDS_PRODUCED);

  // setting some dummy kerberos settings to be able to test a mis-setting
  System.setProperty("java.security.krb5.realm", "foo");
  System.setProperty("java.security.krb5.kdc", "localhost:0");

  File minidfsDir = new File("target/minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  UserGroupInformation.createUserForTesting("foo", new String[]{"all", "supergroup"});
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  miniDFS = new MiniDFSCluster.Builder(conf).build();

  //setup Cluster and start pipeline
  YarnConfiguration entries = new YarnConfiguration();
  //TODO: Investigate why this is required for test to pass. Is yarn messing with the miniDFS cluster configuration?
  entries.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  entries.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  ClusterUtil.setupCluster(TEST_NAME, getPipelineJson(), entries);
  serverURI = ClusterUtil.getServerURI();
  miniSDC = ClusterUtil.getMiniSDC();
}
Project: datacollector    File: HdfsMetadataExecutorIT.java
@BeforeClass
public static void setUpClass() throws Exception {
  // Conf dir
  new File(confDir).mkdirs();

  //setting some dummy kerberos settings to be able to test a mis-setting
  System.setProperty("java.security.krb5.realm", "foo");
  System.setProperty("java.security.krb5.kdc", "localhost:0");

  File minidfsDir = new File(baseDir, "minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  Set<PosixFilePermission> set = new HashSet<>();
  set.add(PosixFilePermission.OWNER_EXECUTE);
  set.add(PosixFilePermission.OWNER_READ);
  set.add(PosixFilePermission.OWNER_WRITE);
  set.add(PosixFilePermission.OTHERS_READ);
  java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  conf.set("dfs.namenode.acls.enabled", "true");
  UserGroupInformation fooUgi = UserGroupInformation.createUserForTesting("foo", new String[]{"all"});
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  FileSystem.closeAll();
  miniDFS = new MiniDFSCluster.Builder(conf).build();
  miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short)0777));
  fs = miniDFS.getFileSystem();
  writeConfiguration(miniDFS.getConfiguration(0), confDir + "core-site.xml");
  writeConfiguration(miniDFS.getConfiguration(0), confDir + "hdfs-site.xml");
}
Project: hbase    File: HBaseTestingUtility.java
public MiniDFSCluster startMiniDFSCluster(int servers, final  String racks[], String hosts[])
    throws Exception {
  createDirsAndSetProperties();
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Error level to skip some warnings specific to the minicluster. See HBASE-4709
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
      setLevel(org.apache.log4j.Level.ERROR);
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
      setLevel(org.apache.log4j.Level.ERROR);

  TraceUtil.initTracer(conf);

  this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, racks, hosts, null);

  // Set this just-started cluster as our filesystem.
  setFs();

  // Wait for the cluster to be totally up
  this.dfsCluster.waitClusterUp();

  //reset the test directory for test file system
  dataTestDirOnTestFS = null;
  String dataTestDir = getDataTestDir().toString();
  conf.set(HConstants.HBASE_DIR, dataTestDir);
  LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);

  return this.dfsCluster;
}
Project: hadoop-TCP    File: TestCachingStrategy.java   (the same method, using the older NativeIO.POSIX.cacheTracker field, appears verbatim in hardfs)
@BeforeClass
public static void setupTest() {
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Track calls to posix_fadvise.
  NativeIO.POSIX.cacheTracker = tracker;

  // Normally, we wait for a few megabytes of data to be read or written 
  // before dropping the cache.  This is to avoid an excessive number of
  // JNI calls to the posix_fadvise function.  However, for the purpose
  // of this test, we want to use small files and see all fadvise calls
  // happen.
  BlockSender.CACHE_DROP_INTERVAL_BYTES = 4096;
  BlockReceiver.CACHE_DROP_LAG_BYTES = 4096;
}
Project: tez    Files: TestSecureShuffle.java and TestPipelinedShuffle.java (identical setup in both)
@BeforeClass
public static void setupDFSCluster() throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
  miniDFSCluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  fs = miniDFSCluster.getFileSystem();
  conf.set("fs.defaultFS", fs.getUri().toString());
  conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH, false);
}