Java class org.apache.hadoop.hdfs.AppendTestUtil: example source code

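The snippets on this page exercise a small surface of the AppendTestUtil test helper: initBuffer(int) and randomBytes(long, int) build deterministic test data, write(OutputStream, int, int) emits a predictable byte pattern, and check(FileSystem, Path, int) / checkFullFile(...) re-read a file and verify its contents. As a minimal sketch of that round trip, composed only of calls that appear in the snippets below and run against a single-DataNode MiniDFSCluster (the class name and file path here are placeholders, not part of any snippet):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class AppendTestUtilRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      Path p = new Path("/roundtrip.dat");
      // write a deterministic 100-byte pattern
      FSDataOutputStream out = fs.create(p, true);
      AppendTestUtil.write(out, 0, 100);
      out.close();
      // re-read the file and verify the pattern
      AppendTestUtil.check(fs, p, 100);
    } finally {
      cluster.shutdown();
    }
  }
}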
Project: hadoop    File: BaseTestHttpFSWith.java   (aliyun-oss-hadoop-fs and big-c carry identical copies)
private void testTruncate() throws Exception {
  if (!isLocalFS()) {
    final short repl = 3;
    final int blockSize = 1024;
    final int numOfBlocks = 2;
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    fs.mkdirs(getProxiedFSTestDir());
    Path file = new Path(getProxiedFSTestDir(), "foo.txt");
    final byte[] data = FileSystemTestHelper.getFileData(
        numOfBlocks, blockSize);
    FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);

    final int newLength = blockSize;

    boolean isReady = fs.truncate(file, newLength);
    Assert.assertTrue("Recovery is not expected.", isReady);

    FileStatus fileStatus = fs.getFileStatus(file);
    Assert.assertEquals(fileStatus.getLen(), newLength);
    AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());

    fs.close();
  }
}
Project: hadoop    File: TestFSMainOperationsWebHdfs.java   (aliyun-oss-hadoop-fs and big-c carry identical copies)
@Test
public void testTruncate() throws Exception {
  final short repl = 3;
  final int blockSize = 1024;
  final int numOfBlocks = 2;
  Path dir = getTestRootPath(fSys, "test/hadoop");
  Path file = getTestRootPath(fSys, "test/hadoop/file");

  final byte[] data = getFileData(numOfBlocks, blockSize);
  createFile(fSys, file, data, blockSize, repl);

  final int newLength = blockSize;

  boolean isReady = fSys.truncate(file, newLength);

  Assert.assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fSys.getFileStatus(file);
  Assert.assertEquals(fileStatus.getLen(), newLength);
  AppendTestUtil.checkFullFile(fSys, file, newLength, data, file.toString());

  ContentSummary cs = fSys.getContentSummary(dir);
  Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
      newLength * repl);
  Assert.assertTrue("Deleted", fSys.delete(dir, true));
}
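With repl = 3 and newLength = blockSize = 1024, the ContentSummary assertion expects getSpaceConsumed() to equal 1024 * 3 = 3072 bytes: after the truncate the file holds a single 1024-byte block, counted once per replica.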
Project: hadoop    File: TestPipelinesFailover.java   (identical copies appear in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, hadoop-TCP, hardfs, and hadoop-on-lustre2)
@Override
public void doAnAction() throws Exception {
  FSDataOutputStream stm = fs.create(path, true);
  try {
    AppendTestUtil.write(stm, 0, 100);
    stm.hflush();
    loopRecoverLease(fsOtherUser, path);
    AppendTestUtil.check(fs, path, 100);
  } finally {
    try {
      stm.close();
    } catch (IOException e) {
      // expected: the lease was taken over by the other client
    }
  }
}
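Here a second client (fsOtherUser) forcibly recovers the lease while the writer still holds the stream open, which is why the writer's close() is expected to fail. loopRecoverLease is a test-local helper whose body is not shown; a minimal sketch of what such a helper can look like, built only on the public DistributedFileSystem.recoverLease(Path) call (the wrapper class, retry bound, and sleep interval are assumptions):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryHelper {
  static void loopRecoverLease(DistributedFileSystem fs, Path p)
      throws Exception {
    boolean recovered = false;
    // recoverLease() returns true once the NameNode has closed the file
    // out from under the original writer.
    for (int i = 0; i < 30 && !recovered; i++) {
      recovered = fs.recoverLease(p);
      if (!recovered) {
        Thread.sleep(1000);
      }
    }
    if (!recovered) {
      throw new IllegalStateException("lease not recovered for " + p);
    }
  }
}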
Project: hadoop    File: TestHDFSFileContextMainOperations.java   (aliyun-oss-hadoop-fs and big-c carry identical copies)
@Test
public void testTruncate() throws Exception {
  final short repl = 3;
  final int blockSize = 1024;
  final int numOfBlocks = 2;
  DistributedFileSystem fs = cluster.getFileSystem();
  Path dir = getTestRootPath(fc, "test/hadoop");
  Path file = getTestRootPath(fc, "test/hadoop/file");

  final byte[] data = FileSystemTestHelper.getFileData(
      numOfBlocks, blockSize);
  FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);

  final int newLength = blockSize;

  boolean isReady = fc.truncate(file, newLength);

  Assert.assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fc.getFileStatus(file);
  Assert.assertEquals(fileStatus.getLen(), newLength);
  AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());

  ContentSummary cs = fs.getContentSummary(dir);
  Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
      newLength * repl);
  Assert.assertTrue(fs.delete(dir, true));
}
Project: hadoop    File: TestPacketReceiver.java   (aliyun-oss-hadoop-fs and big-c carry identical copies)
private void doTestReceiveAndMirror(PacketReceiver pr,
    int dataLen, int checksumsLen) throws IOException {
  final byte[] DATA = AppendTestUtil.initBuffer(dataLen);
  final byte[] CHECKSUMS = AppendTestUtil.initBuffer(checksumsLen);

  byte[] packet = prepareFakePacket(DATA, CHECKSUMS);
  ByteArrayInputStream in = new ByteArrayInputStream(packet);

  pr.receiveNextPacket(in);

  ByteBuffer parsedData = pr.getDataSlice();
  assertArrayEquals(DATA, remainingAsArray(parsedData));

  ByteBuffer parsedChecksums = pr.getChecksumSlice();
  assertArrayEquals(CHECKSUMS, remainingAsArray(parsedChecksums));

  PacketHeader header = pr.getHeader();
  assertEquals(SEQNO, header.getSeqno());
  assertEquals(OFFSET_IN_BLOCK, header.getOffsetInBlock());
  assertEquals(dataLen + checksumsLen + Ints.BYTES, header.getPacketLen());

  // Mirror the packet to an output stream and make sure it matches
  // the packet we sent.
  ByteArrayOutputStream mirrored = new ByteArrayOutputStream();
  mirrored = Mockito.spy(mirrored);

  pr.mirrorPacketTo(new DataOutputStream(mirrored));
  // The write should be done in a single call. Otherwise we may hit
  // nasty interactions with nagling (eg HDFS-4049).
  Mockito.verify(mirrored, Mockito.times(1))
    .write(Mockito.<byte[]>any(), Mockito.anyInt(),
        Mockito.eq(packet.length));
  Mockito.verifyNoMoreInteractions(mirrored);

  assertArrayEquals(packet, mirrored.toByteArray());
}
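The packet-length assertion mirrors the wire format: getPacketLen() counts the checksum bytes, the data bytes, and Ints.BYTES (the Guava constant, 4) for the payload-length field itself; the rest of the packet header is not included in that figure.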
Project: hadoop    File: TestShortCircuitLocalRead.java   (big-c carries an identical copy; the aliyun-oss-hadoop-fs copy differs only in reading the two flags from HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER and HdfsClientConfigKeys.Read.ShortCircuit.KEY)
/**
 * Test that file data can be read by reading the block
 * through RemoteBlockReader.
 * @throws IOException
 */
public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum,
    int size, String shortCircuitUser, int readOffset,
    boolean shortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
           .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  // check that / exists
  Path path = new Path("/");
  URI uri = cluster.getURI();
  assertTrue("/ should be a directory", fs.getFileStatus(path).isDirectory());

  byte[] fileData = AppendTestUtil.randomBytes(seed, size);
  Path file1 = new Path("filelocal.dat");
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(fileData);
  stm.close();
  try {
    checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
        conf, shortCircuitFails);
    // RemoteBlockReader does not support the read(ByteBuffer buf) method
    assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
                  checkUnsupportedMethod(fs, file1, fileData, readOffset));
  } catch(IOException e) {
    throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
  } catch(InterruptedException inEx) {
    throw inEx;
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
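Setting DFS_CLIENT_USE_LEGACY_BLOCKREADER forces reads through the legacy RemoteBlockReader implementation even though short-circuit reads are enabled, which is what lets the test assert that RemoteBlockReader rejects the read(ByteBuffer) variant.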
Project: hadoop    File: TestFileTruncate.java   (big-c carries an identical copy; in aliyun-oss-hadoop-fs, parent is a shared field rather than a local Path)
/**
 * Truncate files of different sizes byte by byte.
 */
@Test
public void testBasicTruncate() throws IOException {
  int startingFileSize = 3 * BLOCK_SIZE;

  Path parent = new Path("/test");
  fs.mkdirs(parent);
  fs.setQuota(parent, 100, 1000);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  for (int fileLength = startingFileSize; fileLength > 0;
                                          fileLength -= BLOCK_SIZE - 1) {
    for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) {
      final Path p = new Path(parent, "testBasicTruncate" + fileLength);
      writeContents(contents, fileLength, p);

      int newLength = fileLength - toTruncate;
      boolean isReady = fs.truncate(p, newLength);
      LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
          + ", toTruncate=" + toTruncate + ", isReady=" + isReady);

      assertEquals("File must be closed for zero truncate"
          + " or truncating at the block boundary",
          isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0);
      if (!isReady) {
        checkBlockRecovery(p);
      }

      ContentSummary cs = fs.getContentSummary(parent);
      assertEquals("Bad disk space usage",
          cs.getSpaceConsumed(), newLength * REPLICATION);
      // validate the file content
      checkFullFile(p, newLength, contents);
    }
  }
  fs.delete(parent, true);
}
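The loop encodes the truncate contract checked by the assertion: truncate() returns true (file immediately ready) only when nothing is cut (toTruncate == 0) or the new length falls on a block boundary; in every other case the last block must go through recovery, which checkBlockRecovery(p) waits out before the quota and content checks run.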
Project: hadoop    File: TestFileTruncate.java   (big-c carries an identical copy; in aliyun-oss-hadoop-fs, parent is a shared field rather than a local Path)
/**
 * The last block is truncated mid-block (non copy-on-truncate);
 * the DataNodes are shut down immediately after the truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesShutdownImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesShutdownImmediately");

  writeContents(contents, startingFileSize, p);

  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);

  cluster.shutdownDataNodes();
  cluster.setDataNodesDead();
  try {
    for(int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) {
      Thread.sleep(SLEEP);
    }
    assertFalse("All DataNodes should be down.", cluster.isDataNodeUp());
    LocatedBlocks blocks = getLocatedBlocks(p);
    assertTrue(blocks.isUnderConstruction());
  } finally {
    cluster.startDataNodes(conf, DATANODE_NUM, true,
        StartupOption.REGULAR, null);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  fs.delete(parent, true);
}
Project: hadoop    File: TestFileTruncate.java   (aliyun-oss-hadoop-fs and big-c carry identical copies)
/**
 * EditLogOp load test for Truncate.
 */
@Test
public void testTruncateEditLogLoad() throws IOException {
  // purge previously accumulated edits
  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

  int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
  int toTruncate = 1;
  final String s = "/testTruncateEditLogLoad";
  final Path p = new Path(s);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  writeContents(contents, startingFileSize, p);

  int newLength = startingFileSize - toTruncate;
  boolean isReady = fs.truncate(p, newLength);
  assertThat("truncate should have triggered block recovery.",
      isReady, is(false));

  cluster.restartNameNode();

  String holder = UserGroupInformation.getCurrentUser().getUserName();
  cluster.getNamesystem().recoverLease(s, holder, "");

  checkBlockRecovery(p);
  checkFullFile(p, newLength, contents);
  fs.delete(p, false);
}
Project: hadoop    File: TestFileTruncate.java   (big-c carries an identical copy; in aliyun-oss-hadoop-fs, parent is a shared field rather than a local Path)
@Test
public void testTruncate4Symlink() throws IOException {
  final int fileLength = 3 * BLOCK_SIZE;

  final Path parent = new Path("/test");
  fs.mkdirs(parent);
  final byte[] contents = AppendTestUtil.initBuffer(fileLength);
  final Path file = new Path(parent, "testTruncate4Symlink");
  writeContents(contents, fileLength, file);

  final Path link = new Path(parent, "link");
  fs.createSymlink(file, link, false);

  final int newLength = fileLength/3;
  boolean isReady = fs.truncate(link, newLength);

  assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fs.getFileStatus(file);
  assertThat(fileStatus.getLen(), is((long) newLength));

  ContentSummary cs = fs.getContentSummary(parent);
  assertEquals("Bad disk space usage",
      cs.getSpaceConsumed(), newLength * REPLICATION);
  // validate the file content
  checkFullFile(file, newLength, contents);

  fs.delete(parent, true);
}
Project: hadoop    File: TestDNFencing.java   (aliyun-oss-hadoop-fs carries an identical copy)
/**
 * Regression test for HDFS-2742. The issue in this bug was:
 * - DN does a block report while file is open. This BR contains
 *   the block in RBW state.
 * - Standby queues the RBW state in PendingDatanodeMessages
 * - Standby processes edit logs during failover. Before fixing
 *   this bug, it was mistakenly applying the RBW reported state
 *   after the block had been completed, causing the block to get
 *   marked corrupt. Instead, we should now be applying the RBW
 *   message on OP_ADD, and then the FINALIZED message on OP_CLOSE.
 */
@Test
public void testBlockReportsWhileFileBeingWritten() throws Exception {
  FSDataOutputStream out = fs.create(TEST_FILE_PATH);
  try {
    AppendTestUtil.write(out, 0, 10);
    out.hflush();

    // Block report will include the RBW replica, but will be
    // queued on the StandbyNode.
    cluster.triggerBlockReports();

  } finally {
    IOUtils.closeStream(out);
  }

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);

  // Verify that no replicas are marked corrupt, and that the
  // file is readable from the failed-over standby.
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
  assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
  assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());

  DFSTestUtil.readFile(fs, TEST_FILE_PATH);
}
Project: hadoop    File: TestHSync.java   (aliyun-oss-hadoop-fs carries an identical copy)
/** Test hsync on an exact block boundary */
@Test
public void testHSyncBlockBoundary() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs = cluster.getFileSystem();

  final Path p = new Path("/testHSyncBlockBoundary/foo");
  final int len = 1 << 16;
  final byte[] fileContents = AppendTestUtil.initBuffer(len);
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  // fill exactly one block (tests the SYNC_BLOCK case) and flush
  out.write(fileContents, 0, len);
  out.hflush();
  // the full block should have caused a sync
  checkSyncMetric(cluster, 1);
  out.hsync();
  // an hsync at the same block boundary should not sync again
  checkSyncMetric(cluster, 1);
  // write one more byte and sync again
  out.write(1);
  out.hsync();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
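len = 1 << 16 (64 KiB) is also passed to create() as the block size, so the first write fills exactly one block; with CreateFlag.SYNC_BLOCK set, completing that block triggers the one sync counted by checkSyncMetric(cluster, 1), the hsync() on the same boundary adds nothing, and only the extra byte plus hsync() and the final close() advance the counter to 2 and then 3.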