Java class org.apache.hadoop.io.IOUtils usage examples (source code)
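
These examples, collected from several open-source projects, illustrate the recurring org.apache.hadoop.io.IOUtils idioms: copyBytes for stream-to-stream copies, closeStream for quietly closing a single stream, and cleanup for closing several closeables in a finally block. Below is a minimal, self-contained sketch of those idioms (the file paths and logger here are hypothetical, and the exact copyBytes overload varies across the snippets that follow):

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.IOUtils;

public class IOUtilsSketch {
  private static final Log LOG = LogFactory.getLog(IOUtilsSketch.class);

  public static void main(String[] args) throws IOException {
    InputStream in = null;
    OutputStream out = null;
    try {
      in = new FileInputStream("/tmp/source.bin");   // hypothetical paths
      out = new FileOutputStream("/tmp/target.bin");
      // Copy with a 4 KB buffer; 'false' leaves both streams open for the finally block.
      IOUtils.copyBytes(in, out, 4096, false);
    } finally {
      // closeStream swallows close() exceptions; cleanup also logs them when a Log is given.
      IOUtils.closeStream(out);
      IOUtils.cleanup(LOG, in);
    }
  }
}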

Project: hadoop    File: TestDFSClientFailover.java
/**
 * Test that even a non-idempotent method will properly fail-over if the
 * first IPC attempt times out trying to connect. Regression test for
 * HDFS-4404. 
 */
@Test
public void testFailoverOnConnectTimeout() throws Exception {
  conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      InjectingSocketFactory.class, SocketFactory.class);
  // Set up the InjectingSocketFactory to throw a ConnectTimeoutException
  // when connecting to the first NN.
  InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);

  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  // Make the second NN the active one.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  // Call a non-idempotent method, and ensure the failover of the call proceeds
  // successfully.
  IOUtils.closeStream(fs.create(TEST_FILE));
}
Project: hadoop-oss    File: TestSSLHttpServer.java
/**
 * Test that verifies that excluded ciphers (SSL_RSA_WITH_RC4_128_SHA,
 * TLS_ECDH_ECDSA_WITH_RC4_128_SHA,TLS_ECDH_RSA_WITH_RC4_128_SHA,
 * TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,TLS_ECDHE_RSA_WITH_RC4_128_SHA) are not
 * available for negotiation during SSL connection.
 */
@Test
public void testExcludedCiphers() throws Exception {
  URL url = new URL(baseUrl, "/echo?a=b&c=d");
  HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
  SSLSocketFactory sslSocketF = clientSslFactory.createSSLSocketFactory();
  PrefferedCipherSSLSocketFactory testPreferredCipherSSLSocketF
      = new PrefferedCipherSSLSocketFactory(sslSocketF,
          excludeCiphers.split(","));
  conn.setSSLSocketFactory(testPreferredCipherSSLSocketF);
  assertFalse("excludedCipher list is empty", excludeCiphers.isEmpty());
  try {
    InputStream in = conn.getInputStream();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    IOUtils.copyBytes(in, out, 1024);
    fail("No Ciphers in common, SSLHandshake must fail.");
  } catch (SSLHandshakeException ex) {
    LOG.info("No Ciphers in common, expected succesful test result.", ex);
  }
}
Project: hadoop    File: TestBloomMapFile.java
/**
 * test {@link BloomMapFile.Reader} constructor with 
 * IOException
 */
public void testIOExceptionInWriterConstructor() {
  Path dirNameSpy = spy(TEST_FILE);
  BloomMapFile.Reader reader = null;
  BloomMapFile.Writer writer = null;
  try {
    writer = new BloomMapFile.Writer(conf, TEST_FILE,
        MapFile.Writer.keyClass(IntWritable.class),
        MapFile.Writer.valueClass(Text.class));
    writer.append(new IntWritable(1), new Text("123124142"));
    writer.close();

    when(dirNameSpy.getFileSystem(conf)).thenThrow(new IOException());
    reader = new BloomMapFile.Reader(dirNameSpy, conf,
        MapFile.Reader.comparator(new WritableComparator(IntWritable.class)));

    assertNull("testIOExceptionInWriterConstructor error !!!",
        reader.getBloomFilter());
  } catch (Exception ex) {
    fail("unexpect ex in testIOExceptionInWriterConstructor !!!");
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
Project: hadoop    File: TestFileBasedCopyListing.java
private void checkResult(Path listFile, int count) throws IOException {
  if (count == 0) {
    return;
  }

  int recCount = 0;
  SequenceFile.Reader reader = new SequenceFile.Reader(config,
                                          SequenceFile.Reader.file(listFile));
  try {
    Text relPath = new Text();
    CopyListingFileStatus fileStatus = new CopyListingFileStatus();
    while (reader.next(relPath, fileStatus)) {
      if (fileStatus.isDirectory() && relPath.toString().equals("")) {
        // ignore root with empty relPath, which is an entry to be 
        // used for preserving root attributes etc.
        continue;
      }
      Assert.assertEquals(fileStatus.getPath().toUri().getPath(), map.get(relPath.toString()));
      recCount++;
    }
  } finally {
    IOUtils.closeStream(reader);
  }
  Assert.assertEquals(recCount, count);
}
Project: hadoop    File: VolumeScanner.java
/**
 * Disallow the scanner from scanning the given block pool.
 *
 * @param bpid       The block pool id.
 */
public synchronized void disableBlockPoolId(String bpid) {
  Iterator<BlockIterator> i = blockIters.iterator();
  while (i.hasNext()) {
    BlockIterator iter = i.next();
    if (iter.getBlockPoolId().equals(bpid)) {
      LOG.trace("{}: disabling scanning on block pool {}", this, bpid);
      i.remove();
      IOUtils.cleanup(null, iter);
      if (curBlockIter == iter) {
        curBlockIter = null;
      }
      notify();
      return;
    }
  }
  LOG.warn("{}: can't remove block pool {}, because it was never " +
      "added.", this, bpid);
}
Project: hadoop    File: HistoryServerLeveldbStateStoreService.java
@Override
public void storeToken(MRDelegationTokenIdentifier tokenId, Long renewDate)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Storing token " + tokenId.getSequenceNumber());
  }

  ByteArrayOutputStream memStream = new ByteArrayOutputStream();
  DataOutputStream dataStream = new DataOutputStream(memStream);
  try {
    tokenId.write(dataStream);
    dataStream.writeLong(renewDate);
    dataStream.close();
    dataStream = null;
  } finally {
    IOUtils.cleanup(LOG, dataStream);
  }

  String dbKey = getTokenDatabaseKey(tokenId);
  try {
    db.put(bytes(dbKey), memStream.toByteArray());
  } catch (DBException e) {
    throw new IOException(e);
  }
}
Project: ditb    File: TestCoprocessorClassLoader.java
@Test
public void testCleanupOldJars() throws Exception {
  String className = "TestCleanupOldJars";
  String folder = TEST_UTIL.getDataTestDir().toString();
  File jarFile = ClassLoaderTestHelper.buildJar(
    folder, className, null, ClassLoaderTestHelper.localDirPath(conf));
  File tmpJarFile = new File(jarFile.getParent(), "/tmp/" + className + ".test.jar");
  if (tmpJarFile.exists()) tmpJarFile.delete();
  assertFalse("tmp jar file should not exist", tmpJarFile.exists());
  IOUtils.copyBytes(new FileInputStream(jarFile),
    new FileOutputStream(tmpJarFile), conf, true);
  assertTrue("tmp jar file should be created", tmpJarFile.exists());
  Path path = new Path(jarFile.getAbsolutePath());
  ClassLoader parent = TestCoprocessorClassLoader.class.getClassLoader();
  CoprocessorClassLoader.parentDirLockSet.clear(); // So that clean up can be triggered
  ClassLoader classLoader = CoprocessorClassLoader.getClassLoader(path, parent, "111", conf);
  assertNotNull("Classloader should be created", classLoader);
  assertFalse("tmp jar file should be removed", tmpJarFile.exists());
}
Project: ditb    File: TagCompressionContext.java
/**
 * Uncompresses tags from the InputStream and writes them to the destination array.
 * @param src Stream where the compressed tags are available
 * @param dest Destination array where the uncompressed tags are written
 * @param offset Offset in the destination array at which to start writing
 * @param length Length of all tag bytes
 * @throws IOException
 */
public void uncompressTags(InputStream src, byte[] dest, int offset, int length)
    throws IOException {
  int endOffset = offset + length;
  while (offset < endOffset) {
    byte status = (byte) src.read();
    if (status == Dictionary.NOT_IN_DICTIONARY) {
      int tagLen = StreamUtils.readRawVarint32(src);
      offset = Bytes.putAsShort(dest, offset, tagLen);
      IOUtils.readFully(src, dest, offset, tagLen);
      tagDict.addEntry(dest, offset, tagLen);
      offset += tagLen;
    } else {
      short dictIdx = StreamUtils.toShort(status, (byte) src.read());
      byte[] entry = tagDict.getEntry(dictIdx);
      if (entry == null) {
        throw new IOException("Missing dictionary entry for index " + dictIdx);
      }
      offset = Bytes.putAsShort(dest, offset, entry.length);
      System.arraycopy(entry, 0, dest, offset, entry.length);
      offset += entry.length;
    }
  }
}
Project: hadoop    File: TestMapFile.java
/**
 * test {@code MapFile.Reader.midKey() } method 
 */
@Test
public void testMidKeyOnCurrentApi() throws Exception {
  // Write a mapfile of simple data: keys are sequential IntWritables
  final String TEST_PREFIX = "testMidKeyOnCurrentApi.mapfile";
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = createWriter(TEST_PREFIX, IntWritable.class, IntWritable.class);
    // 0,1,....9
    int SIZE = 10;
    for (int i = 0; i < SIZE; i++)
      writer.append(new IntWritable(i), new IntWritable(i));
    writer.close();

    reader = createReader(TEST_PREFIX, IntWritable.class);
    assertEquals(new IntWritable((SIZE - 1) / 2), reader.midKey());
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
Project: hadoop    File: TestStickyBit.java
/**
 * Ensure that even if a file is in a directory with the sticky bit on,
 * another user can write to that file (assuming correct permissions).
 */
private void confirmCanAppend(Configuration conf, Path p) throws Exception {
  // Write a file to the new tmp directory as a regular user
  Path file = new Path(p, "foo");
  writeFile(hdfsAsUser1, file);
  hdfsAsUser1.setPermission(file, new FsPermission((short) 0777));

  // Log onto cluster as another user and attempt to append to file
  Path file2 = new Path(p, "foo");
  FSDataOutputStream h = null;
  try {
    h = hdfsAsUser2.append(file2);
    h.write("Some more data".getBytes());
    h.close();
    h = null;
  } finally {
    IOUtils.cleanup(null, h);
  }
}
Project: hadoop-oss    File: TestFsShellReturnCode.java
@Test (timeout = 30000)
public void testRmForceWithNonexistentGlob() throws Exception {
  Configuration conf = new Configuration();
  FsShell shell = new FsShell();
  shell.setConf(conf);
  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final PrintStream err = new PrintStream(bytes);
  final PrintStream oldErr = System.err;
  System.setErr(err);
  try {
    int exit = shell.run(new String[]{"-rm", "-f", "nomatch*"});
    assertEquals(0, exit);
    assertTrue(bytes.toString().isEmpty());
  } finally {
    IOUtils.closeStream(err);
    System.setErr(oldErr);
  }
}
Project: hadoop-oss    File: AbstractContractOpenTest.java
@Test
public void testOpenFileTwice() throws Throwable {
  describe("verify that two opened file streams are independent");
  Path path = path("testopenfiletwice.txt");
  byte[] block = dataset(TEST_FILE_LEN, 0, 255);
  //this file now has a simple rule: offset => value
  createFile(getFileSystem(), path, false, block);
  //open first
  FSDataInputStream instream1 = getFileSystem().open(path);
  int c = instream1.read();
  assertEquals(0,c);
  FSDataInputStream instream2 = null;
  try {
    instream2 = getFileSystem().open(path);
    assertEquals("first read of instream 2", 0, instream2.read());
    assertEquals("second read of instream 1", 1, instream1.read());
    instream1.close();
    assertEquals("second read of instream 2", 1, instream2.read());
    //close instream1 again
    instream1.close();
  } finally {
    IOUtils.closeStream(instream1);
    IOUtils.closeStream(instream2);
  }
}
Project: hadoop    File: TestSaveNamespace.java
/**
 * Test that saveNamespace succeeds when a parent directory with an open
 * lease is renamed and the destination directory already exists.
 * Regression test for HDFS-2827.
 */
@Test
public void testSaveNamespaceWithRenamedLease() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
      .numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
  OutputStream out = null;
  try {
    fs.mkdirs(new Path("/test-target"));
    out = fs.create(new Path("/test-source/foo")); // don't close
    fs.rename(new Path("/test-source/"), new Path("/test-target/"));

    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  } finally {
    IOUtils.cleanup(LOG, out, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop-oss    File: TestSSLHttpServer.java
/** Test verifies that mutually exclusive server's disabled cipher suites and
 * client's enabled cipher suites can successfully establish TLS connection.
 */
@Test
public void testExclusiveEnabledCiphers() throws Exception {
  URL url = new URL(baseUrl, "/echo?a=b&c=d");
  HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
  SSLSocketFactory sslSocketF = clientSslFactory.createSSLSocketFactory();
  PrefferedCipherSSLSocketFactory testPreferredCipherSSLSocketF
      = new PrefferedCipherSSLSocketFactory(sslSocketF,
          exclusiveEnabledCiphers.split(","));
  conn.setSSLSocketFactory(testPreferredCipherSSLSocketF);
  assertFalse("excludedCipher list is empty",
      exclusiveEnabledCiphers.isEmpty());
  try {
    InputStream in = conn.getInputStream();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    IOUtils.copyBytes(in, out, 1024);
    assertEquals(out.toString(), "a:b\nc:d\n");
    LOG.info("Atleast one additional enabled cipher than excluded ciphers,"
        + " expected successful test result.");
  } catch (SSLHandshakeException ex) {
    fail("Atleast one additional cipher available for successful handshake."
        + " Unexpected test failure: " + ex);
  }
}
Project: hadoop    File: TestFuseDFS.java
/** Check that the given file exists with the given contents */
private static void checkFile(File f, String expectedContents) 
    throws IOException {
  FileInputStream fi = new FileInputStream(f);
  int len = expectedContents.length();
  byte[] b = new byte[len];
  try {
    IOUtils.readFully(fi, b, 0, len);
  } catch (IOException ie) {
    fail("Reading "+f.getName()+" failed with "+ie.getMessage());
  } finally {
    fi.close(); // NB: leaving f unclosed prevents unmount
  }
  String s = new String(b, 0, len);
  assertEquals("File content differs", expectedContents, s);
}
Project: hadoop    File: MD5FileUtils.java
/**
 * Read the md5 file stored alongside the given data file
 * and match the md5 file content.
 * @param md5File the md5 file stored alongside the data file
 * @return a matcher with two matched groups
 *   where group(1) is the md5 string and group(2) is the data file path.
 */
private static Matcher readStoredMd5(File md5File) throws IOException {
  BufferedReader reader =
      new BufferedReader(new InputStreamReader(new FileInputStream(
          md5File), Charsets.UTF_8));
  String md5Line;
  try {
    md5Line = reader.readLine();
    if (md5Line == null) { md5Line = ""; }
    md5Line = md5Line.trim();
  } catch (IOException ioe) {
    throw new IOException("Error reading md5 file at " + md5File, ioe);
  } finally {
    IOUtils.cleanup(LOG, reader);
  }

  Matcher matcher = LINE_REGEX.matcher(md5Line);
  if (!matcher.matches()) {
    throw new IOException("Invalid MD5 file " + md5File + ": the content \""
        + md5Line + "\" does not match the expected pattern.");
  }
  return matcher;
}
Project: hadoop    File: LeveldbRMStateStore.java
private void loadRMDTSecretManagerTokenSequenceNumber(RMState state)
    throws IOException {
  byte[] data = null;
  try {
    data = db.get(bytes(RM_DT_SEQUENCE_NUMBER_KEY));
  } catch (DBException e) {
    throw new IOException(e);
  }
  if (data != null) {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
    try {
      state.rmSecretManagerState.dtSequenceNumber = in.readInt();
    } finally {
      IOUtils.cleanup(LOG, in);
    }
  }
}
Project: hadoop    File: TestFileCreationClient.java
@Override
public void run() {
  FSDataOutputStream out = null;
  int i = 0;
  try {
    out = fs.create(filepath);
    for(; running; i++) {
      System.out.println(getName() + " writes " + i);
      out.write(i);
      out.hflush();
      sleep(100);
    }
  }
  catch(Exception e) {
    System.out.println(getName() + " dies: e=" + e);
  }
  finally {
    System.out.println(getName() + ": i=" + i);
    IOUtils.closeStream(out);
  }
}
Project: hadoop    File: TotalOrderPartitioner.java
/**
 * Read the cut points from the given IFile.
 * @param fs The file system
 * @param p The path to read
 * @param keyClass The map output key class
 * @param conf The job config
 * @throws IOException
 */
                               // matching key types enforced by passing in
@SuppressWarnings("unchecked") // map output key class
private K[] readPartitions(FileSystem fs, Path p, Class<K> keyClass,
    Configuration conf) throws IOException {
  SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, conf);
  ArrayList<K> parts = new ArrayList<K>();
  K key = ReflectionUtils.newInstance(keyClass, conf);
  NullWritable value = NullWritable.get();
  try {
    while (reader.next(key, value)) {
      parts.add(key);
      key = ReflectionUtils.newInstance(keyClass, conf);
    }
    reader.close();
    reader = null;
  } finally {
    IOUtils.cleanup(LOG, reader);
  }
  return parts.toArray((K[])Array.newInstance(keyClass, parts.size()));
}
Project: hadoop-oss    File: TestMapFile.java
@Test
public void testRenameWithFalse() {
  final String ERROR_MESSAGE = "Could not rename";
  final String NEW_FILE_NAME = "test-new.mapfile";
  final String OLD_FILE_NAME = "test-old.mapfile";
  MapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    FileSystem spyFs = spy(fs);

    writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class);
    writer.close();

    Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
    Path newDir = new Path(TEST_DIR, NEW_FILE_NAME);
    when(spyFs.rename(oldDir, newDir)).thenReturn(false);

    MapFile.rename(spyFs, oldDir.toString(), newDir.toString());
    fail("testRenameWithException no exception error !!!");
  } catch (IOException ex) {
    assertTrue("testRenameWithFalse invalid IOExceptionMessage error !!!", ex
        .getMessage().startsWith(ERROR_MESSAGE));
  } finally {
    IOUtils.cleanup(null, writer);
  }
}
Project: hadoop-oss    File: TestMapFile.java
/**
 * test {@code MapFile.Reader.getClosest()} with a key of the wrong class
 */
@Test
public void testReaderGetClosest() throws Exception {
  final String TEST_METHOD_KEY = "testReaderWithWrongKeyClass.mapfile";
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = createWriter(TEST_METHOD_KEY, IntWritable.class, Text.class);

    for (int i = 0; i < 10; i++)
      writer.append(new IntWritable(i), new Text("value" + i));
    writer.close();

    reader = createReader(TEST_METHOD_KEY, Text.class);
    reader.getClosest(new Text("2"), new Text(""));
    fail("no excepted exception in testReaderWithWrongKeyClass !!!");
  } catch (IOException ex) {
    /* Should be thrown to pass the test */
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
Project: hadoop    File: TestMRJobs.java
/**
 * Used on Windows to determine if the specified file is a symlink that
 * targets a directory.  On most platforms, these checks can be done using
 * commons-io.  On Windows, the commons-io implementation is unreliable and
 * always returns false.  Instead, this method checks the output of the dir
 * command.  After migrating to Java 7, this method can be removed in favor
 * of the new method java.nio.file.Files.isSymbolicLink, which is expected to
 * work cross-platform.
 * 
 * @param file File to check
 * @return boolean true if the file is a symlink that targets a directory
 * @throws IOException thrown for any I/O error
 */
private static boolean isWindowsSymlinkedDirectory(File file)
    throws IOException {
  String dirOut = Shell.execCommand("cmd", "/c", "dir",
    file.getAbsoluteFile().getParent());
  StringReader sr = new StringReader(dirOut);
  BufferedReader br = new BufferedReader(sr);
  try {
    String line;
    while ((line = br.readLine()) != null) {
      if (line.contains(file.getName()) && line.contains("<SYMLINKD>")) {
        return true;
      }
    }
    return false;
  } finally {
    IOUtils.closeStream(br);
    IOUtils.closeStream(sr);
  }
}
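As the javadoc above notes, on Java 7 and later the same check can be done with java.nio.file instead of parsing dir output; a minimal sketch of that alternative (the path below is hypothetical):

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class SymlinkCheckSketch {
  /** True if the given path is a symbolic link whose target is a directory. */
  static boolean isSymlinkedDirectory(Path p) {
    // isDirectory() follows the link by default, so both conditions together
    // identify a symlink that points at a directory.
    return Files.isSymbolicLink(p) && Files.isDirectory(p);
  }

  public static void main(String[] args) {
    Path p = Paths.get("C:\\tmp\\dirlink"); // hypothetical path
    System.out.println(isSymlinkedDirectory(p));
  }
}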
Project: hadoop    File: TestFileJournalManager.java
/**
 * Make sure that in-progress streams aren't counted if we don't ask for
 * them.
 */
@Test
public void testExcludeInProgressStreams() throws CorruptionException,
    IOException {
  File f = new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams");

  // Don't close the edit log once the files have been set up.
  NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 
                                 10, false);
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();

  FileJournalManager jm = new FileJournalManager(conf, sd, storage);

  // If we exclude the in-progress stream, we should only have 100 tx.
  assertEquals(100, getNumberOfTransactions(jm, 1, false, false));

  EditLogInputStream elis = getJournalInputStream(jm, 90, false);
  try {
    FSEditLogOp lastReadOp = null;
    while ((lastReadOp = elis.readOp()) != null) {
      assertTrue(lastReadOp.getTransactionId() <= 100);
    }
  } finally {
    IOUtils.cleanup(LOG, elis);
  }
}
Project: hadoop    File: BlockSender.java
/**
 * Read checksum into given buffer
 * @param buf buffer to read the checksum into
 * @param checksumOffset offset at which to write the checksum into buf
 * @param checksumLen length of checksum to write
 * @throws IOException on error
 */
private void readChecksum(byte[] buf, final int checksumOffset,
    final int checksumLen) throws IOException {
  if (checksumSize <= 0 && checksumIn == null) {
    return;
  }
  try {
    checksumIn.readFully(buf, checksumOffset, checksumLen);
  } catch (IOException e) {
    LOG.warn(" Could not read or failed to veirfy checksum for data"
        + " at offset " + offset + " for block " + block, e);
    IOUtils.closeStream(checksumIn);
    checksumIn = null;
    if (corruptChecksumOk) {
      if (checksumOffset < checksumLen) {
        // Just fill the array with zeros.
        Arrays.fill(buf, checksumOffset, checksumLen, (byte) 0);
      }
    } else {
      throw e;
    }
  }
}
Project: rainbow    File: HdfsUtil.java
public void upFile(InputStream fileInputStream, String hdfsPath)
        throws IOException
{
    InputStream in = new BufferedInputStream(fileInputStream);
    OutputStream out = fileSystem.create(new Path(hdfsPath));
    try
    {
        IOUtils.copyBytes(in, out, conf);
    } catch (Exception e)
    {
        e.printStackTrace();
    } finally
    {
        // close Stream
        IOUtils.closeStream(in);
        IOUtils.closeStream(out);
    }
}
Project: hadoop    File: TestUniformSizeInputFormat.java
private void checkSplits(Path listFile, List<InputSplit> splits) throws IOException {
  long lastEnd = 0;

  //Verify that each split's start matches the previous split's end,
  //so that no part of the input is missed
  for (InputSplit split : splits) {
    FileSplit fileSplit = (FileSplit) split;
    long start = fileSplit.getStart();
    Assert.assertEquals(lastEnd, start);
    lastEnd = start + fileSplit.getLength();
  }

  //Verify there is nothing more to read from the input file
  SequenceFile.Reader reader
          = new SequenceFile.Reader(cluster.getFileSystem().getConf(),
                  SequenceFile.Reader.file(listFile));

  try {
    reader.seek(lastEnd);
    CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
    Text srcRelPath = new Text();
    Assert.assertFalse(reader.next(srcRelPath, srcFileStatus));
  } finally {
    IOUtils.closeStream(reader);
  }
}
Project: rainbow    File: HdfsUtil.java
public void appendFile(String localFile, String hdfsPath)
        throws IOException
{
    InputStream in = new FileInputStream(localFile);
    OutputStream out = fileSystem.append(new Path(hdfsPath));
    try
    {
        IOUtils.copyBytes(in, out, conf);
    } catch (Exception e)
    {
        e.printStackTrace();
    } finally
    {
        IOUtils.closeStream(in);
        IOUtils.closeStream(out);
    }
}
Project: rainbow    File: HdfsUtil.java
public void downFile(String hdfsPath, Path localPath) throws IOException
{
    FSDataInputStream in = fileSystem.open(new Path(hdfsPath));
    /* FSDataOutputStream out = fileSystem.create(localPath); */
    FileOutputStream out = new FileOutputStream(new File(
            localPath.toString()));
    try
    {
        // read
        IOUtils.copyBytes(in, out, conf);
    } catch (Exception e)
    {
        e.printStackTrace();
    } finally
    {
        // close Stream
        IOUtils.closeStream(in);
        IOUtils.closeStream(out);
    }
}
Project: HadoopGuides    File: FileCopyWithProgress.java
public static void main(String[] args) throws IOException {
    final String localSrc = "/tmp/log/bigdata.pdf";
    final String hdfsUri = "hdfs://master:8020/test/bigdata.pdf";
    InputStream in = new BufferedInputStream(new FileInputStream(localSrc));
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(hdfsUri), conf);
    OutputStream out = fs.create(new Path(hdfsUri), new Progressable() {
        // progress() is called back only when the target file system is HDFS; local, S3 and FTP file systems never invoke it
        @Override
        public void progress() {
            System.out.print(">");
        }
    });
    IOUtils.copyBytes(in, out, 4096, true);
}
Project: hadoop    File: FileContextTestHelper.java
public static byte[] readFile(FileContext fc, Path path, int len)
    throws IOException {
  DataInputStream dis = fc.open(path);
  byte[] buffer = new byte[len];
  IOUtils.readFully(dis, buffer, 0, len);
  dis.close();
  return buffer;
}
Project: hadoop    File: DomainSocketWatcher.java
/**
 * Add a socket.
 *
 * @param sock     The socket to add.  It is an error to re-add a socket that
 *                   we are already watching.
 * @param handler  The handler to associate with this socket.  This may be
 *                   called any time after this function is called.
 */
public void add(DomainSocket sock, Handler handler) {
  lock.lock();
  try {
    if (closed) {
      handler.handle(sock);
      IOUtils.cleanup(LOG, sock);
      return;
    }
    Entry entry = new Entry(sock, handler);
    try {
      sock.refCount.reference();
    } catch (ClosedChannelException e1) {
      // If the socket is already closed before we add it, invoke the
      // handler immediately.  Then we're done.
      handler.handle(sock);
      return;
    }
    toAdd.add(entry);
    kick();
    while (true) {
      try {
        processedCond.await();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      if (!toAdd.contains(entry)) {
        break;
      }
    }
  } finally {
    lock.unlock();
  }
}
Project: ditb    File: FSUtils.java
/**
 * Verifies current version of file system
 *
 * @param fs filesystem object
 * @param rootdir root hbase directory
 * @return null if no version file exists, version string otherwise.
 * @throws IOException e
 * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
 */
public static String getVersion(FileSystem fs, Path rootdir)
throws IOException, DeserializationException {
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  FileStatus[] status = null;
  try {
    // hadoop 2.0 throws FNFE if directory does not exist.
    // hadoop 1.0 returns null if directory does not exist.
    status = fs.listStatus(versionFile);
  } catch (FileNotFoundException fnfe) {
    return null;
  }
  if (status == null || status.length == 0) return null;
  String version = null;
  byte [] content = new byte [(int)status[0].getLen()];
  FSDataInputStream s = fs.open(versionFile);
  try {
    IOUtils.readFully(s, content, 0, content.length);
    if (ProtobufUtil.isPBMagicPrefix(content)) {
      version = parseVersionFrom(content);
    } else {
      // Presume it is in the pre-protobuf format.
      InputStream is = new ByteArrayInputStream(content);
      DataInputStream dis = new DataInputStream(is);
      try {
        version = dis.readUTF();
      } finally {
        dis.close();
      }
    }
  } catch (EOFException eof) {
    LOG.warn("Version file was empty, odd, will try to set it.");
  } finally {
    s.close();
  }
  return version;
}
Project: hadoop    File: HistoryServerFileSystemStateStoreService.java
private void loadTokenMasterKey(HistoryServerState state, Path keyFile,
    long numKeyFileBytes) throws IOException {
  DelegationKey key = new DelegationKey();
  byte[] keyData = readFile(keyFile, numKeyFileBytes);
  DataInputStream in =
      new DataInputStream(new ByteArrayInputStream(keyData));
  try {
    key.readFields(in);
  } finally {
    IOUtils.cleanup(LOG, in);
  }
  state.tokenMasterKeyState.add(key);
}
Project: hadoop    File: MD5FileUtils.java
/**
 * Read dataFile and compute its MD5 checksum.
 */
public static MD5Hash computeMd5ForFile(File dataFile) throws IOException {
  InputStream in = new FileInputStream(dataFile);
  try {
    MessageDigest digester = MD5Hash.getDigester();
    DigestInputStream dis = new DigestInputStream(in, digester);
    IOUtils.copyBytes(dis, new IOUtils.NullOutputStream(), 128*1024);

    return new MD5Hash(digester.digest());
  } finally {
    IOUtils.closeStream(in);
  }
}
Project: hadoop    File: FsDatasetImpl.java
@Override  // FsDatasetSpi
public synchronized ReplicaHandler append(ExtendedBlock b,
    long newGS, long expectedBlockLen) throws IOException {
  // One reason for an append: the block was successfully finalized because all
  // packets were processed at the Datanode, but the acks for some of the packets
  // were not received by the client, so the client re-opens the connection and
  // retries sending those packets.
  // The other reason is that an "append" is occurring to this block.

  // check the validity of the parameter
  if (newGS < b.getGenerationStamp()) {
    throw new IOException("The new generation stamp " + newGS + 
        " should be greater than the replica " + b + "'s generation stamp");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  LOG.info("Appending to " + replicaInfo);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
  }
  if (replicaInfo.getNumBytes() != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo + 
        " with a length of " + replicaInfo.getNumBytes() + 
        " expected length is " + expectedBlockLen);
  }

  FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
  ReplicaBeingWritten replica = null;
  try {
    replica = append(b.getBlockPoolId(), (FinalizedReplica)replicaInfo, newGS,
        b.getNumBytes());
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  return new ReplicaHandler(replica, ref);
}
Project: hadoop    File: NameNodeConnector.java
@Override
public void close() {
  keyManager.close();

  // close the output file
  IOUtils.closeStream(out); 
  if (fs != null) {
    try {
      fs.delete(idPath, true);
    } catch(IOException ioe) {
      LOG.warn("Failed to delete " + idPath, ioe);
    }
  }
}
Project: hadoop    File: YarnChild.java
/**
 * Write the task specific job-configuration file.
 * @throws IOException
 */
private static void writeLocalJobFile(Path jobFile, JobConf conf)
    throws IOException {
  FileSystem localFs = FileSystem.getLocal(conf);
  localFs.delete(jobFile);
  OutputStream out = null;
  try {
    out = FileSystem.create(localFs, jobFile, urw_gr);
    conf.writeXml(out);
  } finally {
    IOUtils.cleanup(LOG, out);
  }
}
Project: ditb    File: ByteBufferUtils.java
/**
 * Copy from the InputStream to a new heap ByteBuffer until the InputStream is exhausted.
 */
public static ByteBuffer drainInputStreamToBuffer(InputStream is) throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
  IOUtils.copyBytes(is, baos, 4096, true);
  ByteBuffer buffer = ByteBuffer.wrap(baos.toByteArray());
  buffer.rewind();
  return buffer;
}
Project: hadoop-oss    File: VersionInfo.java
protected VersionInfo(String component) {
  info = new Properties();
  String versionInfoFile = component + "-version-info.properties";
  InputStream is = null;
  try {
    is = ThreadUtil.getResourceAsStream(versionInfoFile);
    info.load(is);
  } catch (IOException ex) {
    LogFactory.getLog(getClass()).warn("Could not read '" +
        versionInfoFile + "', " + ex.toString(), ex);
  } finally {
    IOUtils.closeStream(is);
  }
}