Java class org.apache.hadoop.hdfs.DFSClient example source code
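
Before the per-project snippets below, a minimal sketch of how a DFSClient is typically constructed and used; the constructor, getFileInfo, and close calls mirror the examples collected in this listing, while the class name, configuration setup, and path used here are illustrative assumptions only.

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class DFSClientExample {
  public static void main(String[] args) throws IOException {
    // Assumes fs.defaultFS in the loaded configuration points at a running NameNode
    HdfsConfiguration conf = new HdfsConfiguration();
    DFSClient client = new DFSClient(NameNode.getAddress(conf), conf);
    try {
      // getFileInfo returns null when the path does not exist
      HdfsFileStatus status = client.getFileInfo("/tmp");
      System.out.println("/tmp exists: " + (status != null));
    } finally {
      client.close();
    }
  }
}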

Project: hadoop    File: RpcProgramMountd.java
public RpcProgramMountd(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  // Note that RPC cache is not enabled
  super("mountd", "localhost", config.getInt(
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1,
      VERSION_3, registrationSocket, allowInsecurePorts);
  exports = new ArrayList<String>();
  exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
      NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT));
  this.hostsMatcher = NfsExports.getInstance(config);
  this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
  UserGroupInformation.setConfiguration(config);
  SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
  this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
}
Project: hadoop    File: WriteManager.java
Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle dirHandle,
    String fileName) throws IOException {
  String fileIdPath = Nfs3Utils.getFileIdPath(dirHandle) + "/" + fileName;
  Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);

  if ((attr != null) && (attr.getType() == NfsFileType.NFSREG.toValue())) {
    OpenFileCtx openFileCtx = fileContextCache.get(new FileHandle(attr
        .getFileId()));

    if (openFileCtx != null) {
      attr.setSize(openFileCtx.getNextOffset());
      attr.setUsed(openFileCtx.getNextOffset());
    }
  }
  return attr;
}
Project: hadoop    File: DFSClientCache.java
/**
 * Close all DFSClient instances in the Cache.
 * @param onlyAutomatic only close those that are marked for automatic closing
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  List<IOException> exceptions = new ArrayList<IOException>();

  ConcurrentMap<String, DFSClient> map = clientCache.asMap();

  for (Entry<String, DFSClient> item : map.entrySet()) {
    final DFSClient client = item.getValue();
    if (client != null) {
      try {
        client.close();
      } catch (IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
Project: hadoop    File: DFSClientCache.java
private CacheLoader<String, DFSClient> clientLoader() {
  return new CacheLoader<String, DFSClient>() {
    @Override
    public DFSClient load(String userName) throws Exception {
      UserGroupInformation ugi = getUserGroupInformation(
              userName,
              UserGroupInformation.getCurrentUser());

      // Guava requires that a CacheLoader never return null.
      return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
        @Override
        public DFSClient run() throws IOException {
          return new DFSClient(NameNode.getAddress(config), config);
        }
      });
    }
  };
}
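
A loader like the one above is typically wired into a Guava LoadingCache together with the removal listener shown further down in this listing. A minimal sketch under that assumption (the field name and cache size here are illustrative, not the actual DFSClientCache code):

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.LoadingCache;

// Hypothetical wiring; DFSClientCache's constructor builds a similar cache.
private final LoadingCache<String, DFSClient> clientCache =
    CacheBuilder.newBuilder()
        .maximumSize(256)                          // assumed maximum number of cached clients
        .removalListener(clientRemovalListener())  // closes DFSClients evicted from the cache
        .build(clientLoader());                    // creates a DFSClient per user on demand

// Callers then obtain a per-user client, e.g.:
// DFSClient client = clientCache.get(userName);  // may throw ExecutionException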
Project: hadoop    File: RpcProgramNfs3.java
/**
 * Used by readdir and readdirplus to get dirents. It retries the listing if
 * the startAfter can't be found anymore.
 */
private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath,
    byte[] startAfter) throws IOException {
  DirectoryListing dlisting;
  try {
    dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
  } catch (RemoteException e) {
    IOException io = e.unwrapRemoteException();
    if (!(io instanceof DirectoryListingStartAfterNotFoundException)) {
      throw io;
    }
    // This happens when startAfter was just deleted
    LOG.info("Cookie couldn't be found: "
        + new String(startAfter, Charset.forName("UTF-8"))
        + ", do listing from beginning");
    dlisting = dfsClient
        .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
  }
  return dlisting;
}
Project: hadoop    File: TestDFSClientCache.java
@Test
public void testEviction() throws IOException {
  NfsConfiguration conf = new NfsConfiguration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost");

  // Only one entry will be in the cache
  final int MAX_CACHE_SIZE = 1;

  DFSClientCache cache = new DFSClientCache(conf, MAX_CACHE_SIZE);

  DFSClient c1 = cache.getDfsClient("test1");
  assertTrue(cache.getDfsClient("test1").toString().contains("ugi=test1"));
  assertEquals(c1, cache.getDfsClient("test1"));
  assertFalse(isDfsClientClose(c1));

  cache.getDfsClient("test2");
  assertTrue(isDfsClientClose(c1));
  assertTrue("cache size should be the max size or less",
      cache.clientCache.size() <= MAX_CACHE_SIZE);
}
Project: hadoop    File: DomainSocketFactory.java
/**
 * Get information about a domain socket path.
 *
 * @param addr         The inet address to use.
 * @param conf         The client configuration.
 *
 * @return             Information about the socket path.
 */
public PathInfo getPathInfo(InetSocketAddress addr, DFSClient.Conf conf) {
  // If there is no domain socket path configured, we can't use domain
  // sockets.
  if (conf.getDomainSocketPath().isEmpty()) return PathInfo.NOT_CONFIGURED;
  // If we can't do anything with the domain socket, don't create it.
  if (!conf.isDomainSocketDataTraffic() &&
      (!conf.isShortCircuitLocalReads() || conf.isUseLegacyBlockReaderLocal())) {
    return PathInfo.NOT_CONFIGURED;
  }
  // If the DomainSocket code is not loaded, we can't create
  // DomainSocket objects.
  if (DomainSocket.getLoadingFailureReason() != null) {
    return PathInfo.NOT_CONFIGURED;
  }
  // UNIX domain sockets can only be used to talk to local peers
  if (!DFSClient.isLocalAddress(addr)) return PathInfo.NOT_CONFIGURED;
  String escapedPath = DomainSocket.getEffectivePath(
      conf.getDomainSocketPath(), addr.getPort());
  PathState status = pathMap.getIfPresent(escapedPath);
  if (status == null) {
    return new PathInfo(escapedPath, PathState.VALID);
  } else {
    return new PathInfo(escapedPath, status);
  }
}
Project: hadoop    File: FileChecksumServlets.java
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final PrintWriter out = response.getWriter();
  final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  final ServletContext context = getServletContext();
  final DataNode datanode = (DataNode) context.getAttribute("datanode");
  final Configuration conf = 
    new HdfsConfiguration(datanode.getConf());

  try {
    final DFSClient dfs = DatanodeJspHelper.getDFSClient(request, 
        datanode, conf, getUGI(request, conf));
    final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path, Long.MAX_VALUE);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
    writeXml(ioe, path, xml);
  } catch (InterruptedException e) {
    writeXml(e, path, xml);
  }
  xml.endDocument();
}
Project: hadoop    File: NamenodeFsck.java
private void lostFoundInit(DFSClient dfs) {
  lfInited = true;
  try {
    String lfName = "/lost+found";

    final HdfsFileStatus lfStatus = dfs.getFileInfo(lfName);
    if (lfStatus == null) { // not exists
      lfInitedOk = dfs.mkdirs(lfName, null, true);
      lostFound = lfName;
    } else if (!lfStatus.isDir()) { // exists but not a directory
      LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
      lfInitedOk = false;
    }  else { // exists and is a directory
      lostFound = lfName;
      lfInitedOk = true;
    }
  }  catch (Exception e) {
    e.printStackTrace();
    lfInitedOk = false;
  }
  if (lostFound == null) {
    LOG.warn("Cannot initialize /lost+found .");
    lfInitedOk = false;
    internalError = true;
  }
}
Project: hadoop    File: WebHdfsHandler.java
private void onCreate(ChannelHandlerContext ctx)
  throws IOException, URISyntaxException {
  writeContinueHeader(ctx);

  final String nnId = params.namenodeId();
  final int bufferSize = params.bufferSize();
  final short replication = params.replication();
  final long blockSize = params.blockSize();
  final FsPermission permission = params.permission();

  EnumSet<CreateFlag> flags = params.overwrite() ?
    EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
      : EnumSet.of(CreateFlag.CREATE);

  final DFSClient dfsClient = newDfsClient(nnId, confForCreate);
  OutputStream out = dfsClient.createWrappedOutputStream(dfsClient.create(
    path, permission, flags, replication,
    blockSize, null, bufferSize, null), null);
  DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1, CREATED);

  final URI uri = new URI(HDFS_URI_SCHEME, nnId, path, null, null);
  resp.headers().set(LOCATION, uri.toString());
  resp.headers().set(CONTENT_LENGTH, 0);
  ctx.pipeline().replace(this, HdfsWriter.class.getSimpleName(),
    new HdfsWriter(dfsClient, out, resp));
}
Project: hadoop    File: WebHdfsHandler.java
private void onGetFileChecksum(ChannelHandlerContext ctx) throws IOException {
  MD5MD5CRC32FileChecksum checksum = null;
  final String nnId = params.namenodeId();
  DFSClient dfsclient = newDfsClient(nnId, conf);
  try {
    checksum = dfsclient.getFileChecksum(path, Long.MAX_VALUE);
    dfsclient.close();
    dfsclient = null;
  } finally {
    IOUtils.cleanup(LOG, dfsclient);
  }
  final byte[] js = JsonUtil.toJsonString(checksum).getBytes(Charsets.UTF_8);
  DefaultFullHttpResponse resp =
    new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(js));

  resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  resp.headers().set(CONTENT_LENGTH, js.length);
  resp.headers().set(CONNECTION, CLOSE);
  ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}
Project: ditb    File: HBaseTestingUtility.java
/**
 * Set maxRecoveryErrorCount in DFSClient.  In 0.20 pre-append it's hard-coded to 5 and
 * makes tests linger.  Here is the exception you'll see:
 * <pre>
 * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/wal.1276627923013 block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683 failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
 * </pre>
 * @param stream A DFSClient.DFSOutputStream.
 * @param max
 * @throws NoSuchFieldException
 * @throws SecurityException
 * @throws IllegalAccessException
 * @throws IllegalArgumentException
 */
public static void setMaxRecoveryErrorCount(final OutputStream stream,
    final int max) {
  try {
    Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
    for (Class<?> clazz: clazzes) {
      String className = clazz.getSimpleName();
      if (className.equals("DFSOutputStream")) {
        if (clazz.isInstance(stream)) {
          Field maxRecoveryErrorCountField =
            stream.getClass().getDeclaredField("maxRecoveryErrorCount");
          maxRecoveryErrorCountField.setAccessible(true);
          maxRecoveryErrorCountField.setInt(stream, max);
          break;
        }
      }
    }
  } catch (Exception e) {
    LOG.info("Could not set max recovery field", e);
  }
}
Project: hadoop    File: TestResolveHdfsSymlink.java
/**
 * Verifies that attempting to resolve a non-symlink results in a client-side
 * exception.
 */
@Test
public void testLinkTargetNonSymlink() throws UnsupportedFileSystemException,
    IOException {
  FileContext fc = null;
  Path notSymlink = new Path("/notasymlink");
  try {
    fc = FileContext.getFileContext(cluster.getFileSystem().getUri());
    fc.create(notSymlink, EnumSet.of(CreateFlag.CREATE));
    DFSClient client = new DFSClient(cluster.getFileSystem().getUri(),
        cluster.getConfiguration(0));
    try {
      client.getLinkTarget(notSymlink.toString());
      fail("Expected exception for resolving non-symlink");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("is not a symbolic link", e);
    }
  } finally {
    if (fc != null) {
      fc.delete(notSymlink, false);
    }
  }
}
Project: hadoop    File: TestRetryCacheWithHA.java
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider = 
      NameNodeProxies.createFailoverProxyProvider(conf, 
          nnUri, ClientProtocol.class, true, null);
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);

  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
Project: hadoop    File: TestRetryCacheWithHA.java
ConcatOp(DFSClient client, Path target, int numSrc) {
  super("concat", client);
  this.target = target.toString();
  this.srcs = new String[numSrc];
  this.srcPaths = new Path[numSrc];
  Path parent = target.getParent();
  for (int i = 0; i < numSrc; i++) {
    srcPaths[i] = new Path(parent, "srcfile" + i);
    srcs[i] = srcPaths[i].toString();
  }
}
Project: hadoop    File: OpenFileCtx.java
OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
    String dumpFilePath, DFSClient client, IdMappingServiceProvider iug,
    boolean aixCompatMode, NfsConfiguration config) {
  this.fos = fos;
  this.latestAttr = latestAttr;
  this.aixCompatMode = aixCompatMode;
  // We use the ReverseComparatorOnMin as the comparator of the map. In this
  // way, we first dump the data with the larger offset. Meanwhile, we
  // retrieve the last element to write back to HDFS.
  pendingWrites = new ConcurrentSkipListMap<OffsetRange, WriteCtx>(
      OffsetRange.ReverseComparatorOnMin);

  pendingCommits = new ConcurrentSkipListMap<Long, CommitCtx>();

  updateLastAccessTime();
  activeState = true;
  asyncStatus = false;
  asyncWriteBackStartOffset = 0;
  dumpOut = null;
  raf = null;
  nonSequentialWriteInMemory = new AtomicLong(0);

  this.dumpFilePath = dumpFilePath;  
  enabledDump = dumpFilePath != null;
  nextOffset = new AtomicLong();
  nextOffset.set(latestAttr.getSize());
  try { 
    assert(nextOffset.get() == this.fos.getPos());
  } catch (IOException e) {}
  dumpThread = null;
  this.client = client;
  this.iug = iug;
  this.uploadLargeFile = config.getBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD,
      NfsConfigKeys.LARGE_FILE_UPLOAD_DEFAULT);
}
Project: hadoop    File: OpenFileCtx.java
/**
 * Check the commit status with the given offset
 * @param commitOffset the offset to commit
 * @param channel the channel to return response
 * @param xid the xid of the commit request
 * @param preOpAttr the preOp attribute
 * @param fromRead whether the commit is triggered from read request
 * @return one commit status: COMMIT_FINISHED, COMMIT_WAIT,
 * COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR
 */
public COMMIT_STATUS checkCommit(DFSClient dfsClient, long commitOffset,
    Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) {
  if (!fromRead) {
    Preconditions.checkState(channel != null && preOpAttr != null);
    // Keep stream active
    updateLastAccessTime();
  }
  Preconditions.checkState(commitOffset >= 0);

  COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
      preOpAttr, fromRead);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got commit status: " + ret.name());
  }
  // Do the sync outside the lock
  if (ret == COMMIT_STATUS.COMMIT_DO_SYNC
      || ret == COMMIT_STATUS.COMMIT_FINISHED) {
    try {
      // Sync file data and length
      fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      ret = COMMIT_STATUS.COMMIT_FINISHED; // Remove COMMIT_DO_SYNC status 
      // Nothing to do for metadata since attr related change is pass-through
    } catch (ClosedChannelException cce) {
      if (pendingWrites.isEmpty()) {
        ret = COMMIT_STATUS.COMMIT_FINISHED;
      } else {
        ret = COMMIT_STATUS.COMMIT_ERROR;
      }
    } catch (IOException e) {
      LOG.error("Got stream error during data sync: " + e);
      // Do nothing. Stream will be closed eventually by StreamMonitor.
      // status = Nfs3Status.NFS3ERR_IO;
      ret = COMMIT_STATUS.COMMIT_ERROR;
    }
  }
  return ret;
}
Project: hadoop    File: WriteManager.java
/**
 * If the file is in cache, update the size based on the cached data size
 */
Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle fileHandle,
    IdMappingServiceProvider iug) throws IOException {
  String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle);
  Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);
  if (attr != null) {
    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
    if (openFileCtx != null) {
      attr.setSize(openFileCtx.getNextOffset());
      attr.setUsed(openFileCtx.getNextOffset());
    }
  }
  return attr;
}
Project: hadoop    File: DFSClientCache.java
private RemovalListener<String, DFSClient> clientRemovalListener() {
  return new RemovalListener<String, DFSClient>() {
    @Override
    public void onRemoval(RemovalNotification<String, DFSClient> notification) {
      DFSClient client = notification.getValue();
      try {
        client.close();
      } catch (IOException e) {
        LOG.warn(String.format(
            "IOException when closing the DFSClient(%s), cause: %s", client,
            e));
      }
    }
  };
}
Project: hadoop    File: DFSClientCache.java
private CacheLoader<DFSInputStreamCaheKey, FSDataInputStream> inputStreamLoader() {
  return new CacheLoader<DFSInputStreamCaheKey, FSDataInputStream>() {

    @Override
    public FSDataInputStream load(DFSInputStreamCaheKey key) throws Exception {
      DFSClient client = getDfsClient(key.userId);
      DFSInputStream dis = client.open(key.inodePath);
      return client.createWrappedInputStream(dis);
    }
  };
}
Project: hadoop    File: DFSClientCache.java
DFSClient getDfsClient(String userName) {
  DFSClient client = null;
  try {
    client = clientCache.get(userName);
  } catch (ExecutionException e) {
    LOG.error("Failed to create DFSClient for user:" + userName + " Cause:"
        + e);
  }
  return client;
}
Project: hadoop    File: RpcProgramNfs3.java
private void setattrInternal(DFSClient dfsClient, String fileIdPath,
    SetAttr3 newAttr, boolean setMode) throws IOException {
  EnumSet<SetAttrField> updateFields = newAttr.getUpdateFields();

  if (setMode && updateFields.contains(SetAttrField.MODE)) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("set new mode: " + newAttr.getMode());
    }
    dfsClient.setPermission(fileIdPath,
        new FsPermission((short) (newAttr.getMode())));
  }
  if (updateFields.contains(SetAttrField.UID)
      || updateFields.contains(SetAttrField.GID)) {
    String uname = updateFields.contains(SetAttrField.UID) ? iug.getUserName(
        newAttr.getUid(), IdMappingConstant.UNKNOWN_USER) : null;
    String gname = updateFields.contains(SetAttrField.GID) ? iug
        .getGroupName(newAttr.getGid(), IdMappingConstant.UNKNOWN_GROUP) : null;
    dfsClient.setOwner(fileIdPath, uname, gname);
  }

  long atime = updateFields.contains(SetAttrField.ATIME) ? newAttr.getAtime()
      .getMilliSeconds() : -1;
  long mtime = updateFields.contains(SetAttrField.MTIME) ? newAttr.getMtime()
      .getMilliSeconds() : -1;
  if (atime != -1 || mtime != -1) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("set atime: " + +atime + " mtime: " + mtime);
    }
    dfsClient.setTimes(fileIdPath, mtime, atime);
  }
}
Project: hadoop    File: Nfs3Utils.java
public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
    throws IOException {
  HdfsFileStatus fstat = getFileStatus(client, fileIdPath);
  if (fstat == null) {
    return null;
  }

  long size = fstat.isDir() ? getDirSize(fstat.getChildrenNum()) : fstat
      .getLen();
  return new WccAttr(size, new NfsTime(fstat.getModificationTime()),
      new NfsTime(fstat.getModificationTime()));
}
Project: hadoop    File: Nfs3Utils.java
public static WccData createWccData(final WccAttr preOpAttr,
    DFSClient dfsClient, final String fileIdPath,
    final IdMappingServiceProvider iug)
    throws IOException {
  Nfs3FileAttributes postOpDirAttr = getFileAttr(dfsClient, fileIdPath, iug);
  return new WccData(preOpAttr, postOpDirAttr);
}
Project: hadoop    File: TestDFSClientCache.java
private static boolean isDfsClientClose(DFSClient c) {
  try {
    c.exists("");
  } catch (IOException e) {
    return e.getMessage().equals("Filesystem closed");
  }
  return false;
}
Project: hadoop    File: TestWrites.java
@Test
public void testCheckCommitAixCompatMode() throws IOException {
  DFSClient dfsClient = Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr = new Nfs3FileAttributes();
  HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);

  NfsConfiguration conf = new NfsConfiguration();
  conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false);
  // Enable AIX compatibility mode.
  OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
      new ShellBasedIdMapping(new NfsConfiguration()), true, conf);

  // Test fall-through to pendingWrites check in the event that commitOffset
  // is greater than the number of bytes we've so far flushed.
  Mockito.when(fos.getPos()).thenReturn((long) 2);
  COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
  Assert.assertTrue(status == COMMIT_STATUS.COMMIT_FINISHED);

  // Test the case when we actually have received more bytes than we're trying
  // to commit.
  ctx.getPendingWritesForTest().put(new OffsetRange(0, 10),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  Mockito.when(fos.getPos()).thenReturn((long) 10);
  ctx.setNextOffsetForTest((long)10);
  status = ctx.checkCommitInternal(5, null, 1, attr, false);
  Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
}
Project: hadoop    File: TestWrites.java
@Test
public void testCheckSequential() throws IOException {
  DFSClient dfsClient = Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr = new Nfs3FileAttributes();
  HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
  Mockito.when(fos.getPos()).thenReturn((long) 0);
  NfsConfiguration config = new NfsConfiguration();

  config.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false);
  OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
      new ShellBasedIdMapping(config), false, config);

  ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  ctx.getPendingWritesForTest().put(new OffsetRange(10, 15),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
  ctx.getPendingWritesForTest().put(new OffsetRange(20, 25),
      new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));

  assertTrue(!ctx.checkSequential(5, 4));
  assertTrue(ctx.checkSequential(9, 5));
  assertTrue(ctx.checkSequential(10, 5));
  assertTrue(ctx.checkSequential(14, 5));
  assertTrue(!ctx.checkSequential(15, 5));
  assertTrue(!ctx.checkSequential(20, 5));
  assertTrue(!ctx.checkSequential(25, 5));
  assertTrue(!ctx.checkSequential(999, 5));
}
Project: hadoop    File: Hdfs.java
/**
 * This constructor has the signature needed by
 * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}
 * 
 * @param theUri which must be that of Hdfs
 * @param conf configuration
 * @throws IOException
 */
Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
  super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);

  if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
    throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
  }
  String host = theUri.getHost();
  if (host == null) {
    throw new IOException("Incomplete HDFS URI, no host: " + theUri);
  }

  this.dfs = new DFSClient(theUri, conf, getStatistics());
}
Project: hadoop    File: NamenodeFsck.java
private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes,
                              TreeSet<DatanodeInfo> deadNodes) throws IOException {
  if ((nodes == null) ||
      (nodes.length - deadNodes.size() < 1)) {
    throw new IOException("No live nodes contain current block");
  }
  DatanodeInfo chosenNode;
  do {
    chosenNode = nodes[DFSUtil.getRandom().nextInt(nodes.length)];
  } while (deadNodes.contains(chosenNode));
  return chosenNode;
}
Project: hadoop    File: StreamFile.java
protected DFSClient getDFSClient(HttpServletRequest request)
    throws IOException, InterruptedException {
  final Configuration conf =
    (Configuration) getServletContext().getAttribute(JspHelper.CURRENT_CONF);
  UserGroupInformation ugi = getUGI(request, conf);
  final ServletContext context = getServletContext();
  final DataNode datanode = (DataNode) context.getAttribute("datanode");
  return DatanodeJspHelper.getDFSClient(request, datanode, conf, ugi);
}
Project: hadoop    File: WebHdfsHandler.java
private void onAppend(ChannelHandlerContext ctx) throws IOException {
  writeContinueHeader(ctx);
  final String nnId = params.namenodeId();
  final int bufferSize = params.bufferSize();

  DFSClient dfsClient = newDfsClient(nnId, conf);
  OutputStream out = dfsClient.append(path, bufferSize,
      EnumSet.of(CreateFlag.APPEND), null, null);
  DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1, OK);
  resp.headers().set(CONTENT_LENGTH, 0);
  ctx.pipeline().replace(this, HdfsWriter.class.getSimpleName(),
    new HdfsWriter(dfsClient, out, resp));
}
Project: hadoop    File: DatanodeJspHelper.java
private static DFSClient getDFSClient(final UserGroupInformation user,
                                      final String addr,
                                      final Configuration conf
                                      ) throws IOException,
                                               InterruptedException {
  return
    user.doAs(new PrivilegedExceptionAction<DFSClient>() {
      @Override
      public DFSClient run() throws IOException {
        return new DFSClient(NetUtils.createSocketAddr(addr), conf);
      }
    });
}
Project: hadoop    File: TestResolveHdfsSymlink.java
/**
 * Tests that attempting to resolve a non-existent file results in a
 * FileNotFoundException.
 */
@Test
public void testLinkTargetNonExistent() throws IOException {
  Path doesNotExist = new Path("/filethatdoesnotexist");
  DFSClient client = new DFSClient(cluster.getFileSystem().getUri(),
      cluster.getConfiguration(0));
  try {
    client.getLinkTarget(doesNotExist.toString());
    fail("Expected exception for resolving non-existent file");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains("File does not exist: "
        + doesNotExist.toString(), e);
  }
}
Project: hadoop    File: TestRetryCacheWithHA.java
RenameSnapshotOp(DFSClient client, String dir, String oldName,
    String newName) {
  super("renameSnapshot", client);
  this.dir = dir;
  this.oldName = oldName;
  this.newName = newName;
}
Project: hadoop    File: TestRetryCacheWithHA.java
RemoveCacheDirectiveInfoOp(DFSClient client, String pool,
    String path) {
  super("removeCacheDirective", client);
  this.directive = new CacheDirectiveInfo.Builder().
      setPool(pool).
      setPath(new Path(path)).
      build();
}
Project: hadoop    File: TestRetryCacheWithHA.java
@Test (timeout=60000)
public void testAddCacheDirectiveInfo() throws Exception {
  DFSClient client = genClientWithDummyHandler();
  AtMostOnceOp op = new AddCacheDirectiveInfoOp(client, 
      new CacheDirectiveInfo.Builder().
          setPool("pool").
          setPath(new Path("/path")).
          build());
  testClientRetryWithFailover(op);
}
Project: hadoop    File: TestRetryCacheWithHA.java
@Test (timeout=60000)
public void testModifyCacheDirectiveInfo() throws Exception {
  DFSClient client = genClientWithDummyHandler();
  AtMostOnceOp op = new ModifyCacheDirectiveInfoOp(client, 
      new CacheDirectiveInfo.Builder().
          setPool("pool").
          setPath(new Path("/path")).
          setReplication((short)1).build(),
      (short)555);
  testClientRetryWithFailover(op);
}
Project: hadoop    File: TestRetryCacheWithHA.java
@Test (timeout=60000)
public void testRemoveCacheDescriptor() throws Exception {
  DFSClient client = genClientWithDummyHandler();
  AtMostOnceOp op = new RemoveCacheDirectiveInfoOp(client, "pool",
      "/path");
  testClientRetryWithFailover(op);
}
Project: hadoop    File: TestFailoverWithBlockTokensEnabled.java
@Test
public void ensureInvalidBlockTokensAreRejected() throws IOException,
    URISyntaxException {
  cluster.transitionToActive(0);
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.writeFile(fs, TEST_PATH, TEST_DATA);
  assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));

  DFSClient dfsClient = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
  DFSClient spyDfsClient = Mockito.spy(dfsClient);
  Mockito.doAnswer(
      new Answer<LocatedBlocks>() {
        @Override
        public LocatedBlocks answer(InvocationOnMock arg0) throws Throwable {
          LocatedBlocks locatedBlocks = (LocatedBlocks)arg0.callRealMethod();
          for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            Token<BlockTokenIdentifier> token = lb.getBlockToken();
            BlockTokenIdentifier id = lb.getBlockToken().decodeIdentifier();
            // This will make the token invalid, since the password
            // won't match anymore
            id.setExpiryDate(Time.now() + 10);
            Token<BlockTokenIdentifier> newToken =
                new Token<BlockTokenIdentifier>(id.getBytes(),
                    token.getPassword(), token.getKind(), token.getService());
            lb.setBlockToken(newToken);
          }
          return locatedBlocks;
        }
      }).when(spyDfsClient).getLocatedBlocks(Mockito.anyString(),
          Mockito.anyLong(), Mockito.anyLong());
  DFSClientAdapter.setDFSClient((DistributedFileSystem)fs, spyDfsClient);

  try {
    assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));
    fail("Shouldn't have been able to read a file with invalid block tokens");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Could not obtain block", ioe);
  }
}
Project: hadoop    File: TestFsck.java
public CorruptedTestFile(String name, Set<Integer> blocksToCorrupt,
    DFSClient dfsClient, int numDataNodes, int blockSize)
        throws IOException {
  this.name = name;
  this.blocksToCorrupt = blocksToCorrupt;
  this.dfsClient = dfsClient;
  this.numDataNodes = numDataNodes;
  this.blockSize = blockSize;
  this.initialContents = cacheInitialContents();
}