Java class org.apache.hadoop.hdfs.protocol.HdfsFileStatus usage examples

Project: hadoop    File: Nfs3Utils.java
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
    HdfsFileStatus fs, IdMappingServiceProvider iug) {
  /**
   * Some 32-bit Linux clients have a problem with the 64-bit fileId: it seems
   * the 32-bit client takes only the lower 32 bits of the fileId and treats
   * them as a signed int. When the 32nd bit is 1, the client considers the id
   * invalid.
   */
  NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
  fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
  int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1;
  long size = (fileType == NfsFileType.NFSDIR)
      ? getDirSize(fs.getChildrenNum()) : fs.getLen();
  return new Nfs3FileAttributes(fileType, nlink,
      fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
      iug.getGidAllowingUnknown(fs.getGroup()), size, 0 /* fsid */,
      fs.getFileId(), fs.getModificationTime(), fs.getAccessTime(),
      new Nfs3FileAttributes.Specdata3());
}
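
The fileId caveat in the comment above is easy to demonstrate. A standalone sketch (not part of the Hadoop source; class and values are illustrative) of what such a 32-bit client effectively does with a 64-bit fileId:

// Standalone illustration: a 32-bit NFS client that keeps only the low
// 32 bits of a 64-bit fileId sees a negative id whenever bit 31 is set.
public class FileIdTruncation {
  public static void main(String[] args) {
    long fileId = 0x80000001L;     // 64-bit fileId with bit 31 set
    int truncated = (int) fileId;  // what the 32-bit client effectively keeps
    System.out.println(truncated); // prints -2147483647: negative, treated as invalid
  }
}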
Project: hadoop    File: TestLazyPersistFiles.java
@Test
public void testPolicyPersistenceInFsImage() throws IOException {
  startUpCluster(false, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, 0, true);
  // checkpoint
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  cluster.restartNameNode(true);

  // Stat the file and check that the lazyPersist flag is returned.
  HdfsFileStatus status = client.getFileInfo(path.toString());
  assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
Project: hadoop    File: TestRpcProgramNfs3.java
@Test(timeout = 60000)
public void testGetattr() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  GETATTR3Request req = new GETATTR3Request(handle);
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  GETATTR3Response response1 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  GETATTR3Response response2 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
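
The remaining TestRpcProgramNfs3 cases repeat the same setup: resolve a path to its fileId, wrap it in a FileHandle, serialize the request into XDR, and dispatch it once per security handler. A hypothetical pair of helpers condensing that pattern (a sketch only; the helper names are not part of the Hadoop test class, and it assumes the concrete request classes share the NFS3Request.serialize(XDR) contract they exhibit in these tests):

// Hypothetical helpers for the repeated setup in the tests below.
private FileHandle handleFor(String path) throws IOException {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(path);
  return new FileHandle(status.getFileId());
}

private static XDR serialized(NFS3Request req) {
  XDR xdr = new XDR();
  req.serialize(xdr);
  return xdr;
}

// e.g. XDR xdr_req = serialized(new GETATTR3Request(handleFor("/tmp/bar")));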
Project: hadoop    File: TestRpcProgramNfs3.java
@Test(timeout = 60000)
public void testSetattr() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  SetAttr3 symAttr = new SetAttr3(0, 1, 0, 0, null, null,
      EnumSet.of(SetAttrField.UID));
  SETATTR3Request req = new SETATTR3Request(handle, symAttr, false, null);
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  SETATTR3Response response2 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
Project: hadoop    File: TestRpcProgramNfs3.java
@Test(timeout = 60000)
public void testLookup() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  LOOKUP3Request lookupReq = new LOOKUP3Request(handle, "bar");
  XDR xdr_req = new XDR();
  lookupReq.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  LOOKUP3Response response1 = nfsd.lookup(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  LOOKUP3Response response2 = nfsd.lookup(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
Project: hadoop    File: TestRpcProgramNfs3.java
@Test(timeout = 60000)
public void testRead() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);

  READ3Request readReq = new READ3Request(handle, 0, 5);
  XDR xdr_req = new XDR();
  readReq.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  READ3Response response1 = nfsd.read(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  READ3Response response2 = nfsd.read(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
Project: hadoop    File: TestRpcProgramNfs3.java
private byte[] getFileContentsUsingNfs(String fileName, int len)
    throws Exception {
  final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
  final long dirId = status.getFileId();
  final FileHandle handle = new FileHandle(dirId);

  final READ3Request readReq = new READ3Request(handle, 0, len);
  final XDR xdr_req = new XDR();
  readReq.serialize(xdr_req);

  final READ3Response response = nfsd.read(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code: ", Nfs3Status.NFS3_OK,
      response.getStatus());
  assertTrue("expected full read", response.isEof());
  return response.getData().array();
}
Project: hadoop    File: TestRpcProgramNfs3.java
@Test(timeout = 60000)
public void testMkdir() throws Exception { // FIXME
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  MKDIR3Request req = new MKDIR3Request(handle, "fubar1", new SetAttr3());
  req.serialize(xdr_req);

  // Attempt to mkdir by an unprivileged user should fail.
  MKDIR3Response response1 = nfsd.mkdir(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  XDR xdr_req2 = new XDR();
  MKDIR3Request req2 = new MKDIR3Request(handle, "fubar2", new SetAttr3());
  req2.serialize(xdr_req2);

  // Attempt to mkdir by a privileged user should pass.
  MKDIR3Response response2 = nfsd.mkdir(xdr_req2.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
Project: hadoop    File: TestRpcProgramNfs3.java
@Test(timeout = 60000)
public void testSymlink() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(),
      "bar");
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
Project: hadoop    File: TestRpcProgramNfs3.java
@Test(timeout = 60000)
public void testRmdir() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  RMDIR3Request req = new RMDIR3Request(handle, "foo");
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  RMDIR3Response response1 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  RMDIR3Response response2 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
Project: hadoop    File: ClientNamenodeProtocolTranslatorPB.java
@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws AccessControlException,
    DSQuotaExceededException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
      .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
      .build();
  try {
    AppendResponseProto res = rpcProxy.append(null, req);
    LocatedBlock lastBlock = res.hasBlock() ? PBHelper
        .convert(res.getBlock()) : null;
    HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
        : null;
    return new LastBlockWithStatus(lastBlock, stat);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Project: hadoop    File: TestRpcProgramNfs3.java
@Test(timeout = 60000)
public void testReaddir() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  READDIR3Request req = new READDIR3Request(handle, 0, 0, 100);
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  READDIR3Response response1 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  READDIR3Response response2 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
Project: hadoop    File: TestRpcProgramNfs3.java
@Test(timeout = 60000)
public void testReaddirplus() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  READDIRPLUS3Request req = new READDIRPLUS3Request(handle, 0, 0, 3, 2);
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
Project: hadoop    File: TestRpcProgramNfs3.java
@Test(timeout = 60000)
public void testFsstat() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  FSSTAT3Request req = new FSSTAT3Request(handle);
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  FSSTAT3Response response1 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  FSSTAT3Response response2 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
Project: hadoop    File: TestRpcProgramNfs3.java
@Test(timeout = 60000)
public void testFsinfo() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  FSINFO3Request req = new FSINFO3Request(handle);
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  FSINFO3Response response1 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  FSINFO3Response response2 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
Project: hadoop    File: TestRpcProgramNfs3.java
@Test(timeout = 60000)
public void testPathconf() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  PATHCONF3Request req = new PATHCONF3Request(handle);
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  PATHCONF3Response response1 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  PATHCONF3Response response2 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
Project: hadoop    File: TestJsonUtil.java
@Test
public void testHdfsFileStatus() throws IOException {
  final long now = Time.now();
  final String parent = "/dir";
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status  = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json    = " + json.replace(",", ",\n  "));
  ObjectReader reader = new ObjectMapper().reader(Map.class);
  final HdfsFileStatus s2 =
      JsonUtil.toFileStatus((Map<?, ?>) reader.readValue(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2      = " + s2);
  System.out.println("fs2     = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
Project: hadoop    File: PBHelper.java
public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
  if (fs == null)
    return null;
  return new HdfsLocatedFileStatus(
      fs.getLength(), fs.getFileType().equals(FileType.IS_DIR), 
      fs.getBlockReplication(), fs.getBlocksize(),
      fs.getModificationTime(), fs.getAccessTime(),
      PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(), 
      fs.getFileType().equals(FileType.IS_SYMLINK) ? 
          fs.getSymlink().toByteArray() : null,
      fs.getPath().toByteArray(),
      fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID,
      fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
      fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
      fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
      fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
          : BlockStoragePolicySuite.ID_UNSPECIFIED);
}
Project: hadoop    File: TestEditLog.java
/**
 * Test case for loading a very simple edit log from a format
 * prior to the inclusion of edit transaction IDs in the log.
 */
@Test
public void testPreTxidEditLogWithEdits() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();

    long numEdits = testLoad(HADOOP20_SOME_EDITS, namesystem);
    assertEquals(3, numEdits);
    // Sanity check the edit
    HdfsFileStatus fileInfo = namesystem.getFileInfo("/myfile", false);
    assertEquals("supergroup", fileInfo.getGroup());
    assertEquals(3, fileInfo.getReplication());
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
Project: hadoop    File: TestStorageMover.java
void verifyFile(final Path file, final Byte expectedPolicyId)
    throws Exception {
  final Path parent = file.getParent();
  DirectoryListing children = dfs.getClient().listPaths(
      parent.toString(), HdfsFileStatus.EMPTY_NAME, true);
  for (HdfsFileStatus child : children.getPartialListing()) {
    if (child.getLocalName().equals(file.getName())) {
      verifyFile(parent,  child, expectedPolicyId);
      return;
    }
  }
  Assert.fail("File " + file + " not found.");
}
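
listPaths returns at most one batch of entries per call; verifyFile above needs only the first batch because it stops at the first match. A sketch (reusing the same dfs client; the directory path is illustrative) of walking a directory whose listing spans several partial batches:

// Page through a large directory using the cursor carried by
// DirectoryListing: getLastName() is the next start key, hasMore()
// the continuation flag.
byte[] startAfter = HdfsFileStatus.EMPTY_NAME;
DirectoryListing batch;
do {
  batch = dfs.getClient().listPaths("/big/dir", startAfter, false);
  if (batch == null) {
    break; // directory does not exist
  }
  for (HdfsFileStatus child : batch.getPartialListing()) {
    System.out.println(child.getLocalName());
  }
  startAfter = batch.getLastName();
} while (batch.hasMore());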
Project: hadoop    File: DFSOutputStream.java
/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  initialFileSize = stat.getLen(); // length of file when opened
  this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);

  boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);

  // The last partial block of the file has to be filled.
  if (!toNewBlock && lastBlock != null) {
    // indicate that we are appending to an existing block
    bytesCurBlock = lastBlock.getBlockSize();
    streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum);
  } else {
    computePacketChunkSize(dfsClient.getConf().writePacketSize,
        bytesPerChecksum);
    streamer = new DataStreamer(stat,
        lastBlock != null ? lastBlock.getBlock() : null);
  }
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
}
Project: hadoop    File: DFSOutputStream.java
static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, int bufferSize, Progressable progress,
    LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum,
    String[] favoredNodes) throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("newStreamForAppend", src);
  try {
    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
        progress, lastBlock, stat, checksum);
    if (favoredNodes != null && favoredNodes.length != 0) {
      out.streamer.setFavoredNodes(favoredNodes);
    }
    out.start();
    return out;
  } finally {
    scope.close();
  }
}
Project: hadoop    File: DFSClient.java
/**
 * Append to an existing file if {@link CreateFlag#APPEND} is present.
 */
private DFSOutputStream primitiveAppend(String src, EnumSet<CreateFlag> flag,
    int buffersize, Progressable progress) throws IOException {
  if (flag.contains(CreateFlag.APPEND)) {
    HdfsFileStatus stat = getFileInfo(src);
    if (stat == null) { // No file to append to
      // New file needs to be created if create option is present
      if (!flag.contains(CreateFlag.CREATE)) {
        throw new FileNotFoundException("failed to append to non-existent file "
            + src + " on client " + clientName);
      }
      return null;
    }
    return callAppend(src, buffersize, flag, progress, null);
  }
  return null;
}
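
For context, user code normally reaches this append path through the public FileSystem API rather than by calling primitiveAppend directly. A minimal sketch (fs is an already-initialized DistributedFileSystem and the path is assumed to exist; java.nio.charset.StandardCharsets is imported):

FSDataOutputStream out = fs.append(new Path("/tmp/bar"));
try {
  // New bytes land in the last partial block first, then in fresh blocks.
  out.write("more data".getBytes(StandardCharsets.UTF_8));
} finally {
  out.close();
}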
Project: hadoop    File: DistributedFileSystem.java
@Override
@SuppressWarnings("unchecked")
public boolean hasNext() throws IOException {
  while (curStat == null && hasNextNoFilter()) {
    T next;
    HdfsFileStatus fileStat = thisListing.getPartialListing()[i++];
    if (needLocation) {
      next = (T)((HdfsLocatedFileStatus)fileStat)
          .makeQualifiedLocated(getUri(), p);
    } else {
      next = (T)fileStat.makeQualified(getUri(), p);
    }
    // apply filter if not null
    if (filter == null || filter.accept(next.getPath())) {
      curStat = next;
    }
  }
  return curStat != null;
}
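
This iterator backs the standard listing calls on DistributedFileSystem. A short sketch of driving it through the public API (fs and the directory path are assumptions):

// Each hasNext()/next() pair pulls lazily from the partial listings above.
RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(new Path("/dir"));
while (it.hasNext()) {
  LocatedFileStatus st = it.next();
  System.out.println(st.getPath() + " len=" + st.getLen());
}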
Project: hadoop    File: FSDirStatAndListingOp.java
/**
 * Get the file info for a specific file.
 *
 * @param srcArg The string representation of the path to the file
 * @param resolveLink whether to throw UnresolvedLinkException
 *        if src refers to a symlink
 *
 * @return object containing information regarding the file
 *         or null if file not found
 */
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String srcArg, boolean resolveLink)
    throws IOException {
  String src = srcArg;
  if (!DFSUtil.isValidName(src)) {
    throw new InvalidPathException("Invalid file name: " + src);
  }
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  src = fsd.resolvePath(pc, src, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath(src, resolveLink);
  boolean isSuperUser = true;
  if (fsd.isPermissionEnabled()) {
    fsd.checkPermission(pc, iip, false, null, null, null, null, false);
    isSuperUser = pc.isSuperUser();
  }
  return getFileInfo(fsd, src, resolveLink,
      FSDirectory.isReservedRawName(srcArg), isSuperUser);
}
Project: hadoop    File: FSDirStatAndListingOp.java
/** Get the file info for a specific file.
 * @param fsd FSDirectory
 * @param path The string representation of the path to the file
 * @param src INodesInPath resolved for the path
 * @param isRawPath true if a /.reserved/raw pathname was passed by the user
 * @param includeStoragePolicy whether to include storage policy
 * @return object containing information regarding the file
 *         or null if file not found
 */
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String path, INodesInPath src, boolean isRawPath,
    boolean includeStoragePolicy)
    throws IOException {
  fsd.readLock();
  try {
    final INode i = src.getLastINode();
    byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
        i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
    return i == null ? null : createFileStatus(
        fsd, path, HdfsFileStatus.EMPTY_NAME, i, policyId,
        src.getPathSnapshotId(), isRawPath, src);
  } finally {
    fsd.readUnlock();
  }
}
Project: hadoop    File: FSDirStatAndListingOp.java
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath,
    boolean includeStoragePolicy)
  throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
    if (fsd.getINode4DotSnapshot(srcs) != null) {
      return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
          HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
          BlockStoragePolicySuite.ID_UNSPECIFIED);
    }
    return null;
  }

  fsd.readLock();
  try {
    final INodesInPath iip = fsd.getINodesInPath(srcs, resolveLink);
    return getFileInfo(fsd, src, iip, isRawPath, includeStoragePolicy);
  } finally {
    fsd.readUnlock();
  }
}
Project: hadoop    File: FSDirAclOp.java
static HdfsFileStatus modifyAclEntries(
    FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
    throws IOException {
  String src = srcArg;
  checkAclsConfigFlag(fsd);
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  src = fsd.resolvePath(pc, src, pathComponents);
  INodesInPath iip;
  fsd.writeLock();
  try {
    iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
    fsd.checkOwner(pc, iip);
    INode inode = FSDirectory.resolveLastINode(iip);
    int snapshotId = iip.getLatestSnapshotId();
    List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
    List<AclEntry> newAcl = AclTransformation.mergeAclEntries(
        existingAcl, aclSpec);
    AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
    fsd.getEditLog().logSetAcl(src, newAcl);
  } finally {
    fsd.writeUnlock();
  }
  return fsd.getAuditFileInfo(iip);
}
Project: hadoop    File: FSDirAclOp.java
static HdfsFileStatus removeAclEntries(
    FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
    throws IOException {
  String src = srcArg;
  checkAclsConfigFlag(fsd);
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  src = fsd.resolvePath(pc, src, pathComponents);
  INodesInPath iip;
  fsd.writeLock();
  try {
    iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
    fsd.checkOwner(pc, iip);
    INode inode = FSDirectory.resolveLastINode(iip);
    int snapshotId = iip.getLatestSnapshotId();
    List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
    List<AclEntry> newAcl = AclTransformation.filterAclEntriesByAclSpec(
      existingAcl, aclSpec);
    AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
    fsd.getEditLog().logSetAcl(src, newAcl);
  } finally {
    fsd.writeUnlock();
  }
  return fsd.getAuditFileInfo(iip);
}
Project: hadoop    File: FSDirAclOp.java
static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
    throws IOException {
  String src = srcArg;
  checkAclsConfigFlag(fsd);
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  src = fsd.resolvePath(pc, src, pathComponents);
  INodesInPath iip;
  fsd.writeLock();
  try {
    iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
    fsd.checkOwner(pc, iip);
    INode inode = FSDirectory.resolveLastINode(iip);
    int snapshotId = iip.getLatestSnapshotId();
    List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
    List<AclEntry> newAcl = AclTransformation.filterDefaultAclEntries(
      existingAcl);
    AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
    fsd.getEditLog().logSetAcl(src, newAcl);
  } finally {
    fsd.writeUnlock();
  }
  return fsd.getAuditFileInfo(iip);
}
Project: hadoop    File: FSDirAclOp.java
static HdfsFileStatus removeAcl(FSDirectory fsd, final String srcArg)
    throws IOException {
  String src = srcArg;
  checkAclsConfigFlag(fsd);
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  src = fsd.resolvePath(pc, src, pathComponents);
  INodesInPath iip;
  fsd.writeLock();
  try {
    iip = fsd.getINodesInPath4Write(src);
    fsd.checkOwner(pc, iip);
    unprotectedRemoveAcl(fsd, iip);
  } finally {
    fsd.writeUnlock();
  }
  fsd.getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
  return fsd.getAuditFileInfo(iip);
}
Project: hadoop    File: FSDirXAttrOp.java
/**
 * Set an xattr for a file or directory.
 *
 * @param src
 *          - path on which the xattr is set
 * @param xAttr
 *          - the xattr to set
 * @param flag
 *          - xattr set flags (CREATE and/or REPLACE)
 * @throws IOException
 */
static HdfsFileStatus setXAttr(
    FSDirectory fsd, String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
    boolean logRetryCache)
    throws IOException {
  checkXAttrsConfigFlag(fsd);
  checkXAttrSize(fsd, xAttr);
  FSPermissionChecker pc = fsd.getPermissionChecker();
  XAttrPermissionFilter.checkPermissionForApi(
      pc, xAttr, FSDirectory.isReservedRawName(src));
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  src = fsd.resolvePath(pc, src, pathComponents);
  List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
  xAttrs.add(xAttr);
  INodesInPath iip;
  fsd.writeLock();
  try {
    iip = fsd.getINodesInPath4Write(src);
    checkXAttrChangeAccess(fsd, iip, xAttr, pc);
    unprotectedSetXAttrs(fsd, src, xAttrs, flag);
  } finally {
    fsd.writeUnlock();
  }
  fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
  return fsd.getAuditFileInfo(iip);
}
Project: hadoop    File: ListPathsServlet.java
/**
 * Write a node to output.
 * Node information includes path, modification and access times, permission,
 * owner and group.
 * For files, it also includes size, replication and block-size.
 */
static void writeInfo(final Path fullpath, final HdfsFileStatus i,
    final XMLOutputter doc) throws IOException {
  final SimpleDateFormat ldf = df.get();
  doc.startTag(i.isDir() ? "directory" : "file");
  doc.attribute("path", fullpath.toUri().getPath());
  doc.attribute("modified", ldf.format(new Date(i.getModificationTime())));
  doc.attribute("accesstime", ldf.format(new Date(i.getAccessTime())));
  if (!i.isDir()) {
    doc.attribute("size", String.valueOf(i.getLen()));
    doc.attribute("replication", String.valueOf(i.getReplication()));
    doc.attribute("blocksize", String.valueOf(i.getBlockSize()));
  }
  doc.attribute("permission", (i.isDir()? "d": "-") + i.getPermission());
  doc.attribute("owner", i.getOwner());
  doc.attribute("group", i.getGroup());
  doc.endTag();
}
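
For a regular file this emits a single self-closing element along the lines of <file path="/user/foo/bar" modified="..." accesstime="..." size="1024" replication="3" blocksize="134217728" permission="-rw-r--r--" owner="hdfs" group="supergroup"/> (attribute values illustrative); a directory gets a <directory> tag without the size, replication and blocksize attributes.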
Project: hadoop    File: FSNamesystem.java
/**
 * Set permissions for an existing file.
 * @throws IOException
 */
void setPermission(String src, FsPermission permission) throws IOException {
  HdfsFileStatus auditStat;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set permission for " + src);
    auditStat = FSDirAttrOp.setPermission(dir, src, permission);
  } catch (AccessControlException e) {
    logAuditEvent(false, "setPermission", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "setPermission", src, null, auditStat);
}
Project: hadoop    File: FSNamesystem.java
/**
 * Set owner for an existing file.
 * @throws IOException
 */
void setOwner(String src, String username, String group)
    throws IOException {
  HdfsFileStatus auditStat;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set owner for " + src);
    auditStat = FSDirAttrOp.setOwner(dir, src, username, group);
  } catch (AccessControlException e) {
    logAuditEvent(false, "setOwner", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "setOwner", src, null, auditStat);
}
Project: hadoop    File: FSNamesystem.java
/**
 * Moves all the blocks from {@code srcs} and appends them to {@code target}.
 * To avoid rollbacks we verify the validity of ALL of the args
 * before we start the actual move.
 *
 * This does not support ".inodes" relative paths.
 * @param target the file to concat into
 * @param srcs the files that will be concatenated
 * @throws IOException on error
 */
void concat(String target, String [] srcs, boolean logRetryCache)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  waitForLoadingFSImage();
  HdfsFileStatus stat = null;
  boolean success = false;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot concat " + target);
    stat = FSDirConcatOp.concat(dir, target, srcs, logRetryCache);
    success = true;
  } finally {
    writeUnlock();
    if (success) {
      getEditLog().logSync();
    }
    logAuditEvent(success, "concat", Arrays.toString(srcs), target, stat);
  }
}
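
The client-side entry point for this operation is DistributedFileSystem.concat. A minimal sketch (the paths are illustrative and assumed to satisfy concat's preconditions, e.g. matching block sizes; dfs is a DistributedFileSystem):

Path target = new Path("/data/part-all");
Path[] srcs = { new Path("/data/part-1"), new Path("/data/part-2") };
// Source blocks are moved onto target; the source files are then removed.
dfs.concat(target, srcs);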
Project: hadoop    File: FSNamesystem.java
/**
 * Stores the modification and access time for this inode.
 * The access time is precise up to an hour. The transaction, if needed, is
 * written to the edits log but is not flushed.
 */
void setTimes(String src, long mtime, long atime) throws IOException {
  HdfsFileStatus auditStat;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set times " + src);
    auditStat = FSDirAttrOp.setTimes(dir, src, mtime, atime);
  } catch (AccessControlException e) {
    logAuditEvent(false, "setTimes", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "setTimes", src, null, auditStat);
}
Project: hadoop    File: FSNamesystem.java
/**
 * Create a symbolic link.
 */
@SuppressWarnings("deprecation")
void createSymlink(String target, String link,
    PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
    throws IOException {
  if (!FileSystem.areSymlinksEnabled()) {
    throw new UnsupportedOperationException("Symlinks not supported");
  }
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot create symlink " + link);
    auditStat = FSDirSymlinkOp.createSymlinkInt(this, target, link, dirPerms,
                                                createParent, logRetryCache);
  } catch (AccessControlException e) {
    logAuditEvent(false, "createSymlink", link, target, null);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "createSymlink", link, target, auditStat);
}
Project: hadoop    File: FSNamesystem.java
/**
 * Set the storage policy for a file or a directory.
 *
 * @param src file/directory path
 * @param policyName storage policy name
 */
void setStoragePolicy(String src, String policyName) throws IOException {
  HdfsFileStatus auditStat;
  waitForLoadingFSImage();
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set storage policy for " + src);
    auditStat = FSDirAttrOp.setStoragePolicy(
        dir, blockManager, src, policyName);
  } catch (AccessControlException e) {
    logAuditEvent(false, "setStoragePolicy", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "setStoragePolicy", src, null, auditStat);
}
Project: hadoop    File: TestDefaultBlockPlacementPolicy.java
private void testPlacement(String clientMachine,
    String clientRack) throws IOException {
  // Write 5 files and check that the block is placed as expected each time.
  for (int i = 0; i < 5; i++) {
    String src = "/test-" + i;
    // Create the file with client machine
    HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
        clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
        REPLICATION_FACTOR, DEFAULT_BLOCK_SIZE, null, false);
    LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
        null, null, fileStatus.getFileId(), null);

    assertEquals("Block should be allocated sufficient locations",
        REPLICATION_FACTOR, locatedBlock.getLocations().length);
    if (clientRack != null) {
      assertEquals("First datanode should be rack local", clientRack,
          locatedBlock.getLocations()[0].getNetworkLocation());
    }
    nameNodeRpc.abandonBlock(locatedBlock.getBlock(), fileStatus.getFileId(),
        src, clientMachine);
  }
}