Java class org.apache.hadoop.hdfs.protocol.QuotaExceededException: example source code
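This page collects example usages of org.apache.hadoop.hdfs.protocol.QuotaExceededException from the HDFS NameNode code of the hadoop, aliyun-oss-hadoop-fs, and big-c projects. Before the project snippets, here is a minimal client-side sketch of how the exception typically reaches user code when a directory's namespace quota is exhausted. The class name, demo path, and quota values are illustrative assumptions and are not taken from the snippets below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;

// Illustrative sketch only: shows where QuotaExceededException surfaces on the client side.
public class QuotaExceededExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("Quotas are an HDFS feature; expected DistributedFileSystem");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // Assumed demo directory; a namespace quota of 3 covers the directory
    // itself plus at most two children.
    Path dir = new Path("/tmp/quota-demo");
    dfs.mkdirs(dir);
    dfs.setQuota(dir, 3, HdfsConstants.QUOTA_DONT_SET);

    try {
      for (int i = 0; i < 5; i++) {
        dfs.create(new Path(dir, "file-" + i)).close();
      }
    } catch (QuotaExceededException qee) {
      // Expected once the quota is exhausted; NSQuotaExceededException and
      // DSQuotaExceededException are subclasses of QuotaExceededException.
      System.out.println("Quota violated: " + qee.getMessage());
    } finally {
      fs.close();
    }
  }
}

The TestSymlinkHdfs examples below trigger the same namespace-quota violation through createSymlink, while TestAbandonBlock exercises the storage-space quota side.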

Project: hadoop    File: DirectoryWithQuotaFeature.java
void addSpaceConsumed(final INodeDirectory dir, final QuotaCounts counts,
    boolean verify) throws QuotaExceededException {
  if (dir.isQuotaSet()) {
    // The following steps are important:
    // check quotas in this inode and all ancestors before changing counts
    // so that no change is made if there is any quota violation.
    // (1) verify quota in this inode
    if (verify) {
      verifyQuota(counts);
    }
    // (2) verify quota and then add count in ancestors
    dir.addSpaceConsumed2Parent(counts, verify);
    // (3) add count in this inode
    addSpaceConsumed2Cache(counts);
  } else {
    dir.addSpaceConsumed2Parent(counts, verify);
  }
}
Project: hadoop    File: FSDirectory.java
/** update count of each inode with quota
 * 
 * @param iip inodes in a path
 * @param numOfINodes the number of inodes to update starting from index 0
 * @param counts the count of space/namespace/type usage to be updated
 * @param checkQuota if true then check if quota is exceeded
 * @throws QuotaExceededException if the new count violates any quota limit
 */
void updateCount(INodesInPath iip, int numOfINodes,
                  QuotaCounts counts, boolean checkQuota)
                  throws QuotaExceededException {
  assert hasWriteLock();
  if (!namesystem.isImageLoaded()) {
    //still initializing. do not check or update quotas.
    return;
  }
  if (numOfINodes > iip.length()) {
    numOfINodes = iip.length();
  }
  if (checkQuota && !skipQuotaCheck) {
    verifyQuota(iip, numOfINodes, counts, null);
  }
  unprotectedUpdateCount(iip, numOfINodes, counts);
}
Project: hadoop    File: FSDirectory.java
private void verifyQuotaForTruncate(INodesInPath iip, INodeFile file,
    long newLength, QuotaCounts delta) throws QuotaExceededException {
  if (!getFSNamesystem().isImageLoaded() || shouldSkipQuotaChecks()) {
    // Do not check quota if edit log is still being processed
    return;
  }
  final long diff = file.computeQuotaDeltaForTruncate(newLength);
  final short repl = file.getBlockReplication();
  delta.addStorageSpace(diff * repl);
  final BlockStoragePolicy policy = getBlockStoragePolicySuite()
      .getPolicy(file.getStoragePolicyID());
  List<StorageType> types = policy.chooseStorageTypes(repl);
  for (StorageType t : types) {
    if (t.supportTypeQuota()) {
      delta.addTypeSpace(t, diff);
    }
  }
  if (diff > 0) {
    readLock();
    try {
      verifyQuota(iip, iip.length() - 1, delta, null);
    } finally {
      readUnlock();
    }
  }
}
Project: hadoop    File: FSDirRenameOp.java
/**
 * Verify quota for a rename operation where srcInodes[srcInodes.length-1] moves
 * to dstInodes[dstInodes.length-1]
 */
private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src,
    INodesInPath dst) throws QuotaExceededException {
  if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
    // Do not check quota if edits log is still being processed
    return;
  }
  int i = 0;
  while(src.getINode(i) == dst.getINode(i)) { i++; }
  // src[i - 1] is the last common ancestor.
  BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
  final QuotaCounts delta = src.getLastINode().computeQuotaUsage(bsps);

  // Reduce the required quota by dst that is being removed
  final INode dstINode = dst.getLastINode();
  if (dstINode != null) {
    delta.subtract(dstINode.computeQuotaUsage(bsps));
  }
  FSDirectory.verifyQuota(dst, dst.length() - 1, delta, src.getINode(i - 1));
}
Project: hadoop    File: FSDirRenameOp.java
void restoreSource() throws QuotaExceededException {
  // Rename failed - restore src
  final INode oldSrcChild = srcChild;
  // put it back
  if (withCount == null) {
    srcChild.setLocalName(srcChildName);
  } else if (!srcChildIsReference) { // src must be in snapshot
    // the withCount node will no longer be used thus no need to update
    // its reference number here
    srcChild = withCount.getReferredINode();
    srcChild.setLocalName(srcChildName);
  } else {
    withCount.removeReference(oldSrcChild.asReference());
    srcChild = new INodeReference.DstReference(srcParent, withCount,
        srcRefDstSnapshot);
    withCount.getReferredINode().setLocalName(srcChildName);
  }

  if (isSrcInSnapshot) {
    srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild);
  } else {
    // srcParent is not an INodeDirectoryWithSnapshot, we only need to add
    // the srcChild back
    fsd.addLastINodeNoQuotaCheck(srcParentIIP, srcChild);
  }
}
Project: hadoop    File: FSDirRenameOp.java
boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
    throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    filesDeleted = true;
  } else {
    filesDeleted = oldDstChild.cleanSubtree(bsps, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes)
        .getNameSpace() >= 0;
  }
  fsd.getFSNamesystem().removeLeasesAndINodes(src, removedINodes, false);
  return filesDeleted;
}
Project: hadoop    File: FSDirAttrOp.java
static void unprotectedSetOwner(
    FSDirectory fsd, String src, String username, String groupname)
    throws FileNotFoundException, UnresolvedLinkException,
    QuotaExceededException, SnapshotAccessControlException {
  assert fsd.hasWriteLock();
  final INodesInPath inodesInPath = fsd.getINodesInPath4Write(src, true);
  INode inode = inodesInPath.getLastINode();
  if (inode == null) {
    throw new FileNotFoundException("File does not exist: " + src);
  }
  if (username != null) {
    inode = inode.setUser(username, inodesInPath.getLatestSnapshotId());
  }
  if (groupname != null) {
    inode.setGroup(groupname, inodesInPath.getLatestSnapshotId());
  }
}
Project: hadoop    File: FSDirAttrOp.java
private static boolean unprotectedSetTimes(
    FSDirectory fsd, INode inode, long mtime, long atime, boolean force,
    int latest) throws QuotaExceededException {
  assert fsd.hasWriteLock();
  boolean status = false;
  if (mtime != -1) {
    inode = inode.setModificationTime(mtime, latest);
    status = true;
  }
  if (atime != -1) {
    long inodeTime = inode.getAccessTime();

    // if the last access time update was within the last precision interval, then
    // no need to store access time
    if (atime <= inodeTime + fsd.getFSNamesystem().getAccessTimePrecision()
        && !force) {
      status =  false;
    } else {
      inode.setAccessTime(atime, latest);
      status = true;
    }
  }
  return status;
}
Project: hadoop    File: FSNamesystem.java
/**
 * Verify quota when using the preferred block size for UC block. This is
 * usually used by append and truncate
 * @throws QuotaExceededException when violating the storage quota
 * @return expected quota usage update. null means no change or no need to
 *         update quota usage later
 */
private QuotaCounts verifyQuotaForUCBlock(INodeFile file, INodesInPath iip)
    throws QuotaExceededException {
  if (!isImageLoaded() || dir.shouldSkipQuotaChecks()) {
    // Do not check quota if editlog is still being processed
    return null;
  }
  if (file.getLastBlock() != null) {
    final QuotaCounts delta = computeQuotaDeltaForUCBlock(file);
    dir.readLock();
    try {
      FSDirectory.verifyQuota(iip, iip.length() - 1, delta, null);
      return delta;
    } finally {
      dir.readUnlock();
    }
  }
  return null;
}
Project: hadoop    File: FSDirMkdirOp.java
/**
 * Create a directory at the path specified by parent.
 */
private static INodesInPath unprotectedMkdir(FSDirectory fsd, long inodeId,
    INodesInPath parent, byte[] name, PermissionStatus permission,
    List<AclEntry> aclEntries, long timestamp)
    throws QuotaExceededException, AclException, FileAlreadyExistsException {
  assert fsd.hasWriteLock();
  assert parent.getLastINode() != null;
  if (!parent.getLastINode().isDirectory()) {
    throw new FileAlreadyExistsException("Parent path is not a directory: " +
        parent.getPath() + " " + DFSUtil.bytes2String(name));
  }
  final INodeDirectory dir = new INodeDirectory(inodeId, name, permission,
      timestamp);

  INodesInPath iip = fsd.addLastINode(parent, dir, true);
  if (iip != null && aclEntries != null) {
    AclStorage.updateINodeAcl(dir, aclEntries, Snapshot.CURRENT_STATE_ID);
  }
  return iip;
}
Project: hadoop    File: INodeDirectory.java
/**
 * Add a child inode to the directory.
 * 
 * @param node INode to insert
 * @param setModTime set modification time for the parent node
 *                   not needed when replaying the addition and 
 *                   the parent already has the proper mod time
 * @return false if the child with this name already exists; 
 *         otherwise, return true;
 */
public boolean addChild(INode node, final boolean setModTime,
    final int latestSnapshotId) throws QuotaExceededException {
  final int low = searchChildren(node.getLocalNameBytes());
  if (low >= 0) {
    return false;
  }

  if (isInLatestSnapshot(latestSnapshotId)) {
    // create snapshot feature if necessary
    DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
    if (sf == null) {
      sf = this.addSnapshotFeature(null);
    }
    return sf.addChild(this, node, setModTime, latestSnapshotId);
  }
  addChild(node, low);
  if (setModTime) {
    // update modification time of the parent directory
    updateModificationTime(node.getModificationTime(), latestSnapshotId);
  }
  return true;
}
Project: hadoop    File: INodeDirectory.java
/**
 * Undo the rename operation for the dst tree, i.e., if the rename operation
 * (with OVERWRITE option) removes a file/dir from the dst tree, add it back
 * and delete possible record in the deleted list.  
 */
public void undoRename4DstParent(final BlockStoragePolicySuite bsps,
    final INode deletedChild,
    int latestSnapshotId) throws QuotaExceededException {
  DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  Preconditions.checkState(sf != null,
      "Directory does not have snapshot feature");
  boolean removeDeletedChild = sf.getDiffs().removeChild(ListType.DELETED,
      deletedChild);
  int sid = removeDeletedChild ? Snapshot.CURRENT_STATE_ID : latestSnapshotId;
  final boolean added = addChild(deletedChild, true, sid);
  // update quota usage if the child was added successfully and the old child
  // has not been stored in the deleted list before
  if (added && !removeDeletedChild) {
    final QuotaCounts counts = deletedChild.computeQuotaUsage(bsps);
    addSpaceConsumed(counts, false);

  }
}
Project: hadoop    File: TestSymlinkHdfs.java
@Test(timeout=10000)
/** Test createSymlink(..) with quota. */
public void testQuota() throws IOException {
  final Path dir = new Path(testBaseDir1());
  dfs.setQuota(dir, 3, HdfsConstants.QUOTA_DONT_SET);

  final Path file = new Path(dir, "file");
  createAndWriteFile(file);

  //creating the first link should succeed
  final Path link1 = new Path(dir, "link1");
  wrapper.createSymlink(file, link1, false);

  try {
    //creating the second link should fail with QuotaExceededException.
    final Path link2 = new Path(dir, "link2");
    wrapper.createSymlink(file, link2, false);
    fail("Created symlink despite quota violation");
  } catch(QuotaExceededException qee) {
    //expected
  }
}
Project: hadoop    File: TestAbandonBlock.java
@Test
/** Make sure that the quota is decremented correctly when a block is abandoned */
public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
  // Setting diskspace quota to 3MB
  fs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);

  // Start writing a file with 2 replicas to ensure each datanode has one.
  // Block Size is 1MB.
  String src = FILE_NAME_PREFIX + "test_quota1";
  FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)2, 1024 * 1024);
  for (int i = 0; i < 1024; i++) {
    fout.writeByte(123);
  }

  // Shutdown one datanode, causing the block abandonment.
  cluster.getDataNodes().get(0).shutdown();

  // Close the file, new block will be allocated with 2MB pending size.
  try {
    fout.close();
  } catch (QuotaExceededException e) {
    fail("Unexpected quota exception when closing fout");
  }
}
Project: hadoop    File: TestINodeFile.java
/**
 * For a given path, build a tree of INodes and return the leaf node.
 */
private INode createTreeOfInodes(String path) throws QuotaExceededException {
  byte[][] components = INode.getPathComponents(path);
  FsPermission perm = FsPermission.createImmutable((short)0755);
  PermissionStatus permstatus = PermissionStatus.createImmutable("", "", perm);

  long id = 0;
  INodeDirectory prev = new INodeDirectory(++id, new byte[0], permstatus, 0);
  INodeDirectory dir = null;
  for (byte[] component : components) {
    if (component.length == 0) {
      continue;
    }
    System.out.println("Adding component " + DFSUtil.bytes2String(component));
    dir = new INodeDirectory(++id, component, permstatus, 0);
    prev.addChild(dir, false, Snapshot.CURRENT_STATE_ID);
    prev = dir;
  }
  return dir; // Last Inode in the chain
}
Project: aliyun-oss-hadoop-fs    File: DirectoryWithQuotaFeature.java
void addSpaceConsumed(final INodeDirectory dir, final QuotaCounts counts,
    boolean verify) throws QuotaExceededException {
  if (dir.isQuotaSet()) {
    // The following steps are important:
    // check quotas in this inode and all ancestors before changing counts
    // so that no change is made if there is any quota violation.
    // (1) verify quota in this inode
    if (verify) {
      verifyQuota(counts);
    }
    // (2) verify quota and then add count in ancestors
    dir.addSpaceConsumed2Parent(counts, verify);
    // (3) add count in this inode
    addSpaceConsumed2Cache(counts);
  } else {
    dir.addSpaceConsumed2Parent(counts, verify);
  }
}
Project: big-c    File: TestAbandonBlock.java
@Test
/** Make sure that the quota is decremented correctly when a block is abandoned */
public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
  // Setting diskspace quota to 3MB
  fs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);

  // Start writing a file with 2 replicas to ensure each datanode has one.
  // Block Size is 1MB.
  String src = FILE_NAME_PREFIX + "test_quota1";
  FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)2, 1024 * 1024);
  for (int i = 0; i < 1024; i++) {
    fout.writeByte(123);
  }

  // Shutdown one datanode, causing the block abandonment.
  cluster.getDataNodes().get(0).shutdown();

  // Close the file, new block will be allocated with 2MB pending size.
  try {
    fout.close();
  } catch (QuotaExceededException e) {
    fail("Unexpected quota exception when closing fout");
  }
}
Project: aliyun-oss-hadoop-fs    File: FSDirTruncateOp.java
private static void verifyQuotaForTruncate(FSNamesystem fsn,
    INodesInPath iip, INodeFile file, long newLength, QuotaCounts delta)
    throws QuotaExceededException {
  FSDirectory fsd = fsn.getFSDirectory();
  if (!fsn.isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
    // Do not check quota if edit log is still being processed
    return;
  }
  final BlockStoragePolicy policy = fsd.getBlockStoragePolicySuite()
      .getPolicy(file.getStoragePolicyID());
  file.computeQuotaDeltaForTruncate(newLength, policy, delta);
  fsd.readLock();
  try {
    FSDirectory.verifyQuota(iip, iip.length() - 1, delta, null);
  } finally {
    fsd.readUnlock();
  }
}
Project: aliyun-oss-hadoop-fs    File: FSDirAppendOp.java
/**
 * Verify quota when using the preferred block size for UC block. This is
 * usually used by append and truncate.
 *
 * @throws QuotaExceededException when violating the storage quota
 * @return expected quota usage update. null means no change or no need to
 *         update quota usage later
 */
private static QuotaCounts verifyQuotaForUCBlock(FSNamesystem fsn,
    INodeFile file, INodesInPath iip) throws QuotaExceededException {
  FSDirectory fsd = fsn.getFSDirectory();
  if (!fsn.isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
    // Do not check quota if editlog is still being processed
    return null;
  }
  if (file.getLastBlock() != null) {
    final QuotaCounts delta = computeQuotaDeltaForUCBlock(fsn, file);
    fsd.readLock();
    try {
      FSDirectory.verifyQuota(iip, iip.length() - 1, delta, null);
      return delta;
    } finally {
      fsd.readUnlock();
    }
  }
  return null;
}
Project: aliyun-oss-hadoop-fs    File: FSDirectory.java
/** update count of each inode with quota
 * 
 * @param iip inodes in a path
 * @param numOfINodes the number of inodes to update starting from index 0
 * @param counts the count of space/namespace/type usage to be updated
 * @param checkQuota if true then check if quota is exceeded
 * @throws QuotaExceededException if the new count violates any quota limit
 */
void updateCount(INodesInPath iip, int numOfINodes,
                  QuotaCounts counts, boolean checkQuota)
                  throws QuotaExceededException {
  assert hasWriteLock();
  if (!namesystem.isImageLoaded()) {
    //still initializing. do not check or update quotas.
    return;
  }
  if (numOfINodes > iip.length()) {
    numOfINodes = iip.length();
  }
  if (checkQuota && !skipQuotaCheck) {
    verifyQuota(iip, numOfINodes, counts, null);
  }
  unprotectedUpdateCount(iip, numOfINodes, counts);
}
Project: aliyun-oss-hadoop-fs    File: FSDirRenameOp.java
/**
 * Verify quota for a rename operation where srcInodes[srcInodes.length-1] moves
 * to dstInodes[dstInodes.length-1]
 */
private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src,
    INodesInPath dst) throws QuotaExceededException {
  if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
    // Do not check quota if edits log is still being processed
    return;
  }
  int i = 0;
  while(src.getINode(i) == dst.getINode(i)) { i++; }
  // src[i - 1] is the last common ancestor.
  BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
  final QuotaCounts delta = src.getLastINode().computeQuotaUsage(bsps);

  // Reduce the required quota by dst that is being removed
  final INode dstINode = dst.getLastINode();
  if (dstINode != null) {
    delta.subtract(dstINode.computeQuotaUsage(bsps));
  }
  FSDirectory.verifyQuota(dst, dst.length() - 1, delta, src.getINode(i - 1));
}
Project: aliyun-oss-hadoop-fs    File: FSDirRenameOp.java
void restoreSource() throws QuotaExceededException {
  // Rename failed - restore src
  final INode oldSrcChild = srcChild;
  // put it back
  if (withCount == null) {
    srcChild.setLocalName(srcChildName);
  } else if (!srcChildIsReference) { // src must be in snapshot
    // the withCount node will no longer be used thus no need to update
    // its reference number here
    srcChild = withCount.getReferredINode();
    srcChild.setLocalName(srcChildName);
  } else {
    withCount.removeReference(oldSrcChild.asReference());
    srcChild = new INodeReference.DstReference(srcParent, withCount,
        srcRefDstSnapshot);
    withCount.getReferredINode().setLocalName(srcChildName);
  }

  if (isSrcInSnapshot) {
    srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild);
  } else {
    // srcParent is not an INodeDirectoryWithSnapshot, we only need to add
    // the srcChild back
    fsd.addLastINodeNoQuotaCheck(srcParentIIP, srcChild);
  }
}
Project: aliyun-oss-hadoop-fs    File: FSDirRenameOp.java
boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
    throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  List<Long> removedUCFiles = new ChunkedArrayList<>();
  INode.ReclaimContext context = new INode.ReclaimContext(
      bsps, collectedBlocks, removedINodes, removedUCFiles);
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    oldDstChild.destroyAndCollectBlocks(context);
    filesDeleted = true;
  } else {
    oldDstChild.cleanSubtree(context, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId());
    filesDeleted = context.quotaDelta().getNsDelta() >= 0;
  }
  fsd.updateReplicationFactor(context.collectedBlocks()
                                  .toUpdateReplicationInfo());

  fsd.getFSNamesystem().removeLeasesAndINodes(
      removedUCFiles, removedINodes, false);
  return filesDeleted;
}
Project: aliyun-oss-hadoop-fs    File: FSDirAttrOp.java
static void unprotectedSetOwner(
    FSDirectory fsd, String src, String username, String groupname)
    throws FileNotFoundException, UnresolvedLinkException,
    QuotaExceededException, SnapshotAccessControlException {
  assert fsd.hasWriteLock();
  final INodesInPath inodesInPath = fsd.getINodesInPath4Write(src, true);
  INode inode = inodesInPath.getLastINode();
  if (inode == null) {
    throw new FileNotFoundException("File does not exist: " + src);
  }
  if (username != null) {
    inode = inode.setUser(username, inodesInPath.getLatestSnapshotId());
  }
  if (groupname != null) {
    inode.setGroup(groupname, inodesInPath.getLatestSnapshotId());
  }
}
Project: big-c    File: TestSymlinkHdfs.java
@Test(timeout=10000)
/** Test createSymlink(..) with quota. */
public void testQuota() throws IOException {
  final Path dir = new Path(testBaseDir1());
  dfs.setQuota(dir, 3, HdfsConstants.QUOTA_DONT_SET);

  final Path file = new Path(dir, "file");
  createAndWriteFile(file);

  //creating the first link should succeed
  final Path link1 = new Path(dir, "link1");
  wrapper.createSymlink(file, link1, false);

  try {
    //creating the second link should fail with QuotaExceededException.
    final Path link2 = new Path(dir, "link2");
    wrapper.createSymlink(file, link2, false);
    fail("Created symlink despite quota violation");
  } catch(QuotaExceededException qee) {
    //expected
  }
}
Project: aliyun-oss-hadoop-fs    File: FSDirAttrOp.java
private static boolean unprotectedSetTimes(
    FSDirectory fsd, INode inode, long mtime, long atime, boolean force,
    int latest) throws QuotaExceededException {
  assert fsd.hasWriteLock();
  boolean status = false;
  if (mtime != -1) {
    inode = inode.setModificationTime(mtime, latest);
    status = true;
  }
  if (atime != -1) {
    long inodeTime = inode.getAccessTime();

    // if the last access time update was within the last precision interval, then
    // no need to store access time
    if (atime <= inodeTime + fsd.getAccessTimePrecision() && !force) {
      status =  false;
    } else {
      inode.setAccessTime(atime, latest);
      status = true;
    }
  }
  return status;
}
Project: aliyun-oss-hadoop-fs    File: FSDirMkdirOp.java
/**
 * Create a directory at the path specified by parent.
 */
private static INodesInPath unprotectedMkdir(FSDirectory fsd, long inodeId,
    INodesInPath parent, byte[] name, PermissionStatus permission,
    List<AclEntry> aclEntries, long timestamp)
    throws QuotaExceededException, AclException, FileAlreadyExistsException {
  assert fsd.hasWriteLock();
  assert parent.getLastINode() != null;
  if (!parent.getLastINode().isDirectory()) {
    throw new FileAlreadyExistsException("Parent path is not a directory: " +
        parent.getPath() + " " + DFSUtil.bytes2String(name));
  }
  final INodeDirectory dir = new INodeDirectory(inodeId, name, permission,
      timestamp);

  INodesInPath iip = fsd.addLastINode(parent, dir, true);
  if (iip != null && aclEntries != null) {
    AclStorage.updateINodeAcl(dir, aclEntries, Snapshot.CURRENT_STATE_ID);
  }
  return iip;
}
Project: aliyun-oss-hadoop-fs    File: INodeDirectory.java
/**
 * Add a child inode to the directory.
 * 
 * @param node INode to insert
 * @param setModTime set modification time for the parent node
 *                   not needed when replaying the addition and 
 *                   the parent already has the proper mod time
 * @return false if the child with this name already exists; 
 *         otherwise, return true;
 */
public boolean addChild(INode node, final boolean setModTime,
    final int latestSnapshotId) throws QuotaExceededException {
  final int low = searchChildren(node.getLocalNameBytes());
  if (low >= 0) {
    return false;
  }

  if (isInLatestSnapshot(latestSnapshotId)) {
    // create snapshot feature if necessary
    DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
    if (sf == null) {
      sf = this.addSnapshotFeature(null);
    }
    return sf.addChild(this, node, setModTime, latestSnapshotId);
  }
  addChild(node, low);
  if (setModTime) {
    // update modification time of the parent directory
    updateModificationTime(node.getModificationTime(), latestSnapshotId);
  }
  return true;
}
Project: aliyun-oss-hadoop-fs    File: INodeDirectory.java
/**
 * Undo the rename operation for the dst tree, i.e., if the rename operation
 * (with OVERWRITE option) removes a file/dir from the dst tree, add it back
 * and delete possible record in the deleted list.  
 */
public void undoRename4DstParent(final BlockStoragePolicySuite bsps,
    final INode deletedChild,
    int latestSnapshotId) throws QuotaExceededException {
  DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  assert sf != null : "Directory does not have snapshot feature";
  boolean removeDeletedChild = sf.getDiffs().removeChild(ListType.DELETED,
      deletedChild);
  int sid = removeDeletedChild ? Snapshot.CURRENT_STATE_ID : latestSnapshotId;
  final boolean added = addChild(deletedChild, true, sid);
  // update quota usage if the child was added successfully and the old child
  // has not been stored in the deleted list before
  if (added && !removeDeletedChild) {
    final QuotaCounts counts = deletedChild.computeQuotaUsage(bsps);
    addSpaceConsumed(counts, false);

  }
}
Project: aliyun-oss-hadoop-fs    File: TestSymlinkHdfs.java
@Test(timeout=10000)
/** Test createSymlink(..) with quota. */
public void testQuota() throws IOException {
  final Path dir = new Path(testBaseDir1());
  dfs.setQuota(dir, 3, HdfsConstants.QUOTA_DONT_SET);

  final Path file = new Path(dir, "file");
  createAndWriteFile(file);

  //creating the first link should succeed
  final Path link1 = new Path(dir, "link1");
  wrapper.createSymlink(file, link1, false);

  try {
    //creating the second link should fail with QuotaExceededException.
    final Path link2 = new Path(dir, "link2");
    wrapper.createSymlink(file, link2, false);
    fail("Created symlink despite quota violation");
  } catch(QuotaExceededException qee) {
    //expected
  }
}
Project: aliyun-oss-hadoop-fs    File: TestAbandonBlock.java
@Test
/** Make sure that the quota is decremented correctly when a block is abandoned */
public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
  // Setting diskspace quota to 3MB
  fs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);

  // Start writing a file with 2 replicas to ensure each datanode has one.
  // Block Size is 1MB.
  String src = FILE_NAME_PREFIX + "test_quota1";
  FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)2, 1024 * 1024);
  for (int i = 0; i < 1024; i++) {
    fout.writeByte(123);
  }

  // Shutdown one datanode, causing the block abandonment.
  cluster.getDataNodes().get(0).shutdown();

  // Close the file, new block will be allocated with 2MB pending size.
  try {
    fout.close();
  } catch (QuotaExceededException e) {
    fail("Unexpected quota exception when closing fout");
  }
}
Project: aliyun-oss-hadoop-fs    File: TestINodeFile.java
/**
 * For a given path, build a tree of INodes and return the leaf node.
 */
private INode createTreeOfInodes(String path) throws QuotaExceededException {
  byte[][] components = INode.getPathComponents(path);
  FsPermission perm = FsPermission.createImmutable((short)0755);
  PermissionStatus permstatus = PermissionStatus.createImmutable("", "", perm);

  long id = 0;
  INodeDirectory prev = new INodeDirectory(++id, new byte[0], permstatus, 0);
  INodeDirectory dir = null;
  for (byte[] component : components) {
    if (component.length == 0) {
      continue;
    }
    System.out.println("Adding component " + DFSUtil.bytes2String(component));
    dir = new INodeDirectory(++id, component, permstatus, 0);
    prev.addChild(dir, false, Snapshot.CURRENT_STATE_ID);
    prev = dir;
  }
  return dir; // Last Inode in the chain
}
Project: big-c    File: DirectoryWithQuotaFeature.java
void addSpaceConsumed(final INodeDirectory dir, final QuotaCounts counts,
    boolean verify) throws QuotaExceededException {
  if (dir.isQuotaSet()) {
    // The following steps are important:
    // check quotas in this inode and all ancestors before changing counts
    // so that no change is made if there is any quota violation.
    // (1) verify quota in this inode
    if (verify) {
      verifyQuota(counts);
    }
    // (2) verify quota and then add count in ancestors
    dir.addSpaceConsumed2Parent(counts, verify);
    // (3) add count in this inode
    addSpaceConsumed2Cache(counts);
  } else {
    dir.addSpaceConsumed2Parent(counts, verify);
  }
}
Project: big-c    File: FSDirectory.java
/** update count of each inode with quota
 * 
 * @param iip inodes in a path
 * @param numOfINodes the number of inodes to update starting from index 0
 * @param counts the count of space/namespace/type usage to be updated
 * @param checkQuota if true then check if quota is exceeded
 * @throws QuotaExceededException if the new count violates any quota limit
 */
void updateCount(INodesInPath iip, int numOfINodes,
                  QuotaCounts counts, boolean checkQuota)
                  throws QuotaExceededException {
  assert hasWriteLock();
  if (!namesystem.isImageLoaded()) {
    //still initializing. do not check or update quotas.
    return;
  }
  if (numOfINodes > iip.length()) {
    numOfINodes = iip.length();
  }
  if (checkQuota && !skipQuotaCheck) {
    verifyQuota(iip, numOfINodes, counts, null);
  }
  unprotectedUpdateCount(iip, numOfINodes, counts);
}
Project: big-c    File: FSDirectory.java
private void verifyQuotaForTruncate(INodesInPath iip, INodeFile file,
    long newLength, QuotaCounts delta) throws QuotaExceededException {
  if (!getFSNamesystem().isImageLoaded() || shouldSkipQuotaChecks()) {
    // Do not check quota if edit log is still being processed
    return;
  }
  final long diff = file.computeQuotaDeltaForTruncate(newLength);
  final short repl = file.getBlockReplication();
  delta.addStorageSpace(diff * repl);
  final BlockStoragePolicy policy = getBlockStoragePolicySuite()
      .getPolicy(file.getStoragePolicyID());
  List<StorageType> types = policy.chooseStorageTypes(repl);
  for (StorageType t : types) {
    if (t.supportTypeQuota()) {
      delta.addTypeSpace(t, diff);
    }
  }
  if (diff > 0) {
    readLock();
    try {
      verifyQuota(iip, iip.length() - 1, delta, null);
    } finally {
      readUnlock();
    }
  }
}
Project: big-c    File: FSDirRenameOp.java
/**
 * Verify quota for a rename operation where srcInodes[srcInodes.length-1] moves
 * to dstInodes[dstInodes.length-1]
 */
private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src,
    INodesInPath dst) throws QuotaExceededException {
  if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
    // Do not check quota if edits log is still being processed
    return;
  }
  int i = 0;
  while(src.getINode(i) == dst.getINode(i)) { i++; }
  // src[i - 1] is the last common ancestor.
  BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
  final QuotaCounts delta = src.getLastINode().computeQuotaUsage(bsps);

  // Reduce the required quota by dst that is being removed
  final INode dstINode = dst.getLastINode();
  if (dstINode != null) {
    delta.subtract(dstINode.computeQuotaUsage(bsps));
  }
  FSDirectory.verifyQuota(dst, dst.length() - 1, delta, src.getINode(i - 1));
}
Project: big-c    File: FSDirRenameOp.java
void restoreSource() throws QuotaExceededException {
  // Rename failed - restore src
  final INode oldSrcChild = srcChild;
  // put it back
  if (withCount == null) {
    srcChild.setLocalName(srcChildName);
  } else if (!srcChildIsReference) { // src must be in snapshot
    // the withCount node will no longer be used thus no need to update
    // its reference number here
    srcChild = withCount.getReferredINode();
    srcChild.setLocalName(srcChildName);
  } else {
    withCount.removeReference(oldSrcChild.asReference());
    srcChild = new INodeReference.DstReference(srcParent, withCount,
        srcRefDstSnapshot);
    withCount.getReferredINode().setLocalName(srcChildName);
  }

  if (isSrcInSnapshot) {
    srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild);
  } else {
    // srcParent is not an INodeDirectoryWithSnapshot, we only need to add
    // the srcChild back
    fsd.addLastINodeNoQuotaCheck(srcParentIIP, srcChild);
  }
}
Project: big-c    File: FSDirRenameOp.java
boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
    throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    filesDeleted = true;
  } else {
    filesDeleted = oldDstChild.cleanSubtree(bsps, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes)
        .getNameSpace() >= 0;
  }
  fsd.getFSNamesystem().removeLeasesAndINodes(src, removedINodes, false);
  return filesDeleted;
}
Project: big-c    File: FSDirAttrOp.java
static void unprotectedSetOwner(
    FSDirectory fsd, String src, String username, String groupname)
    throws FileNotFoundException, UnresolvedLinkException,
    QuotaExceededException, SnapshotAccessControlException {
  assert fsd.hasWriteLock();
  final INodesInPath inodesInPath = fsd.getINodesInPath4Write(src, true);
  INode inode = inodesInPath.getLastINode();
  if (inode == null) {
    throw new FileNotFoundException("File does not exist: " + src);
  }
  if (username != null) {
    inode = inode.setUser(username, inodesInPath.getLatestSnapshotId());
  }
  if (groupname != null) {
    inode.setGroup(groupname, inodesInPath.getLatestSnapshotId());
  }
}
Project: big-c    File: FSDirAttrOp.java
private static boolean unprotectedSetTimes(
    FSDirectory fsd, INode inode, long mtime, long atime, boolean force,
    int latest) throws QuotaExceededException {
  assert fsd.hasWriteLock();
  boolean status = false;
  if (mtime != -1) {
    inode = inode.setModificationTime(mtime, latest);
    status = true;
  }
  if (atime != -1) {
    long inodeTime = inode.getAccessTime();

    // if the last access time update was within the last precision interval, then
    // no need to store access time
    if (atime <= inodeTime + fsd.getFSNamesystem().getAccessTimePrecision()
        && !force) {
      status =  false;
    } else {
      inode.setAccessTime(atime, latest);
      status = true;
    }
  }
  return status;
}