Java class org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota — example source code

Project: hadoop-plus    File: TestSnapshotDeletion.java
/** Checks both the cached quota usage and a freshly computed one against the expected values. */
private void checkQuotaUsageComputation(final Path dirPath,
    final long expectedNs, final long expectedDs) throws IOException {
  INode node = fsdir.getINode(dirPath.toString());
  assertTrue(node.isDirectory() && node.isQuotaSet());
  INodeDirectoryWithQuota dirNode = (INodeDirectoryWithQuota) node;
  // Verify the namespace/diskspace counters cached on the quota node.
  assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
      dirNode.getNamespace());
  assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs,
      dirNode.getDiskspace());
  // Recompute the usage from scratch and verify it matches as well.
  Quota.Counts counts = Quota.Counts.newInstance();
  dirNode.computeQuotaUsage(counts, false);
  assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
      counts.get(Quota.NAMESPACE));
  assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs,
      counts.get(Quota.DISKSPACE));
}
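The helper above verifies that the quota counters cached on an INodeDirectoryWithQuota agree with a fresh computeQuotaUsage() pass. A minimal usage sketch, assuming the usual TestSnapshotDeletion fixtures (hdfs, BLOCKSIZE, REPLICATION, seed); the path and expected values are illustrative:

Path dir = new Path("/TestSnapshot/sub");
hdfs.mkdirs(dir);
// Setting any quota turns the directory into an INodeDirectoryWithQuota.
hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
DFSTestUtil.createFile(hdfs, new Path(dir, "file0"), BLOCKSIZE, REPLICATION, seed);
// Namespace: the directory plus one file = 2; diskspace: one block times its replication.
checkQuotaUsageComputation(dir, 2, BLOCKSIZE * REPLICATION);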
Project: hadoop-TCP    File: TestSnapshotDeletion.java   (verbatim the same checkQuotaUsageComputation helper as the hadoop-plus snippet above)
Project: hardfs    File: TestSnapshotDeletion.java   (verbatim the same checkQuotaUsageComputation helper as the hadoop-plus snippet above)
Project: RDFS    File: TestQuota.java
/**
 * Test HDFS operations that change the disk space consumed by a directory
 * tree, namely create, rename, delete, append, and setReplication.
 *
 * This is based on testNamespaceCommands() above.
 */
public void testSpaceCommands() throws Exception {
  final Configuration conf = new Configuration();
  // Use a smaller block size so that we can test with smaller diskspace quotas.
  conf.set("dfs.block.size", "512");
  conf.setBoolean("dfs.support.append", true);
  final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  final FileSystem fs = cluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);

  final DistributedFileSystem dfs = (DistributedFileSystem) fs;
  FSDirectory fsd = cluster.getNameNode().namesystem.dir;
  INodeDirectoryWithQuota rootDir = (INodeDirectoryWithQuota) (fsd
      .getExistingPathINodes("/")[0]);
  try {
    generateFiles(dfs, rootDir, 1024, 512);
    generateFiles(dfs, rootDir, 1019, 512);
  } finally {
    cluster.shutdown();
  }
}
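After generateFiles (a project-local helper not shown in this listing) writes data, the diskspace cached on the root quota node should agree with what the client-side ContentSummary reports. A minimal cross-check sketch, assuming the getDiskspace() accessor shown in the snippets above is available in this branch:

ContentSummary cs = dfs.getContentSummary(new Path("/"));
// Space consumed counts every replica, i.e. bytes written times replication.
assertEquals(cs.getSpaceConsumed(), rootDir.getDiskspace());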
Project: hadoop-EAR    File: TestQuota.java
/**
 * Test HDFS operations that change the disk space consumed by a directory
 * tree, namely create, rename, delete, append, and setReplication.
 *
 * This is based on testNamespaceCommands() above.
 */
@Test
public void testSpaceCommands() throws Exception {
  // Use a smaller block size and enable append support.
  setUp(true, true);

  FSDirectory fsd = cluster.getNameNode().namesystem.dir;
  INodeDirectoryWithQuota rootDir = (INodeDirectoryWithQuota) (fsd
      .getExistingPathINodes("/")[0]);
  generateFiles(dfs, rootDir, 1024, 512);
  generateFiles(dfs, rootDir, 1019, 512);
}
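The setUp(boolean, boolean) helper is not shown in this listing. A hypothetical reconstruction, inferred from the inline configuration in the RDFS variant above; the cluster and dfs fields are assumed test fixtures:

private void setUp(boolean smallBlockSize, boolean supportAppend)
    throws IOException {
  Configuration conf = new Configuration();
  if (smallBlockSize) {
    // A small block size keeps diskspace quotas small and the test fast.
    conf.set("dfs.block.size", "512");
  }
  conf.setBoolean("dfs.support.append", supportAppend);
  cluster = new MiniDFSCluster(conf, 2, true, null);
  dfs = (DistributedFileSystem) cluster.getFileSystem();
}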
Project: hops    File: BaseINodeLock.java
protected void acquireINodeAttributes()
    throws StorageException, TransactionContextException {
  List<INodeCandidatePrimaryKey> pks = new ArrayList<>();
  // Only quota directories carry separately stored INodeAttributes rows, so
  // collect the primary keys of those inodes and batch-lock their attributes.
  for (INode inode : getAllResolvedINodes()) {
    if (inode instanceof INodeDirectoryWithQuota) {
      pks.add(new INodeCandidatePrimaryKey(inode.getId()));
    }
  }
  acquireLockList(DEFAULT_LOCK_TYPE, INodeAttributes.Finder.ByINodeIds, pks);
}
Project: hops    File: INodeDALAdaptor.java
@Override
public INode convertHDFStoDAL(
    org.apache.hadoop.hdfs.server.namenode.INode inode)
    throws StorageException {
  INode hopINode = null;
  if (inode != null) {
    hopINode = new INode();
    hopINode.setModificationTime(inode.getModificationTime());
    hopINode.setAccessTime(inode.getAccessTime());
    hopINode.setName(inode.getLocalName());

    hopINode.setUserID(inode.getUserID());
    hopINode.setGroupID(inode.getGroupID());
    hopINode.setPermission(inode.getFsPermission().toShort());
    hopINode.setParentId(inode.getParentId());
    hopINode.setId(inode.getId());
    hopINode.setIsDir(inode.isDirectory());
    hopINode.setPartitionId(inode.getPartitionId());
    hopINode.setLogicalTime(inode.getLogicalTime());

    if (inode.isDirectory()) {
      hopINode.setUnderConstruction(false);
      hopINode.setDirWithQuota(inode instanceof INodeDirectoryWithQuota);
      hopINode.setMetaEnabled(((INodeDirectory) inode).isMetaEnabled());
    }
    if (inode instanceof INodeFile) {
      hopINode.setUnderConstruction(inode.isUnderConstruction());
      hopINode.setDirWithQuota(false);
      if (inode instanceof INodeFileUnderConstruction) {
        hopINode.setClientName(
            ((INodeFileUnderConstruction) inode).getClientName());
        hopINode.setClientMachine(
            ((INodeFileUnderConstruction) inode).getClientMachine());
        hopINode.setClientNode(
            ((INodeFileUnderConstruction) inode).getClientNode() == null ?
                null : ((INodeFileUnderConstruction) inode).getClientNode()
                .getXferAddr());
      }
      hopINode.setGenerationStamp(((INodeFile) inode).getGenerationStamp());
      hopINode.setFileSize(((INodeFile) inode).getSize());
      hopINode.setFileStoredInDB(((INodeFile) inode).isFileStoredInDB());
    }
    if (inode instanceof INodeSymlink) {
      hopINode.setUnderConstruction(false);
      hopINode.setDirWithQuota(false);

      String linkValue =
          DFSUtil.bytes2String(((INodeSymlink) inode).getSymlink());
      hopINode.setSymlink(linkValue);
    }
    hopINode.setSubtreeLocked(inode.isSubtreeLocked());
    hopINode.setSubtreeLockOwner(inode.getSubtreeLockOwner());
    // setHeader must run inside the null check: both inode and hopINode
    // are null otherwise, and either dereference would throw an NPE.
    hopINode.setHeader(inode.getHeader());
  }
  return hopINode;
}
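A minimal usage sketch of the adaptor (hypothetical harness; isDirWithQuota() is an assumed getter matching the setter above): directories backed by INodeDirectoryWithQuota should come out of the conversion with the dirWithQuota flag set.

// quotaDir: an org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota
INode dal = adaptor.convertHDFStoDAL(quotaDir);
assert dal != null && dal.isDirWithQuota();
// Plain files and symlinks, by contrast, always map to dirWithQuota == false.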