Java 类org.apache.hadoop.hdfs.util.ByteArray 实例源码

项目:hadoop-on-lustre    文件:FSDirectory.java   
/**
 * Builds the directory-tree manager for the given namesystem.
 *
 * Creates the root directory with default 0755 owner permissions, wires the
 * removed-dirs restore and edits-toleration settings into the FSImage, and
 * sizes the directory-listing limit and file-name cache from configuration.
 *
 * @param fsImage image whose restore/toleration settings are configured here
 * @param ns      owning namesystem, used for root permissions
 * @param conf    configuration supplying limits and thresholds
 */
FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
  rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
      ns.createFsOwnerPermissions(new FsPermission((short)0755)),
      Integer.MAX_VALUE, -1);
  this.fsImage = fsImage;
  fsImage.setRestoreRemovedDirs(conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,
      DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT));
  fsImage.setEditsTolerationLength(conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_EDITS_TOLERATION_LENGTH_KEY,
      DFSConfigKeys.DFS_NAMENODE_EDITS_TOLERATION_LENGTH_DEFAULT));

  namesystem = ns;
  // A non-positive configured listing limit falls back to the default.
  int configuredLimit = conf.getInt(
      DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
  this.lsLimit = configuredLimit > 0 ?
      configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT;

  int threshold = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
  // Fixed log-message typo: "occuring" -> "occurring".
  NameNode.LOG.info("Caching file names occurring more than " + threshold
      + " times ");
  nameCache = new NameCache<ByteArray>(threshold);
}
项目:cumulus    文件:FSDirectory.java   
/**
 * Builds the directory-tree manager for the given namesystem.
 *
 * Installs a fair read-write lock (writers cannot be starved), creates the
 * root directory with default 0755 owner permissions, and sizes the
 * directory-listing limit and file-name cache from configuration.
 *
 * @param fsImage image to associate with the namesystem
 * @param ns      owning namesystem, used for root permissions
 * @param conf    configuration supplying limits and thresholds
 */
FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
  this.bLock = new ReentrantReadWriteLock(true); // fair
  this.cond = bLock.writeLock().newCondition();
  fsImage.setFSNamesystem(ns);
  rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
      ns.createFsOwnerPermissions(new FsPermission((short)0755)),
      Integer.MAX_VALUE, UNKNOWN_DISK_SPACE);
  this.fsImage = fsImage;
  // A non-positive configured listing limit falls back to the default.
  int configuredLimit = conf.getInt(
      DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
  this.lsLimit = configuredLimit > 0 ?
      configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT;

  int threshold = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
  // Fixed log-message typo: "occuring" -> "occurring".
  NameNode.LOG.info("Caching file names occurring more than " + threshold
      + " times ");
  nameCache = new NameCache<ByteArray>(threshold);
}
项目:RDFS    文件:FSDirectory.java   
/**
 * Builds the directory-tree manager for the given namesystem.
 *
 * Creates the root directory with default 0755 owner permissions, sizes the
 * directory-listing limit and file-name cache (this fork uses literal config
 * keys rather than DFSConfigKeys constants), and runs further initialization.
 *
 * @param fsImage image to associate with the namesystem
 * @param ns      owning namesystem, used for root permissions
 * @param conf    configuration supplying limits and thresholds
 */
FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
  rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
      ns.createFsOwnerPermissions(new FsPermission((short)0755)),
      Integer.MAX_VALUE, Long.MAX_VALUE);
  this.fsImage = fsImage;
  this.fsImage.setFSNamesystem(ns);
  // A non-positive configured listing limit falls back to the default (1000).
  int configuredLimit = conf.getInt(
      "dfs.ls.limit", 1000);
  this.lsLimit = configuredLimit > 0 ?
      configuredLimit : 1000;
  int threshold = conf.getInt(
      "dfs.namenode.name.cache.threshold",
      10);
  // Fixed log-message typo: "occuring" -> "occurring".
  NameNode.LOG.info("Caching file names occurring more than " + threshold
      + " times ");
  nameCache = new NameCache<ByteArray>(threshold);
  initialize(conf);
}
项目:hortonworks-extension    文件:FSDirectory.java   
/**
 * Builds the directory-tree manager for the given namesystem.
 *
 * Creates the root directory with default 0755 owner permissions, wires the
 * removed-dirs restore and edits-toleration settings into the FSImage, and
 * sizes the directory-listing limit and file-name cache from configuration.
 *
 * @param fsImage image whose restore/toleration settings are configured here
 * @param ns      owning namesystem, used for root permissions
 * @param conf    configuration supplying limits and thresholds
 */
FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
  rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
      ns.createFsOwnerPermissions(new FsPermission((short)0755)),
      Integer.MAX_VALUE, -1);
  this.fsImage = fsImage;
  fsImage.setRestoreRemovedDirs(conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,
      DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT));
  fsImage.setEditsTolerationLength(conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_EDITS_TOLERATION_LENGTH_KEY,
      DFSConfigKeys.DFS_NAMENODE_EDITS_TOLERATION_LENGTH_DEFAULT));

  namesystem = ns;
  // A non-positive configured listing limit falls back to the default.
  int configuredLimit = conf.getInt(
      DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
  this.lsLimit = configuredLimit > 0 ?
      configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT;

  int threshold = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
  // Fixed log-message typo: "occuring" -> "occurring".
  NameNode.LOG.info("Caching file names occurring more than " + threshold
      + " times ");
  nameCache = new NameCache<ByteArray>(threshold);
}
项目:hortonworks-extension    文件:FSDirectory.java   
/**
 * Builds the directory-tree manager for the given namesystem.
 *
 * Creates the root directory with default 0755 owner permissions, wires the
 * removed-dirs restore and edits-toleration settings into the FSImage, and
 * sizes the directory-listing limit and file-name cache from configuration.
 *
 * @param fsImage image whose restore/toleration settings are configured here
 * @param ns      owning namesystem, used for root permissions
 * @param conf    configuration supplying limits and thresholds
 */
FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
  rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
      ns.createFsOwnerPermissions(new FsPermission((short)0755)),
      Integer.MAX_VALUE, -1);
  this.fsImage = fsImage;
  fsImage.setRestoreRemovedDirs(conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,
      DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT));
  fsImage.setEditsTolerationLength(conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_EDITS_TOLERATION_LENGTH_KEY,
      DFSConfigKeys.DFS_NAMENODE_EDITS_TOLERATION_LENGTH_DEFAULT));

  namesystem = ns;
  // A non-positive configured listing limit falls back to the default.
  int configuredLimit = conf.getInt(
      DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
  this.lsLimit = configuredLimit > 0 ?
      configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT;

  int threshold = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
  // Fixed log-message typo: "occuring" -> "occurring".
  NameNode.LOG.info("Caching file names occurring more than " + threshold
      + " times ");
  nameCache = new NameCache<ByteArray>(threshold);
}
项目:hadoop    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 */
void cacheName(INode inode) {
  // Directories and other non-file inodes never participate in the cache.
  if (!inode.isFile()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:aliyun-oss-hadoop-fs    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 */
void cacheName(INode inode) {
  // Directories and other non-file inodes never participate in the cache.
  if (!inode.isFile()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:big-c    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 */
void cacheName(INode inode) {
  // Directories and other non-file inodes never participate in the cache.
  if (!inode.isFile()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 */
void cacheName(INode inode) {
  // Directories and other non-file inodes never participate in the cache.
  if (!inode.isFile()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:hadoop-EAR    文件:FSDirectoryNameCache.java   
/**
 * Creates a name cache that defers interning work until the image is loaded.
 * Queued inodes are processed asynchronously by a dedicated executor.
 */
public FSDirectoryNameCache(int threshold) {
  nameCache = new NameCache<ByteArray>(threshold);
  imageLoaded = false;

  // A single worker thread: queued caching work must be handled serially.
  cachingExecutor = Executors.newFixedThreadPool(1);
  cachingTasks = new ArrayList<Future<Void>>();
  cachingTempQueue = new ArrayList<INode>(MAX_QUEUE_SIZE);
}
项目:hadoop-EAR    文件:FSDirectoryNameCache.java   
/**
 * Interns one inode's local name in the cache and, when the cache returns a
 * shared copy, points the inode at that copy to reuse the byte array.
 */
private void cacheNameInternal(INode inode) {
  // Directories are skipped: only file names are interned.
  if (inode.isDirectory()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:hadoop-plus    文件:FSDirectory.java   
/**
 * Builds the directory-tree manager for the given namesystem.
 *
 * Installs a fair read-write lock, creates the root inode and its inode map,
 * and sizes the listing limit, per-path limits, and file-name cache from
 * configuration.
 *
 * @param fsImage image to associate with this directory
 * @param ns      owning namesystem, used to create the root
 * @param conf    configuration supplying limits and thresholds
 */
FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
  this.dirLock = new ReentrantReadWriteLock(true); // fair
  this.cond = dirLock.writeLock().newCondition();
  rootDir = createRoot(ns);
  inodeMap = INodeMap.newInstance(rootDir);
  this.fsImage = fsImage;
  // A non-positive configured listing limit falls back to the default.
  int configuredLimit = conf.getInt(
      DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
  this.lsLimit = configuredLimit > 0 ?
      configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT;

  // filesystem limits
  this.maxComponentLength = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY,
      DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_DEFAULT);
  this.maxDirItems = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
      DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);

  int threshold = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
  // Fixed log-message typo: "occuring" -> "occurring".
  NameNode.LOG.info("Caching file names occurring more than " + threshold
      + " times");
  nameCache = new NameCache<ByteArray>(threshold);
  namesystem = ns;
}
项目:hadoop-plus    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 */
void cacheName(INode inode) {
  // Directories and other non-file inodes never participate in the cache.
  if (!inode.isFile()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:FlexMap    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 */
void cacheName(INode inode) {
  // Directories and other non-file inodes never participate in the cache.
  if (!inode.isFile()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:hops    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 *
 * @throws StorageException            if the storage layer fails
 * @throws TransactionContextException if no transaction context is available
 */
void cacheName(INode inode)
    throws StorageException, TransactionContextException {
  // Directories and symlinks are skipped: only file names are interned.
  if (inode.isDirectory() || inode.isSymlink()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:hadoop-TCP    文件:FSDirectory.java   
/**
 * Builds the directory-tree manager for the given namesystem.
 *
 * Installs a fair read-write lock, creates the root inode and its inode map,
 * and sizes the listing limit, per-path limits, and file-name cache from
 * configuration.
 *
 * @param fsImage image to associate with this directory
 * @param ns      owning namesystem, used to create the root
 * @param conf    configuration supplying limits and thresholds
 */
FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
  this.dirLock = new ReentrantReadWriteLock(true); // fair
  this.cond = dirLock.writeLock().newCondition();
  rootDir = createRoot(ns);
  inodeMap = INodeMap.newInstance(rootDir);
  this.fsImage = fsImage;
  // A non-positive configured listing limit falls back to the default.
  int configuredLimit = conf.getInt(
      DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
  this.lsLimit = configuredLimit > 0 ?
      configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT;

  // filesystem limits
  this.maxComponentLength = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY,
      DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_DEFAULT);
  this.maxDirItems = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
      DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);

  int threshold = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
  // Fixed log-message typo: "occuring" -> "occurring".
  NameNode.LOG.info("Caching file names occurring more than " + threshold
      + " times");
  nameCache = new NameCache<ByteArray>(threshold);
  namesystem = ns;
}
项目:hadoop-TCP    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 */
void cacheName(INode inode) {
  // Directories and other non-file inodes never participate in the cache.
  if (!inode.isFile()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:hadoop-on-lustre    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 */
void cacheName(INode inode) {
  // Directories are skipped: only file names are interned.
  if (inode.isDirectory()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:hardfs    文件:FSDirectory.java   
/**
 * Builds the directory-tree manager for the given namesystem.
 *
 * Installs a fair read-write lock, creates the root inode and its inode map,
 * and sizes the listing limit, per-path limits, and file-name cache from
 * configuration.
 *
 * @param fsImage image to associate with this directory
 * @param ns      owning namesystem, used to create the root
 * @param conf    configuration supplying limits and thresholds
 */
FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
  this.dirLock = new ReentrantReadWriteLock(true); // fair
  this.cond = dirLock.writeLock().newCondition();
  rootDir = createRoot(ns);
  inodeMap = INodeMap.newInstance(rootDir);
  this.fsImage = fsImage;
  // A non-positive configured listing limit falls back to the default.
  int configuredLimit = conf.getInt(
      DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
  this.lsLimit = configuredLimit > 0 ?
      configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT;

  // filesystem limits
  this.maxComponentLength = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY,
      DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_DEFAULT);
  this.maxDirItems = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
      DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);

  int threshold = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
  // Fixed log-message typo: "occuring" -> "occurring".
  NameNode.LOG.info("Caching file names occurring more than " + threshold
      + " times");
  nameCache = new NameCache<ByteArray>(threshold);
  namesystem = ns;
}
项目:hardfs    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 */
void cacheName(INode inode) {
  // Directories and other non-file inodes never participate in the cache.
  if (!inode.isFile()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:hadoop-on-lustre2    文件:FSDirectory.java   
/**
 * Builds the directory-tree manager for the given namesystem.
 *
 * Installs a fair read-write lock, creates the root inode and its inode map,
 * validates the directory-items limit against the protobuf message-size
 * ceiling, and sizes the listing limit and file-name cache from
 * configuration.
 *
 * @param fsImage image to associate with this directory
 * @param ns      owning namesystem, used to create the root
 * @param conf    configuration supplying limits and thresholds
 */
FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
  this.dirLock = new ReentrantReadWriteLock(true); // fair
  this.cond = dirLock.writeLock().newCondition();
  rootDir = createRoot(ns);
  inodeMap = INodeMap.newInstance(rootDir);
  this.fsImage = fsImage;
  // A non-positive configured listing limit falls back to the default.
  int configuredLimit = conf.getInt(
      DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
  this.lsLimit = configuredLimit > 0 ?
      configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT;
  this.contentCountLimit = conf.getInt(
      DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,
      DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_DEFAULT);

  // filesystem limits
  this.maxComponentLength = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY,
      DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_DEFAULT);
  this.maxDirItems = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
      DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);
  // We need a maximum maximum because by default, PB limits message sizes
  // to 64MB. This means we can only store approximately 6.7 million entries
  // per directory, but let's use 6.4 million for some safety.
  final int MAX_DIR_ITEMS = 64 * 100 * 1000;
  // Message corrected: the check rejects values < 1, not < 0.
  Preconditions.checkArgument(
      maxDirItems > 0 && maxDirItems <= MAX_DIR_ITEMS, "Cannot set "
          + DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY
          + " to a value less than 1 or greater than " + MAX_DIR_ITEMS);

  int threshold = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
  // Fixed log-message typo: "occuring" -> "occurring".
  NameNode.LOG.info("Caching file names occurring more than " + threshold
      + " times");
  nameCache = new NameCache<ByteArray>(threshold);
  namesystem = ns;
}
项目:hadoop-on-lustre2    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 */
void cacheName(INode inode) {
  // Directories and other non-file inodes never participate in the cache.
  if (!inode.isFile()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:cumulus    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 */
void cacheName(INode inode) {
  // Directories and links are skipped: only file names are interned.
  if (inode.isDirectory() || inode.isLink()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:RDFS    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 */
void cacheName(INode inode) {
  // Directories are skipped: only file names are interned.
  if (inode.isDirectory()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:hortonworks-extension    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 */
void cacheName(INode inode) {
  // Directories are skipped: only file names are interned.
  if (inode.isDirectory()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:hortonworks-extension    文件:FSDirectory.java   
/**
 * Interns a file's local name through the shared name cache so that
 * frequently used names share a single byte array and heap usage shrinks.
 */
void cacheName(INode inode) {
  // Directories are skipped: only file names are interned.
  if (inode.isDirectory()) {
    return;
  }
  final ByteArray interned =
      nameCache.put(new ByteArray(inode.getLocalNameBytes()));
  // A non-null result means the cache holds a shared copy; adopt its bytes.
  if (interned != null) {
    inode.setLocalName(interned.getBytes());
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSDirectory.java   
/**
 * Builds the directory-tree manager for the given namesystem.
 *
 * Installs a fair read-write lock, creates the root inode and its inode map,
 * validates the xattr and directory-items limits, sizes the listing and
 * content-summary limits and the file-name cache, and sets up the
 * encryption-zone manager.
 *
 * @param ns   owning namesystem, used to create the root
 * @param conf configuration supplying limits and thresholds
 */
FSDirectory(FSNamesystem ns, Configuration conf) {
  this.dirLock = new ReentrantReadWriteLock(true); // fair
  rootDir = createRoot(ns);
  inodeMap = INodeMap.newInstance(rootDir);
  // A non-positive configured listing limit falls back to the default.
  int configuredLimit = conf.getInt(
      DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
  this.lsLimit = configuredLimit > 0 ?
      configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT;
  this.contentCountLimit = conf.getInt(
      DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,
      DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_DEFAULT);

  // filesystem limits
  this.maxComponentLength = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY,
      DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_DEFAULT);
  this.maxDirItems = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
      DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);
  this.inodeXAttrsLimit = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
      DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);

  Preconditions.checkArgument(this.inodeXAttrsLimit >= 0,
      "Cannot set a negative limit on the number of xattrs per inode (%s).",
      DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY);
  // We need a maximum maximum because by default, PB limits message sizes
  // to 64MB. This means we can only store approximately 6.7 million entries
  // per directory, but let's use 6.4 million for some safety.
  final int MAX_DIR_ITEMS = 64 * 100 * 1000;
  // Message corrected: the check rejects values < 1, not < 0.
  Preconditions.checkArgument(
      maxDirItems > 0 && maxDirItems <= MAX_DIR_ITEMS, "Cannot set "
          + DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY
          + " to a value less than 1 or greater than " + MAX_DIR_ITEMS);

  int threshold = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
  // Fixed log-message typo: "occuring" -> "occurring".
  NameNode.LOG.info("Caching file names occurring more than " + threshold
      + " times");
  nameCache = new NameCache<ByteArray>(threshold);
  namesystem = ns;

  ezManager = new EncryptionZoneManager(this, conf);
}
项目:FlexMap    文件:FSDirectory.java   
/**
 * Builds the directory-tree manager for the given namesystem.
 *
 * Installs a fair read-write lock, creates the root inode and its inode map,
 * validates the xattr and directory-items limits, sizes the listing and
 * content-summary limits and the file-name cache, and sets up the
 * encryption-zone manager.
 *
 * @param ns   owning namesystem, used to create the root
 * @param conf configuration supplying limits and thresholds
 */
FSDirectory(FSNamesystem ns, Configuration conf) {
  this.dirLock = new ReentrantReadWriteLock(true); // fair
  rootDir = createRoot(ns);
  inodeMap = INodeMap.newInstance(rootDir);
  // A non-positive configured listing limit falls back to the default.
  int configuredLimit = conf.getInt(
      DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
  this.lsLimit = configuredLimit > 0 ?
      configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT;
  this.contentCountLimit = conf.getInt(
      DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,
      DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_DEFAULT);

  // filesystem limits
  this.maxComponentLength = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY,
      DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_DEFAULT);
  this.maxDirItems = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
      DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);
  this.inodeXAttrsLimit = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
      DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);

  Preconditions.checkArgument(this.inodeXAttrsLimit >= 0,
      "Cannot set a negative limit on the number of xattrs per inode (%s).",
      DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY);
  // We need a maximum maximum because by default, PB limits message sizes
  // to 64MB. This means we can only store approximately 6.7 million entries
  // per directory, but let's use 6.4 million for some safety.
  final int MAX_DIR_ITEMS = 64 * 100 * 1000;
  // Message corrected: the check rejects values < 1, not < 0.
  Preconditions.checkArgument(
      maxDirItems > 0 && maxDirItems <= MAX_DIR_ITEMS, "Cannot set "
          + DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY
          + " to a value less than 1 or greater than " + MAX_DIR_ITEMS);

  int threshold = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
      DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
  // Fixed log-message typo: "occuring" -> "occurring".
  NameNode.LOG.info("Caching file names occurring more than " + threshold
      + " times");
  nameCache = new NameCache<ByteArray>(threshold);
  namesystem = ns;

  ezManager = new EncryptionZoneManager(this, conf);
}