Java Class org.apache.hadoop.hdfs.server.balancer.NameNodeConnector: Example Source Code

Project: hadoop    File: Mover.java
Mover(NameNodeConnector nnc, Configuration conf, AtomicInteger retryCount) {
  final long movedWinWidth = conf.getLong(
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY,
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_DEFAULT);
  final int moverThreads = conf.getInt(
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY,
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_DEFAULT);
  final int maxConcurrentMovesPerNode = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
  this.retryMaxAttempts = conf.getInt(
      DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY,
      DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT);
  this.retryCount = retryCount;
  this.dispatcher = new Dispatcher(nnc, Collections.<String> emptySet(),
      Collections.<String> emptySet(), movedWinWidth, moverThreads, 0,
      maxConcurrentMovesPerNode, conf);
  this.storages = new StorageMap();
  this.targetPaths = nnc.getTargetPaths();
  this.blockStoragePolicies = new BlockStoragePolicy[1 <<
      BlockStoragePolicySuite.ID_BIT_LENGTH];
}
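
Note: the constructor above only reads its tuning values from the passed Configuration; it does not define them. A minimal sketch of preparing such a Configuration follows (not part of the original snippet: the helper name newMoverConf and the numeric values are illustrative, and each key simply falls back to its DFSConfigKeys *_DEFAULT value when unset):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

static Configuration newMoverConf() {
  Configuration conf = new HdfsConfiguration();
  // Illustrative overrides; values here are examples, not recommendations.
  conf.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 5400L * 1000L); // time window (ms) for remembering recently moved blocks
  conf.setInt(DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY, 1000);            // size of the dispatcher thread pool
  conf.setInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, 5); // per-datanode concurrent move limit
  conf.setInt(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, 10);        // retry budget shared via the retryCount counter
  return conf;
}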
Project: aliyun-oss-hadoop-fs    File: Mover.java
Mover(NameNodeConnector nnc, Configuration conf, AtomicInteger retryCount) {
  final long movedWinWidth = conf.getLong(
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY,
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_DEFAULT);
  final int moverThreads = conf.getInt(
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY,
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_DEFAULT);
  final int maxConcurrentMovesPerNode = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
  this.retryMaxAttempts = conf.getInt(
      DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY,
      DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT);
  this.retryCount = retryCount;
  this.dispatcher = new Dispatcher(nnc, Collections.<String> emptySet(),
      Collections.<String> emptySet(), movedWinWidth, moverThreads, 0,
      maxConcurrentMovesPerNode, conf);
  this.storages = new StorageMap();
  this.targetPaths = nnc.getTargetPaths();
  this.blockStoragePolicies = new BlockStoragePolicy[1 <<
      BlockStoragePolicySuite.ID_BIT_LENGTH];
}
Project: big-c    File: Mover.java
Mover(NameNodeConnector nnc, Configuration conf, AtomicInteger retryCount) {
  final long movedWinWidth = conf.getLong(
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY,
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_DEFAULT);
  final int moverThreads = conf.getInt(
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY,
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_DEFAULT);
  final int maxConcurrentMovesPerNode = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
  this.retryMaxAttempts = conf.getInt(
      DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY,
      DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT);
  this.retryCount = retryCount;
  this.dispatcher = new Dispatcher(nnc, Collections.<String> emptySet(),
      Collections.<String> emptySet(), movedWinWidth, moverThreads, 0,
      maxConcurrentMovesPerNode, conf);
  this.storages = new StorageMap();
  this.targetPaths = nnc.getTargetPaths();
  this.blockStoragePolicies = new BlockStoragePolicy[1 <<
      BlockStoragePolicySuite.ID_BIT_LENGTH];
}
Project: hadoop-2.6.0-cdh5.4.3    File: Mover.java
Mover(NameNodeConnector nnc, Configuration conf) {
  final long movedWinWidth = conf.getLong(
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY,
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_DEFAULT);
  final int moverThreads = conf.getInt(
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY,
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_DEFAULT);
  final int maxConcurrentMovesPerNode = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);

  this.dispatcher = new Dispatcher(nnc, Collections.<String> emptySet(),
      Collections.<String> emptySet(), movedWinWidth, moverThreads, 0,
      maxConcurrentMovesPerNode, conf);
  this.storages = new StorageMap();
  this.targetPaths = nnc.getTargetPaths();
  this.blockStoragePolicies = new BlockStoragePolicy[1 <<
      BlockStoragePolicySuite.ID_BIT_LENGTH];
}
Project: FlexMap    File: Mover.java
Mover(NameNodeConnector nnc, Configuration conf) {
  final long movedWinWidth = conf.getLong(
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY,
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_DEFAULT);
  final int moverThreads = conf.getInt(
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY,
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_DEFAULT);
  final int maxConcurrentMovesPerNode = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);

  this.dispatcher = new Dispatcher(nnc, Collections.<String> emptySet(),
      Collections.<String> emptySet(), movedWinWidth, moverThreads, 0,
      maxConcurrentMovesPerNode, conf);
  this.storages = new StorageMap();
  this.targetPaths = nnc.getTargetPaths();
  this.blockStoragePolicies = new BlockStoragePolicy[1 <<
      BlockStoragePolicySuite.ID_BIT_LENGTH];
}
Project: hadoop    File: TestMover.java
static Mover newMover(Configuration conf) throws IOException {
  final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  Assert.assertEquals(1, namenodes.size());
  Map<URI, List<Path>> nnMap = Maps.newHashMap();
  for (URI nn : namenodes) {
    nnMap.put(nn, null);
  }

  final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
      nnMap, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf,
      NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
  return new Mover(nncs.get(0), conf, new AtomicInteger(0));
}
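
In this variant, newNameNodeConnectors takes a Map<URI, List<Path>>; a null value, as above, means the Mover scans the whole namespace. A hedged sketch of restricting the scan to specific paths instead ("/archive" is a hypothetical path, not taken from the test; java.util.Arrays is assumed imported):

Map<URI, List<Path>> nnMap = Maps.newHashMap();
for (URI nn : DFSUtil.getNsServiceRpcUris(conf)) {
  nnMap.put(nn, Arrays.asList(new Path("/archive"))); // migrate only this subtree
}
final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
    nnMap, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf,
    NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
Mover mover = new Mover(nncs.get(0), conf, new AtomicInteger(0));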
Project: aliyun-oss-hadoop-fs    File: TestMover.java
static Mover newMover(Configuration conf) throws IOException {
  final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  Assert.assertEquals(1, namenodes.size());
  Map<URI, List<Path>> nnMap = Maps.newHashMap();
  for (URI nn : namenodes) {
    nnMap.put(nn, null);
  }

  final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
      nnMap, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf,
      NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
  return new Mover(nncs.get(0), conf, new AtomicInteger(0));
}
Project: big-c    File: TestMover.java
static Mover newMover(Configuration conf) throws IOException {
  final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  Assert.assertEquals(1, namenodes.size());
  Map<URI, List<Path>> nnMap = Maps.newHashMap();
  for (URI nn : namenodes) {
    nnMap.put(nn, null);
  }

  final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
      nnMap, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf,
      NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
  return new Mover(nncs.get(0), conf, new AtomicInteger(0));
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestMover.java
static Mover newMover(Configuration conf) throws IOException {
  final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  Assert.assertEquals(1, namenodes.size());

  final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
      namenodes, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf);
  return new Mover(nncs.get(0), conf);
}
Project: FlexMap    File: TestMover.java
static Mover newMover(Configuration conf) throws IOException {
  final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  Assert.assertEquals(1, namenodes.size());

  final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
      namenodes, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf);
  return new Mover(nncs.get(0), conf);
}
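
The hadoop-2.6.0-cdh5.4.3 and FlexMap snippets use the older API: newNameNodeConnectors takes the URI collection directly and the Mover constructor has no retry counter. A hedged end-to-end sketch for that variant (the step that actually drives the Mover is elided because its method name varies across versions; cleanup uses org.apache.hadoop.io.IOUtils):

Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
    namenodes, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf);
try {
  Mover mover = new Mover(nncs.get(0), conf);
  // ... drive the mover against the connector's namenode ...
} finally {
  // NameNodeConnector is Closeable; close it to release the resources it holds
  // (such as the mover ID file used to keep concurrent movers from running).
  for (NameNodeConnector nnc : nncs) {
    IOUtils.cleanup(null, nnc);
  }
}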