Example source code for the Java class org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup

项目:hadoop    文件:TestBalancerWithNodeGroup.java   
/**
 * Builds a 4-datanode cluster: n0 and n1 on RACK0/NODEGROUP0, n2 on
 * RACK1/NODEGROUP1, n3 on RACK1/NODEGROUP2. The cluster is filled to 60%
 * with replication factor 3, which forces n2 and n3 to carry a replica of
 * every block (each ends up 80% full = 60% x 4 / 3). Under the node-group
 * balancer policy no block may legally migrate from n2/n3 to n0/n1, so the
 * balancer is expected to terminate within 5 iterations without moving a
 * single block.
 */
@Test(timeout=60000)
public void testBalancerEndInNoMoveProgress() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long clusterCapacity = TestBalancer.sum(capacities);
    // write enough data (replication 3) to fill the cluster to 60%
    long usedSpace = clusterCapacity * 6 / 10;
    TestBalancer.createFile(cluster, filePath, usedSpace / 3, (short) 3, 0);

    // balancer should finish in 5 iterations with no block movement
    runBalancerCanFinish(conf, usedSpace, clusterCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestBalancerWithNodeGroup.java   
/**
 * Builds a 4-datanode cluster: n0 and n1 on RACK0/NODEGROUP0, n2 on
 * RACK1/NODEGROUP1, n3 on RACK1/NODEGROUP2. The cluster is filled to 60%
 * with replication factor 3, which forces n2 and n3 to carry a replica of
 * every block (each ends up 80% full = 60% x 4 / 3). Under the node-group
 * balancer policy no block may legally migrate from n2/n3 to n0/n1, so the
 * balancer is expected to terminate within 5 iterations without moving a
 * single block.
 */
@Test(timeout=60000)
public void testBalancerEndInNoMoveProgress() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long clusterCapacity = TestBalancer.sum(capacities);
    // write enough data (replication 3) to fill the cluster to 60%
    long usedSpace = clusterCapacity * 6 / 10;
    TestBalancer.createFile(cluster, filePath, usedSpace / 3, (short) 3, 0);

    // balancer should finish in 5 iterations with no block movement
    runBalancerCanFinish(conf, usedSpace, clusterCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:big-c    文件:TestBalancerWithNodeGroup.java   
/**
 * Builds a 4-datanode cluster: n0 and n1 on RACK0/NODEGROUP0, n2 on
 * RACK1/NODEGROUP1, n3 on RACK1/NODEGROUP2. The cluster is filled to 60%
 * with replication factor 3, which forces n2 and n3 to carry a replica of
 * every block (each ends up 80% full = 60% x 4 / 3). Under the node-group
 * balancer policy no block may legally migrate from n2/n3 to n0/n1, so the
 * balancer is expected to terminate within 5 iterations without moving a
 * single block.
 */
@Test(timeout=60000)
public void testBalancerEndInNoMoveProgress() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long clusterCapacity = TestBalancer.sum(capacities);
    // write enough data (replication 3) to fill the cluster to 60%
    long usedSpace = clusterCapacity * 6 / 10;
    TestBalancer.createFile(cluster, filePath, usedSpace / 3, (short) 3, 0);

    // balancer should finish in 5 iterations with no block movement
    runBalancerCanFinish(conf, usedSpace, clusterCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestBalancerWithNodeGroup.java   
/**
 * Builds a 4-datanode cluster: n0 and n1 on RACK0/NODEGROUP0, n2 on
 * RACK1/NODEGROUP1, n3 on RACK1/NODEGROUP2. The cluster is filled to 60%
 * with replication factor 3, which forces n2 and n3 to carry a replica of
 * every block (each ends up 80% full = 60% x 4 / 3). Under the node-group
 * balancer policy no block may legally migrate from n2/n3 to n0/n1, so the
 * balancer is expected to terminate within 5 iterations without moving a
 * single block.
 */
@Test(timeout=60000)
public void testBalancerEndInNoMoveProgress() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long clusterCapacity = TestBalancer.sum(capacities);
    // write enough data (replication 3) to fill the cluster to 60%
    long usedSpace = clusterCapacity * 6 / 10;
    TestBalancer.createFile(cluster, filePath, usedSpace / 3, (short) 3, 0);

    // balancer should finish in 5 iterations with no block movement
    runBalancerCanFinish(conf, usedSpace, clusterCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:hadoop-plus    文件:TestBalancerWithNodeGroup.java   
/**
 * Builds a 4-datanode cluster: n0 and n1 on RACK0/NODEGROUP0, n2 on
 * RACK1/NODEGROUP1, n3 on RACK1/NODEGROUP2. The cluster is filled to 60%
 * with replication factor 3, which forces n2 and n3 to carry a replica of
 * every block (each ends up 80% full = 60% x 4 / 3). Under the node-group
 * balancer policy no block may legally migrate from n2/n3 to n0/n1, so the
 * balancer is expected to terminate within 5 iterations without moving a
 * single block.
 */
@Test(timeout=60000)
public void testBalancerEndInNoMoveProgress() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long clusterCapacity = TestBalancer.sum(capacities);
    // write enough data (replication 3) to fill the cluster to 60%
    long usedSpace = clusterCapacity * 6 / 10;
    TestBalancer.createFile(cluster, filePath, usedSpace / 3, (short) 3, 0);

    // balancer should finish in 5 iterations with no block movement
    runBalancerCanFinish(conf, usedSpace, clusterCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:FlexMap    文件:TestBalancerWithNodeGroup.java   
/**
 * Builds a 4-datanode cluster: n0 and n1 on RACK0/NODEGROUP0, n2 on
 * RACK1/NODEGROUP1, n3 on RACK1/NODEGROUP2. The cluster is filled to 60%
 * with replication factor 3, which forces n2 and n3 to carry a replica of
 * every block (each ends up 80% full = 60% x 4 / 3). Under the node-group
 * balancer policy no block may legally migrate from n2/n3 to n0/n1, so the
 * balancer is expected to terminate within 5 iterations without moving a
 * single block.
 */
@Test(timeout=60000)
public void testBalancerEndInNoMoveProgress() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long clusterCapacity = TestBalancer.sum(capacities);
    // write enough data (replication 3) to fill the cluster to 60%
    long usedSpace = clusterCapacity * 6 / 10;
    TestBalancer.createFile(cluster, filePath, usedSpace / 3, (short) 3, 0);

    // balancer should finish in 5 iterations with no block movement
    runBalancerCanFinish(conf, usedSpace, clusterCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:hadoop-TCP    文件:TestBalancerWithNodeGroup.java   
/**
 * Builds a 4-datanode cluster: n0 and n1 on RACK0/NODEGROUP0, n2 on
 * RACK1/NODEGROUP1, n3 on RACK1/NODEGROUP2. The cluster is filled to 60%
 * with replication factor 3, which forces n2 and n3 to carry a replica of
 * every block (each ends up 80% full = 60% x 4 / 3). Under the node-group
 * balancer policy no block may legally migrate from n2/n3 to n0/n1, so the
 * balancer is expected to terminate within 5 iterations without moving a
 * single block.
 */
@Test(timeout=60000)
public void testBalancerEndInNoMoveProgress() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long clusterCapacity = TestBalancer.sum(capacities);
    // write enough data (replication 3) to fill the cluster to 60%
    long usedSpace = clusterCapacity * 6 / 10;
    TestBalancer.createFile(cluster, filePath, usedSpace / 3, (short) 3, 0);

    // balancer should finish in 5 iterations with no block movement
    runBalancerCanFinish(conf, usedSpace, clusterCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:hardfs    文件:TestBalancerWithNodeGroup.java   
/**
 * Builds a 4-datanode cluster: n0 and n1 on RACK0/NODEGROUP0, n2 on
 * RACK1/NODEGROUP1, n3 on RACK1/NODEGROUP2. The cluster is filled to 60%
 * with replication factor 3, which forces n2 and n3 to carry a replica of
 * every block (each ends up 80% full = 60% x 4 / 3). Under the node-group
 * balancer policy no block may legally migrate from n2/n3 to n0/n1, so the
 * balancer is expected to terminate within 5 iterations without moving a
 * single block.
 */
@Test(timeout=60000)
public void testBalancerEndInNoMoveProgress() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long clusterCapacity = TestBalancer.sum(capacities);
    // write enough data (replication 3) to fill the cluster to 60%
    long usedSpace = clusterCapacity * 6 / 10;
    TestBalancer.createFile(cluster, filePath, usedSpace / 3, (short) 3, 0);

    // balancer should finish in 5 iterations with no block movement
    runBalancerCanFinish(conf, usedSpace, clusterCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:hadoop-on-lustre2    文件:TestBalancerWithNodeGroup.java   
/**
 * Builds a 4-datanode cluster: n0 and n1 on RACK0/NODEGROUP0, n2 on
 * RACK1/NODEGROUP1, n3 on RACK1/NODEGROUP2. The cluster is filled to 60%
 * with replication factor 3, which forces n2 and n3 to carry a replica of
 * every block (each ends up 80% full = 60% x 4 / 3). Under the node-group
 * balancer policy no block may legally migrate from n2/n3 to n0/n1, so the
 * balancer is expected to terminate within 5 iterations without moving a
 * single block.
 */
@Test(timeout=60000)
public void testBalancerEndInNoMoveProgress() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long clusterCapacity = TestBalancer.sum(capacities);
    // write enough data (replication 3) to fill the cluster to 60%
    long usedSpace = clusterCapacity * 6 / 10;
    TestBalancer.createFile(cluster, filePath, usedSpace / 3, (short) 3, 0);

    // balancer should finish in 5 iterations with no block movement
    runBalancerCanFinish(conf, usedSpace, clusterCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:hadoop    文件:TestBalancerWithNodeGroup.java   
/**
 * Starts a 4-datanode cluster with an even data distribution, then adds a
 * fresh empty datanode on RACK1/NODEGROUP2 and checks that the balancer
 * honors node-group locality when redistributing blocks.
 */
@Test(timeout=60000)
public void testBalancerWithNodeGroup() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);
    // write data with replication dnCount/2 so the cluster sits at 20% used
    long totalUsedSpace = totalCapacity * 2 / 10;
    TestBalancer.createFile(cluster, filePath,
        totalUsedSpace / (dnCount / 2), (short) (dnCount / 2), 0);

    // bring up one empty datanode of the same capacity on RACK1/NODEGROUP2
    long newCapacity = CAPACITY;
    cluster.startDataNodes(conf, 1, true, null, new String[]{RACK1},
        new long[]{newCapacity}, new String[]{NODEGROUP2});
    totalCapacity += newCapacity;

    // run the balancer and validate the resulting distribution
    runBalancer(conf, totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestBalancerWithNodeGroup.java   
/**
 * Starts a 4-datanode cluster with an even data distribution, then adds a
 * fresh empty datanode on RACK1/NODEGROUP2 and checks that the balancer
 * honors node-group locality when redistributing blocks.
 */
@Test(timeout=60000)
public void testBalancerWithNodeGroup() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);
    // write data with replication dnCount/2 so the cluster sits at 20% used
    long totalUsedSpace = totalCapacity * 2 / 10;
    TestBalancer.createFile(cluster, filePath,
        totalUsedSpace / (dnCount / 2), (short) (dnCount / 2), 0);

    // bring up one empty datanode of the same capacity on RACK1/NODEGROUP2
    long newCapacity = CAPACITY;
    cluster.startDataNodes(conf, 1, true, null, new String[]{RACK1},
        new long[]{newCapacity}, new String[]{NODEGROUP2});
    totalCapacity += newCapacity;

    // run the balancer and validate the resulting distribution
    runBalancer(conf, totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:big-c    文件:TestBalancerWithNodeGroup.java   
/**
 * Starts a 4-datanode cluster with an even data distribution, then adds a
 * fresh empty datanode on RACK1/NODEGROUP2 and checks that the balancer
 * honors node-group locality when redistributing blocks.
 */
@Test(timeout=60000)
public void testBalancerWithNodeGroup() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);
    // write data with replication dnCount/2 so the cluster sits at 20% used
    long totalUsedSpace = totalCapacity * 2 / 10;
    TestBalancer.createFile(cluster, filePath,
        totalUsedSpace / (dnCount / 2), (short) (dnCount / 2), 0);

    // bring up one empty datanode of the same capacity on RACK1/NODEGROUP2
    long newCapacity = CAPACITY;
    cluster.startDataNodes(conf, 1, true, null, new String[]{RACK1},
        new long[]{newCapacity}, new String[]{NODEGROUP2});
    totalCapacity += newCapacity;

    // run the balancer and validate the resulting distribution
    runBalancer(conf, totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestBalancerWithNodeGroup.java   
/**
 * Starts a 4-datanode cluster with an even data distribution, then adds a
 * fresh empty datanode on RACK1/NODEGROUP2 and checks that the balancer
 * honors node-group locality when redistributing blocks.
 */
@Test(timeout=60000)
public void testBalancerWithNodeGroup() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);
    // write data with replication dnCount/2 so the cluster sits at 20% used
    long totalUsedSpace = totalCapacity * 2 / 10;
    TestBalancer.createFile(cluster, filePath,
        totalUsedSpace / (dnCount / 2), (short) (dnCount / 2), 0);

    // bring up one empty datanode of the same capacity on RACK1/NODEGROUP2
    long newCapacity = CAPACITY;
    cluster.startDataNodes(conf, 1, true, null, new String[]{RACK1},
        new long[]{newCapacity}, new String[]{NODEGROUP2});
    totalCapacity += newCapacity;

    // run the balancer and validate the resulting distribution
    runBalancer(conf, totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:hadoop-plus    文件:TestBalancerWithNodeGroup.java   
/**
 * Create a cluster with even distribution, and a new empty node is added to
 * the cluster, then test rack locality for balancer policy.
 *
 * After the balancer runs, data is expected to be spread evenly across the
 * two racks; this is verified by summing per-rack used capacity from the
 * datanode report.
 */
@Test(timeout=60000)
public void testBalancerWithRackLocality() throws Exception {
  Configuration conf = createConf();
  long[] capacities = new long[]{CAPACITY, CAPACITY};
  String[] racks = new String[]{RACK0, RACK1};
  String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP1};

  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  // also verify the node-group array, consistent with the sibling tests
  assertEquals(numOfDatanodes, nodeGroups.length);
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
                              .numDataNodes(capacities.length)
                              .racks(racks)
                              .simulatedCapacities(capacities);
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, 
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);

    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
        (short) numOfDatanodes, 0);

    long newCapacity = CAPACITY;
    String newRack = RACK1;
    String newNodeGroup = NODEGROUP2;
    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[]{newRack},
        new long[] {newCapacity}, new String[]{newNodeGroup});

    totalCapacity += newCapacity;

    // run balancer and validate results
    runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);

    DatanodeInfo[] datanodeReport = 
            client.getDatanodeReport(DatanodeReportType.ALL);

    // Sum per-rack used capacity. Accumulate in long (not int): getDfsUsed()
    // returns long and a narrowing cast could overflow for large capacities.
    Map<String, Long> rackToUsedCapacity = new HashMap<String, Long>();
    for (DatanodeInfo datanode : datanodeReport) {
      String rack = NetworkTopology.getFirstHalf(datanode.getNetworkLocation());
      long used = datanode.getDfsUsed();
      Long sum = rackToUsedCapacity.get(rack);
      rackToUsedCapacity.put(rack, sum == null ? used : sum + used);
    }
    // expected value comes first, per the JUnit assertEquals contract
    assertEquals(2, rackToUsedCapacity.size());
    assertEquals(rackToUsedCapacity.get(RACK0), rackToUsedCapacity.get(RACK1));

  } finally {
    cluster.shutdown();
  }
}
项目:hadoop-plus    文件:TestBalancerWithNodeGroup.java   
/**
 * Starts a 4-datanode cluster with an even data distribution, then adds a
 * fresh empty datanode on RACK1/NODEGROUP2 and checks that the balancer
 * honors node-group locality when redistributing blocks.
 */
@Test(timeout=60000)
public void testBalancerWithNodeGroup() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);
    // write data with replication dnCount/2 so the cluster sits at 20% used
    long totalUsedSpace = totalCapacity * 2 / 10;
    TestBalancer.createFile(cluster, filePath,
        totalUsedSpace / (dnCount / 2), (short) (dnCount / 2), 0);

    // bring up one empty datanode of the same capacity on RACK1/NODEGROUP2
    long newCapacity = CAPACITY;
    cluster.startDataNodes(conf, 1, true, null, new String[]{RACK1},
        new long[]{newCapacity}, new String[]{NODEGROUP2});
    totalCapacity += newCapacity;

    // run the balancer and validate the resulting distribution
    runBalancer(conf, totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:FlexMap    文件:TestBalancerWithNodeGroup.java   
/**
 * Starts a 4-datanode cluster with an even data distribution, then adds a
 * fresh empty datanode on RACK1/NODEGROUP2 and checks that the balancer
 * honors node-group locality when redistributing blocks.
 */
@Test(timeout=60000)
public void testBalancerWithNodeGroup() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // topology arrays must line up one entry per datanode
  int dnCount = capacities.length;
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(dnCount)
      .racks(racks)
      .simulatedCapacities(capacities);
  // node groups are registered ahead of construction via the static setter
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);
    // write data with replication dnCount/2 so the cluster sits at 20% used
    long totalUsedSpace = totalCapacity * 2 / 10;
    TestBalancer.createFile(cluster, filePath,
        totalUsedSpace / (dnCount / 2), (short) (dnCount / 2), 0);

    // bring up one empty datanode of the same capacity on RACK1/NODEGROUP2
    long newCapacity = CAPACITY;
    cluster.startDataNodes(conf, 1, true, null, new String[]{RACK1},
        new long[]{newCapacity}, new String[]{NODEGROUP2});
    totalCapacity += newCapacity;

    // run the balancer and validate the resulting distribution
    runBalancer(conf, totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
项目:hadoop-TCP    文件:TestBalancerWithNodeGroup.java   
/**
 * Create a cluster with even distribution, and a new empty node is added to
 * the cluster, then test rack locality for balancer policy.
 */
@Test(timeout=60000)
public void testBalancerWithRackLocality() throws Exception {
  Configuration conf = createConf();
  long[] capacities = new long[]{CAPACITY, CAPACITY};
  String[] racks = new String[]{RACK0, RACK1};
  String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP1};

  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  // Keep the topology arrays consistent; mirrors testBalancerWithNodeGroup.
  assertEquals(numOfDatanodes, nodeGroups.length);
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
                              .numDataNodes(capacities.length)
                              .racks(racks)
                              .simulatedCapacities(capacities);
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);

    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
        (short) numOfDatanodes, 0);

    long newCapacity = CAPACITY;
    String newRack = RACK1;
    String newNodeGroup = NODEGROUP2;
    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[]{newRack},
        new long[] {newCapacity}, new String[]{newNodeGroup});

    totalCapacity += newCapacity;

    // run balancer and validate results
    runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);

    DatanodeInfo[] datanodeReport =
            client.getDatanodeReport(DatanodeReportType.ALL);

    // Accumulate per-rack usage with long arithmetic: getDfsUsed() returns a
    // long, and the previous (int) cast could truncate/overflow for large
    // simulated capacities.
    Map<String, Long> rackToUsedCapacity = new HashMap<String, Long>();
    for (DatanodeInfo datanode : datanodeReport) {
      String rack = NetworkTopology.getFirstHalf(datanode.getNetworkLocation());
      long usedCapacity = datanode.getDfsUsed();
      Long seen = rackToUsedCapacity.get(rack);
      rackToUsedCapacity.put(rack,
          seen == null ? usedCapacity : seen + usedCapacity);
    }
    // assertEquals takes (expected, actual); the original had them reversed,
    // which garbles failure messages.
    assertEquals(2, rackToUsedCapacity.size());
    // Both racks should hold the same amount of data after balancing.
    assertEquals(rackToUsedCapacity.get(RACK0), rackToUsedCapacity.get(RACK1));

  } finally {
    cluster.shutdown();
  }
}
// Project: hadoop-TCP — File: TestBalancerWithNodeGroup.java
/**
 * Build a four-node cluster spread over two racks and three node groups,
 * grow it with one empty datanode on NODEGROUP2, and verify that the
 * balancer honors node-group locality when redistributing blocks.
 */
@Test(timeout=60000)
public void testBalancerWithNodeGroup() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  final int dnCount = capacities.length;
  // Topology arrays must stay parallel.
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(
      new MiniDFSCluster.Builder(conf)
          .numDataNodes(dnCount)
          .racks(racks)
          .simulatedCapacities(capacities));
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);
    // Write data until the cluster is 20% full.
    long totalUsedSpace = totalCapacity * 2 / 10;
    short replication = (short) (dnCount / 2);
    TestBalancer.createFile(cluster, filePath,
        totalUsedSpace / replication, replication, 0);

    // Bring up one empty datanode with identical capacity on NODEGROUP2.
    cluster.startDataNodes(conf, 1, true, null,
        new String[]{RACK1}, new long[]{CAPACITY},
        new String[]{NODEGROUP2});
    totalCapacity += CAPACITY;

    // Balance and validate the resulting distribution.
    runBalancer(conf, totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
// Project: hardfs — File: TestBalancerWithNodeGroup.java
/**
 * Create a cluster with even distribution, and a new empty node is added to
 * the cluster, then test rack locality for balancer policy.
 */
@Test(timeout=60000)
public void testBalancerWithRackLocality() throws Exception {
  Configuration conf = createConf();
  long[] capacities = new long[]{CAPACITY, CAPACITY};
  String[] racks = new String[]{RACK0, RACK1};
  String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP1};

  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  // Keep the topology arrays consistent; mirrors testBalancerWithNodeGroup.
  assertEquals(numOfDatanodes, nodeGroups.length);
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
                              .numDataNodes(capacities.length)
                              .racks(racks)
                              .simulatedCapacities(capacities);
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);

    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
        (short) numOfDatanodes, 0);

    long newCapacity = CAPACITY;
    String newRack = RACK1;
    String newNodeGroup = NODEGROUP2;
    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[]{newRack},
        new long[] {newCapacity}, new String[]{newNodeGroup});

    totalCapacity += newCapacity;

    // run balancer and validate results
    runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);

    DatanodeInfo[] datanodeReport =
            client.getDatanodeReport(DatanodeReportType.ALL);

    // Accumulate per-rack usage with long arithmetic: getDfsUsed() returns a
    // long, and the previous (int) cast could truncate/overflow for large
    // simulated capacities.
    Map<String, Long> rackToUsedCapacity = new HashMap<String, Long>();
    for (DatanodeInfo datanode : datanodeReport) {
      String rack = NetworkTopology.getFirstHalf(datanode.getNetworkLocation());
      long usedCapacity = datanode.getDfsUsed();
      Long seen = rackToUsedCapacity.get(rack);
      rackToUsedCapacity.put(rack,
          seen == null ? usedCapacity : seen + usedCapacity);
    }
    // assertEquals takes (expected, actual); the original had them reversed,
    // which garbles failure messages.
    assertEquals(2, rackToUsedCapacity.size());
    // Both racks should hold the same amount of data after balancing.
    assertEquals(rackToUsedCapacity.get(RACK0), rackToUsedCapacity.get(RACK1));

  } finally {
    cluster.shutdown();
  }
}
// Project: hardfs — File: TestBalancerWithNodeGroup.java
/**
 * Build a four-node cluster spread over two racks and three node groups,
 * grow it with one empty datanode on NODEGROUP2, and verify that the
 * balancer honors node-group locality when redistributing blocks.
 */
@Test(timeout=60000)
public void testBalancerWithNodeGroup() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  final int dnCount = capacities.length;
  // Topology arrays must stay parallel.
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(
      new MiniDFSCluster.Builder(conf)
          .numDataNodes(dnCount)
          .racks(racks)
          .simulatedCapacities(capacities));
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);
    // Write data until the cluster is 20% full.
    long totalUsedSpace = totalCapacity * 2 / 10;
    short replication = (short) (dnCount / 2);
    TestBalancer.createFile(cluster, filePath,
        totalUsedSpace / replication, replication, 0);

    // Bring up one empty datanode with identical capacity on NODEGROUP2.
    cluster.startDataNodes(conf, 1, true, null,
        new String[]{RACK1}, new long[]{CAPACITY},
        new String[]{NODEGROUP2});
    totalCapacity += CAPACITY;

    // Balance and validate the resulting distribution.
    runBalancer(conf, totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
// Project: hadoop-on-lustre2 — File: TestBalancerWithNodeGroup.java
/**
 * Create a cluster with even distribution, and a new empty node is added to
 * the cluster, then test rack locality for balancer policy.
 */
@Test(timeout=60000)
public void testBalancerWithRackLocality() throws Exception {
  Configuration conf = createConf();
  long[] capacities = new long[]{CAPACITY, CAPACITY};
  String[] racks = new String[]{RACK0, RACK1};
  String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP1};

  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  // Keep the topology arrays consistent; mirrors testBalancerWithNodeGroup.
  assertEquals(numOfDatanodes, nodeGroups.length);
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
                              .numDataNodes(capacities.length)
                              .racks(racks)
                              .simulatedCapacities(capacities);
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);

    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
        (short) numOfDatanodes, 0);

    long newCapacity = CAPACITY;
    String newRack = RACK1;
    String newNodeGroup = NODEGROUP2;
    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[]{newRack},
        new long[] {newCapacity}, new String[]{newNodeGroup});

    totalCapacity += newCapacity;

    // run balancer and validate results
    runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);

    DatanodeInfo[] datanodeReport =
            client.getDatanodeReport(DatanodeReportType.ALL);

    // Accumulate per-rack usage with long arithmetic: getDfsUsed() returns a
    // long, and the previous (int) cast could truncate/overflow for large
    // simulated capacities.
    Map<String, Long> rackToUsedCapacity = new HashMap<String, Long>();
    for (DatanodeInfo datanode : datanodeReport) {
      String rack = NetworkTopology.getFirstHalf(datanode.getNetworkLocation());
      long usedCapacity = datanode.getDfsUsed();
      Long seen = rackToUsedCapacity.get(rack);
      rackToUsedCapacity.put(rack,
          seen == null ? usedCapacity : seen + usedCapacity);
    }
    // assertEquals takes (expected, actual); the original had them reversed,
    // which garbles failure messages.
    assertEquals(2, rackToUsedCapacity.size());
    // Both racks should hold the same amount of data after balancing.
    assertEquals(rackToUsedCapacity.get(RACK0), rackToUsedCapacity.get(RACK1));

  } finally {
    cluster.shutdown();
  }
}
// Project: hadoop-on-lustre2 — File: TestBalancerWithNodeGroup.java
/**
 * Build a four-node cluster spread over two racks and three node groups,
 * grow it with one empty datanode on NODEGROUP2, and verify that the
 * balancer honors node-group locality when redistributing blocks.
 */
@Test(timeout=60000)
public void testBalancerWithNodeGroup() throws Exception {
  Configuration conf = createConf();
  long[] capacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = {RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  final int dnCount = capacities.length;
  // Topology arrays must stay parallel.
  assertEquals(dnCount, racks.length);
  assertEquals(dnCount, nodeGroups.length);

  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(
      new MiniDFSCluster.Builder(conf)
          .numDataNodes(dnCount)
          .racks(racks)
          .simulatedCapacities(capacities));
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);
    // Write data until the cluster is 20% full.
    long totalUsedSpace = totalCapacity * 2 / 10;
    short replication = (short) (dnCount / 2);
    TestBalancer.createFile(cluster, filePath,
        totalUsedSpace / replication, replication, 0);

    // Bring up one empty datanode with identical capacity on NODEGROUP2.
    cluster.startDataNodes(conf, 1, true, null,
        new String[]{RACK1}, new long[]{CAPACITY},
        new String[]{NODEGROUP2});
    totalCapacity += CAPACITY;

    // Balance and validate the resulting distribution.
    runBalancer(conf, totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}