Java class org.apache.hadoop.hdfs.test.system.DNClient code examples
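
DNClient is the Herriot system-test client handle for a live datanode daemon; NNClient is its namenode counterpart. The examples below are drawn from the Herriot system tests (TestHL040 and TestBalancer) in several Hadoop forks. For orientation, here is a minimal sketch of the fixture shape these tests share; the createCluster/setUp calls are an assumption based on the Herriot HDFSCluster API, not an excerpt from any of the projects below:

// Minimal fixture sketch (assumed Herriot setup, not verbatim from the projects below)
Configuration conf = new Configuration();
HDFSCluster cluster = HDFSCluster.createCluster(conf); // assumed factory method
cluster.setUp();
NNClient dfsMaster = cluster.getNNClient();            // client for the namenode daemon
for (DNClient dn : cluster.getDNClients()) {           // one client per datanode daemon
  LOG.info("datanode: " + dn.getHostName());
}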

Project: hadoop-2.6.0-cdh5.4.3    File: TestHL040.java
@Test
public void testConnect() throws IOException {
  LOG.info("Staring TestHL040: connecting to the HDFSCluster ");
  LOG.info("================ Getting namenode info ================");
  NNClient dfsMaster = cluster.getNNClient();
  LOG.info("Process info of namenode " + dfsMaster.getHostName() + " is: " +
      dfsMaster.getProcessInfo());
  LOG.info("================ Getting datanode info ================");
  Collection<DNClient> clients = cluster.getDNClients();
  for (DNClient dnC : clients) {
    LOG.info("Process info of datanode " + dnC.getHostName() + " is: " +
        dnC.getProcessInfo());
    Assert.assertNotNull("Datanode process info isn't suppose to be null",
        dnC.getProcessInfo());
  }
}
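
The getProcessInfo() call returns the remote daemon's ProcessInfo bean (process details such as memory usage and environment, depending on the Herriot implementation), which is why the test treats a null value as a failed connection.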
Project: hadoop-on-lustre    File: TestHL040.java
@Test
public void testConnect() throws IOException {
  LOG.info("Staring TestHL040: connecting to the HDFSCluster ");
  LOG.info("================ Getting namenode info ================");
  NNClient dfsMaster = cluster.getNNClient();
  LOG.info("Process info of namenode " + dfsMaster.getHostName() + " is: " +
      dfsMaster.getProcessInfo());
  LOG.info("================ Getting datanode info ================");
  Collection<DNClient> clients = cluster.getDNClients();
  for (DNClient dnC : clients) {
    LOG.info("Process info of datanode " + dnC.getHostName() + " is: " +
        dnC.getProcessInfo());
    Assert.assertNotNull("Datanode process info isn't suppose to be null",
        dnC.getProcessInfo());
    LOG.info("Free space " + readAttr(dnC));
  }
}
Project: hadoop-on-lustre    File: TestBalancer.java
private static long getVolumeAttribute(DNClient datanode, String attribName)
        throws IOException {
    Object volInfo = datanode.getDaemonAttribute(DATANODE_VOLUME_INFO);
    Assert.assertNotNull(
            String.format("Attribute \"%s\" should be non-null", DATANODE_VOLUME_INFO),
            volInfo);
    String strVolInfo = volInfo.toString();
    LOG.debug(String.format("Value of %s: %s", DATANODE_VOLUME_INFO, strVolInfo));
    Map volInfoMap = (Map) JSON.parse(strVolInfo);
    long attrVal = 0L;
    for (Object key : volInfoMap.keySet()) {
        Map attrMap = (Map) volInfoMap.get(key);
        attrVal += (Long) attrMap.get(attribName);
    }
    return attrVal;
}
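
The VolumeInfo attribute parsed here is the datanode's VolumeInfo JMX bean rendered as JSON: a map keyed by volume path whose values are per-volume counter maps. Below is a self-contained sketch of the same parsing with a made-up payload; the JSON helper is assumed to be Jetty's org.mortbay.util.ajax.JSON, as in these Hadoop versions:

import java.util.Map;
import org.mortbay.util.ajax.JSON;

public class VolumeInfoParseDemo {
  public static void main(String[] args) {
    // Illustrative payload; real keys are volume directories on the datanode.
    String volInfo = "{\"/data/1/dfs/dn\":{\"freeSpace\":1073741824,\"usedSpace\":524288},"
        + "\"/data/2/dfs/dn\":{\"freeSpace\":2147483648,\"usedSpace\":1048576}}";
    Map volInfoMap = (Map) JSON.parse(volInfo);
    long totalFree = 0L;
    for (Object key : volInfoMap.keySet()) {
      Map attrMap = (Map) volInfoMap.get(key);
      totalFree += (Long) attrMap.get("freeSpace"); // Jetty JSON yields integers as Long
    }
    System.out.println("total free space: " + totalFree); // 3221225472
  }
}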
Project: hadoop-on-lustre    File: TestBalancer.java
/**
 * Balancer_07
 * Bring up a dfs cluster with nodes A and B. Set file replication
 * factor to be 2 and fill up the cluster to 30% full. Then add an
 * empty data node C. All three nodes are on the same rack.
 */
@Test
public void testBalancerTwoNodeSingleRackClusterWithNewNodeAdded()
        throws IOException {

    // replication factor of 2, matching the scenario described above
    final short TEST_REPLICATION_FACTOR = 2;
    List<DNClient> testnodes = reserveDatanodesForTest(3);
    DNClient dnA = testnodes.get(0);
    DNClient dnB = testnodes.get(1);

    DNClient dnC = testnodes.get(2);
    stopDatanode(dnC);

    // target 30% over pre-test capacity, using the most heavily
    // used node as the baseline (1.0 forces floating-point division)
    long targetLoad = (long) (
            (1.0 / DFS_BLOCK_SIZE) *
            0.30 *
            Math.max(getDatanodeUsedSpace(dnA), getDatanodeUsedSpace(dnB)));
    generateFileSystemLoad(targetLoad, TEST_REPLICATION_FACTOR);
    startDatanode(dnC);
    runBalancerAndVerify(testnodes);
}
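
To make the target-load arithmetic concrete, here is a worked example with assumed numbers (the block size and usage are illustrative, not taken from the tests):

long blockSize = 64L << 20; // assume a 64 MiB DFS block size
long usedSpace = 10L << 30; // assume 10 GiB used on the fuller of the two nodes
// 30% of the used space, expressed as a block count:
long targetLoad = (long) ((1.0 / blockSize) * 0.30 * usedSpace); // == 48 blocks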
Project: cumulus    File: TestHL040.java
@Test
public void testConnect() throws IOException {
  LOG.info("Staring TestHL040: connecting to the HDFSCluster ");
  LOG.info("================ Getting namenode info ================");
  NNClient dfsMaster = cluster.getNNClient();
  LOG.info("Process info of namenode " + dfsMaster.getHostName() + " is: " +
      dfsMaster.getProcessInfo());
  LOG.info("================ Getting datanode info ================");
  Collection<DNClient> clients = cluster.getDNClients();
  for (DNClient dnC : clients) {
    LOG.info("Process info of datanode " + dnC.getHostName() + " is: " +
        dnC.getProcessInfo());
    Assert.assertNotNull("Datanode process info isn't suppose to be null",
        dnC.getProcessInfo());
    LOG.info("Free space " + getFreeSpace(dnC));
  }
}
Project: hanoi-hadoop-2.0.0-cdh    File: TestHL040.java
@Test
public void testConnect() throws IOException {
  LOG.info("Staring TestHL040: connecting to the HDFSCluster ");
  LOG.info("================ Getting namenode info ================");
  NNClient dfsMaster = cluster.getNNClient();
  LOG.info("Process info of namenode " + dfsMaster.getHostName() + " is: " +
      dfsMaster.getProcessInfo());
  LOG.info("================ Getting datanode info ================");
  Collection<DNClient> clients = cluster.getDNClients();
  for (DNClient dnC : clients) {
    LOG.info("Process info of datanode " + dnC.getHostName() + " is: " +
        dnC.getProcessInfo());
    Assert.assertNotNull("Datanode process info isn't suppose to be null",
        dnC.getProcessInfo());
  }
}
Project: hortonworks-extension    File: TestHL040.java
@Test
public void testConnect() throws IOException {
  LOG.info("Staring TestHL040: connecting to the HDFSCluster ");
  LOG.info("================ Getting namenode info ================");
  NNClient dfsMaster = cluster.getNNClient();
  LOG.info("Process info of namenode " + dfsMaster.getHostName() + " is: " +
      dfsMaster.getProcessInfo());
  LOG.info("================ Getting datanode info ================");
  Collection<DNClient> clients = cluster.getDNClients();
  for (DNClient dnC : clients) {
    LOG.info("Process info of datanode " + dnC.getHostName() + " is: " +
        dnC.getProcessInfo());
    Assert.assertNotNull("Datanode process info isn't suppose to be null",
        dnC.getProcessInfo());
    LOG.info("Free space " + readAttr(dnC));
  }
}
Project: hortonworks-extension    File: TestBalancer.java
private static long getVolumeAttribute(DNClient datanode, String attribName)
        throws IOException {
    Object volInfo = datanode.getDaemonAttribute(DATANODE_VOLUME_INFO);
    Assert.assertNotNull(
            String.format("Attribute \"%s\" should be non-null", DATANODE_VOLUME_INFO),
            volInfo);
    String strVolInfo = volInfo.toString();
    LOG.debug(String.format("Value of %s: %s", DATANODE_VOLUME_INFO, strVolInfo));
    Map volInfoMap = (Map) JSON.parse(strVolInfo);
    long attrVal = 0L;
    for (Object key : volInfoMap.keySet()) {
        Map attrMap = (Map) volInfoMap.get(key);
        attrVal += (Long) attrMap.get(attribName);
    }
    return attrVal;
}
Project: hortonworks-extension    File: TestBalancer.java
/**
 * Balancer_07
 * Bring up a dfs cluster with nodes A and B. Set file replication
 * factor to be 2 and fill up the cluster to 30% full. Then add an
 * empty data node C. All three nodes are on the same rack.
 */
@Test
public void testBalancerTwoNodeSingleRackClusterWithNewNodeAdded()
        throws IOException {

    // replication factor of 2, matching the scenario described above
    final short TEST_REPLICATION_FACTOR = 2;
    List<DNClient> testnodes = reserveDatanodesForTest(3);
    DNClient dnA = testnodes.get(0);
    DNClient dnB = testnodes.get(1);

    DNClient dnC = testnodes.get(2);
    stopDatanode(dnC);

    // target 30% over pre-test capacity, using the most heavily
    // used node as the baseline (1.0 forces floating-point division)
    long targetLoad = (long) (
            (1.0 / DFS_BLOCK_SIZE) *
            0.30 *
            Math.max(getDatanodeUsedSpace(dnA), getDatanodeUsedSpace(dnB)));
    generateFileSystemLoad(targetLoad, TEST_REPLICATION_FACTOR);
    startDatanode(dnC);
    runBalancerAndVerify(testnodes);
}
Project: hadoop-on-lustre    File: TestHL040.java
private long readAttr(DNClient dnC) throws IOException {
  Object volObj = dnC.getDaemonAttribute("VolumeInfo");
  Assert.assertNotNull("Attribute value is expected to be not null", volObj);
  LOG.debug("Got object: " + volObj);
  Map volInfoMap = (Map) JSON.parse(volObj.toString());
  long totalFreeSpace = 0L;
  for (Object key : volInfoMap.keySet()) {
    Map attrMap = (Map) volInfoMap.get(key);
    long freeSpace = (Long) attrMap.get("freeSpace");
    totalFreeSpace += freeSpace;
  }
  return totalFreeSpace;
}
Project: hadoop-on-lustre    File: TestBalancer.java
private void shutdownNonTestNodes(List<DNClient> testnodes) {
    Set<DNClient> killSet = new HashSet<DNClient>(getAllDatanodes());
    killSet.removeAll(testnodes);
    LOG.info("attempting to kill/suspend all the nodes not used for this test");
    for (DNClient dn : killSet) {
        // kill may not work with some secure-HDFS configs,
        // so use our stopDatanode() method instead
        stopDatanode(dn);
    }
}
Project: hadoop-on-lustre    File: TestBalancer.java
/**
 * Kill all datanodes but leave reservationCount nodes alive;
 * return a list of the reserved datanodes.
 */
private List<DNClient> reserveDatanodesForTest(int reservationCount) {
    LOG.info("getting collection of live data nodes");
    List<DNClient> dnList = getAllDatanodes();
    int dnCount = dnList.size();
    // check that enough datanodes are available to run the test
    Assert.assertTrue(
            String.format(
                "not enough datanodes available to run test,"
                + " need %d datanodes but have only %d available",
                reservationCount, dnCount),
            (dnCount >= reservationCount));
    LOG.info("selecting " + reservationCount + " nodes for test");
    List<DNClient> dieDNs = new LinkedList<DNClient>(dnList);
    List<DNClient> testDNs = new LinkedList<DNClient>();

    // randomly move reservationCount distinct nodes from the kill list
    // to the test list
    for (int n = 0; n < reservationCount; n++) {
        int i = getRandom(dieDNs.size() - 1);
        DNClient testDN = dieDNs.get(i);
        testDNs.add(testDN);
        dieDNs.remove(testDN);
    }

    LOG.info("nodes reserved for test");
    printDatanodeList(testDNs);

    LOG.info("nodes not used in test");
    printDatanodeList(dieDNs);

    return testDNs;
}
Project: hadoop-on-lustre    File: TestBalancer.java
protected boolean isClusterBalanced(DNClient[] datanodes, int tolerance)
        throws IOException {

    Assert.assertFalse("empty datanode array specified",
            ArrayUtils.isEmpty(datanodes));
    boolean result = true;
    double[] utilizationByNode = new double[datanodes.length];
    double totalUsedSpace = 0;
    double totalCapacity = 0;
    // accumulate used space and capacity across the given nodes
    for (int i = 0; i < datanodes.length; i++) {
        Map volumeInfoMap = getDatanodeVolumeAttributes(datanodes[i]);
        long usedSpace = (Long) volumeInfoMap.get(ATTRNAME_USED_SPACE);
        long capacity  = (Long) volumeInfoMap.get(ATTRNAME_CAPACITY);
        utilizationByNode[i] = (((double) usedSpace) / capacity) * 100;
        totalUsedSpace += usedSpace;
        totalCapacity  += capacity;
    }
    // reuse the volume info fetched above, for speed; an alternative
    // is to get fresh values from the cluster here instead
    double avgUtilization = (totalUsedSpace / totalCapacity) * 100;
    for (int i = 0; i < datanodes.length; i++) {
        double varUtilization = Math.abs(avgUtilization - utilizationByNode[i]);
        if (varUtilization > tolerance) {
            result = false;
            break;
        }
    }

    return result;
}
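
In other words, the cluster counts as balanced when every node's utilization stays within tolerance percentage points of the cluster-wide average: |100 * used_i / capacity_i - 100 * totalUsed / totalCapacity| <= tolerance for every node i.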
Project: hadoop-on-lustre    File: TestBalancer.java
private void runBalancerAndVerify(List<DNClient> testnodes, String threshold)
        throws IOException {
    int exitStatus = runBalancer(threshold);
    // assert balancer exits with status SUCCESS
    Assert.assertTrue(
            String.format("balancer returned non-success exit code: %d",
            exitStatus),
            (exitStatus == SUCCESS));
    DNClient[] testnodeArr = toDatanodeArray(testnodes);
    Assert.assertTrue(
            "cluster is not balanced",
            isClusterBalanced(testnodeArr));
}
Project: hadoop-on-lustre    File: TestBalancer.java
private static Map getDatanodeVolumeAttributes(DNClient datanode)
        throws IOException {
    Map result = new HashMap();
    long usedSpace = getVolumeAttribute(datanode, ATTRNAME_USED_SPACE);
    long freeSpace = getVolumeAttribute(datanode, ATTRNAME_FREE_SPACE);
    result.put(ATTRNAME_USED_SPACE, usedSpace);
    result.put(ATTRNAME_CAPACITY,   usedSpace+freeSpace);
    return result;
}
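
Note that capacity is derived here as usedSpace + freeSpace, so any space the datanode holds back (for example via dfs.datanode.du.reserved) does not count toward capacity in the balance check.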
Project: hadoop-on-lustre    File: TestBalancer.java
/**
 * Balancer_01
 * Start balancer and check if the cluster is balanced after the run.
 * Cluster should end up in balanced state.
 */
@Test
public void testBalancerSimple() throws IOException {

    DNClient[] datanodes = toDatanodeArray( getAllDatanodes() );
    int exitStatus = runBalancer();
    // assert on successful exit code here
    Assert.assertTrue(
            String.format("balancer returned non-success exit code: %d",
                          exitStatus),
            (exitStatus == SUCCESS));
    Assert.assertTrue( "cluster is not balanced", isClusterBalanced(datanodes) );

}
Project: hadoop-on-lustre    File: TestBalancer.java
/**
 * Balancer_16
 * Start balancer with a negative threshold value.
 */
@Test
public void testBalancerConfiguredWithThresholdValueNegative()
        throws IOException {
    List<DNClient> testnodes = getAllDatanodes();
    final int TRIALS = 5;
    for (int i = 0; i < TRIALS; i++) {
        // pick a strictly negative threshold in [-100, -1]
        int negThreshold = -1 - (int) (100 * Math.random());
        runBalancerAndVerify(testnodes, negThreshold);
    }
}
Project: hadoop-on-lustre    File: TestBalancer.java
/**
 * Balancer_17
 * Start balancer with out-of-range threshold value
 *  (e.g. -123, 0, -324, 100000, -12222222, 1000000000, -10000, 345, 989)
 */
@Test
public void testBalancerConfiguredWithThresholdValueOutOfRange()
        throws IOException {
    List<DNClient> testnodes = getAllDatanodes();
    final int[] THRESHOLD_OUT_OF_RANGE_DATA = {
        -123, 0, -324, 100000, -12222222, 1000000000, -10000, 345, 989
    };
    for(int threshold: THRESHOLD_OUT_OF_RANGE_DATA) {
        runBalancerAndVerify(testnodes, threshold);
    }
}
Project: hadoop-on-lustre    File: TestBalancer.java
/**
 * Balancer_18
 * Start balancer with alpha-numeric threshold value
 *  (e.g., 103dsf, asd234, asfd, ASD, #$asd, 2345&, $35, %34)
 */
@Test
public void testBalancerConfiguredWithThresholdValueAlphanumeric()
        throws IOException {
    List<DNClient> testnodes = getAllDatanodes();
    final String[] THRESHOLD_ALPHA_DATA = {
        "103dsf", "asd234", "asfd", "ASD", "#$asd", "2345&", "$35", "%34", 
        "0x64", "0xde", "0xad", "0xbe", "0xef"
    };
    for(String threshold: THRESHOLD_ALPHA_DATA) {
        runBalancerAndVerify(testnodes,threshold);
    }
}
Project: hadoop-on-lustre    File: TestBalancer.java
/**
 * Balancer_22
 * Running the balancer with half the data nodes not running
 */
@Test
public void testBalancerWithOnlyHalfOfDataNodesRunning()
        throws IOException {
    List<DNClient> datanodes = getAllDatanodes();
    int testnodeCount = (int) Math.floor(datanodes.size() * 0.5);
    List<DNClient> testnodes = reserveDatanodesForTest(testnodeCount);
    runBalancerAndVerify(testnodes);
}
Project: hadoop-on-lustre    File: TestBalancer.java
/**
 * NamenodeProtocolTest_05
 * Get blocks from a non-existent datanode.
 */
@Test
public void testNamenodeProtocolGetBlocksFromNonexistentDatanode()
        throws IOException {
    final short replication = 1;
    Path balancerTempDir = null;
    try {
        // reserve 2 nodes for test
        List<DNClient> testnodes = reserveDatanodesForTest(2);
        shutdownNonTestNodes(testnodes);

        DNClient testnode1 = testnodes.get(0);
        DNClient testnode2 = testnodes.get(1);

        // write some blocks with replication factor of 1
        balancerTempDir = makeTempDir();
        generateFileSystemLoad(20, replication);

        // get block locations from NN
        NNClient namenode = dfsCluster.getNNClient();
        // TODO extend namenode to get block locations
        //namenode.get

        // shutdown 1 node
        stopDatanode(testnode1);

        // attempt to retrieve blocks from the dead node; we should fail
    } finally {
        // cleanup: remove test data from DFS [rmr ~/balancer-temp]
        LOG.info("clean off test data from DFS [rmr ~/balancer-temp]");
        try {
            deleteTempDir(balancerTempDir);
        } catch (Exception e) {
            LOG.warn("problem cleaning up temp dir", e);
        }
    }
}
Project: cumulus    File: TestHL040.java
private long getFreeSpace(DNClient dnC) throws IOException {
  Object volObj = dnC.getDaemonAttribute("VolumeInfo");
  Assert.assertNotNull("Attribute value is expected to be not null", volObj);
  LOG.debug("Got object: " + volObj);
  Map volInfoMap = (Map) JSON.parse(volObj.toString());
  long totalFreeSpace = 0L;
  for (Object key : volInfoMap.keySet()) {
    Map attrMap = (Map) volInfoMap.get(key);
    long freeSpace = (Long) attrMap.get("freeSpace");
    totalFreeSpace += freeSpace;
  }
  return totalFreeSpace;
}
Project: hortonworks-extension    File: TestHL040.java
private long readAttr(DNClient dnC) throws IOException {
  Object volObj = dnC.getDaemonAttribute("VolumeInfo");
  Assert.assertNotNull("Attribute value is expected to be not null", volObj);
  LOG.debug("Got object: " + volObj);
  Map volInfoMap = (Map) JSON.parse(volObj.toString());
  long totalFreeSpace = 0L;
  for (Object key : volInfoMap.keySet()) {
    Map attrMap = (Map) volInfoMap.get(key);
    long freeSpace = (Long) attrMap.get("freeSpace");
    totalFreeSpace += freeSpace;
  }
  return totalFreeSpace;
}
Project: hortonworks-extension    File: TestBalancer.java
private void shutdownNonTestNodes(List<DNClient> testnodes) {
    Set<DNClient> killSet = new HashSet<DNClient>(getAllDatanodes());
    killSet.removeAll(testnodes);
    LOG.info("attempting to kill/suspend all the nodes not used for this test");
    for (DNClient dn : killSet) {
        // kill may not work with some secure-HDFS configs,
        // so use our stopDatanode() method instead
        stopDatanode(dn);
    }
}
Project: hortonworks-extension    File: TestBalancer.java
/**
 * Kill all datanodes but leave reservationCount nodes alive;
 * return a list of the reserved datanodes.
 */
private List<DNClient> reserveDatanodesForTest(int reservationCount) {
    LOG.info("getting collection of live data nodes");
    List<DNClient> dnList = getAllDatanodes();
    int dnCount = dnList.size();
    // check that enough datanodes are available to run the test
    Assert.assertTrue(
            String.format(
                "not enough datanodes available to run test,"
                + " need %d datanodes but have only %d available",
                reservationCount, dnCount),
            (dnCount >= reservationCount));
    LOG.info("selecting " + reservationCount + " nodes for test");
    List<DNClient> dieDNs = new LinkedList<DNClient>(dnList);
    List<DNClient> testDNs = new LinkedList<DNClient>();

    // randomly move reservationCount distinct nodes from the kill list
    // to the test list
    for (int n = 0; n < reservationCount; n++) {
        int i = getRandom(dieDNs.size() - 1);
        DNClient testDN = dieDNs.get(i);
        testDNs.add(testDN);
        dieDNs.remove(testDN);
    }

    LOG.info("nodes reserved for test");
    printDatanodeList(testDNs);

    LOG.info("nodes not used in test");
    printDatanodeList(dieDNs);

    return testDNs;
}
Project: hortonworks-extension    File: TestBalancer.java
protected boolean isClusterBalanced(DNClient[] datanodes, int tolerance)
        throws IOException {

    Assert.assertFalse("empty datanode array specified",
            ArrayUtils.isEmpty(datanodes));
    boolean result = true;
    double[] utilizationByNode = new double[datanodes.length];
    double totalUsedSpace = 0;
    double totalCapacity = 0;
    // accumulate used space and capacity across the given nodes
    for (int i = 0; i < datanodes.length; i++) {
        Map volumeInfoMap = getDatanodeVolumeAttributes(datanodes[i]);
        long usedSpace = (Long) volumeInfoMap.get(ATTRNAME_USED_SPACE);
        long capacity  = (Long) volumeInfoMap.get(ATTRNAME_CAPACITY);
        utilizationByNode[i] = (((double) usedSpace) / capacity) * 100;
        totalUsedSpace += usedSpace;
        totalCapacity  += capacity;
    }
    // reuse the volume info fetched above, for speed; an alternative
    // is to get fresh values from the cluster here instead
    double avgUtilization = (totalUsedSpace / totalCapacity) * 100;
    for (int i = 0; i < datanodes.length; i++) {
        double varUtilization = Math.abs(avgUtilization - utilizationByNode[i]);
        if (varUtilization > tolerance) {
            result = false;
            break;
        }
    }

    return result;
}
Project: hortonworks-extension    File: TestBalancer.java
private void runBalancerAndVerify(List<DNClient> testnodes, String threshold)
        throws IOException {
    int exitStatus = runBalancer(threshold);
    // assert balancer exits with status SUCCESS
    Assert.assertTrue(
            String.format("balancer returned non-success exit code: %d",
            exitStatus),
            (exitStatus == SUCCESS));
    DNClient[] testnodeArr = toDatanodeArray(testnodes);
    Assert.assertTrue(
            "cluster is not balanced",
            isClusterBalanced(testnodeArr));
}
Project: hortonworks-extension    File: TestBalancer.java
private static Map getDatanodeVolumeAttributes(DNClient datanode)
        throws IOException {
    Map result = new HashMap();
    long usedSpace = getVolumeAttribute(datanode, ATTRNAME_USED_SPACE);
    long freeSpace = getVolumeAttribute(datanode, ATTRNAME_FREE_SPACE);
    result.put(ATTRNAME_USED_SPACE, usedSpace);
    result.put(ATTRNAME_CAPACITY,   usedSpace+freeSpace);
    return result;
}
Project: hortonworks-extension    File: TestBalancer.java
/**
 * Balancer_01
 * Start balancer and check if the cluster is balanced after the run.
 * Cluster should end up in balanced state.
 */
@Test
public void testBalancerSimple() throws IOException {

    DNClient[] datanodes = toDatanodeArray( getAllDatanodes() );
    int exitStatus = runBalancer();
    // assert on successful exit code here
    Assert.assertTrue(
            String.format("balancer returned non-success exit code: %d",
                          exitStatus),
            (exitStatus == SUCCESS));
    Assert.assertTrue( "cluster is not balanced", isClusterBalanced(datanodes) );

}
Project: hortonworks-extension    File: TestBalancer.java
/**
 * Balancer_16
 * Start balancer with a negative threshold value.
 */
@Test
public void testBalancerConfiguredWithThresholdValueNegative()
        throws IOException {
    List<DNClient> testnodes = getAllDatanodes();
    final int TRIALS = 5;
    for (int i = 0; i < TRIALS; i++) {
        // pick a strictly negative threshold in [-100, -1]
        int negThreshold = -1 - (int) (100 * Math.random());
        runBalancerAndVerify(testnodes, negThreshold);
    }
}
Project: hortonworks-extension    File: TestBalancer.java
/**
 * Balancer_17
 * Start balancer with out-of-range threshold value
 *  (e.g. -123, 0, -324, 100000, -12222222, 1000000000, -10000, 345, 989)
 */
@Test
public void testBalancerConfiguredWithThresholdValueOutOfRange()
        throws IOException {
    List<DNClient> testnodes = getAllDatanodes();
    final int[] THRESHOLD_OUT_OF_RANGE_DATA = {
        -123, 0, -324, 100000, -12222222, 1000000000, -10000, 345, 989
    };
    for(int threshold: THRESHOLD_OUT_OF_RANGE_DATA) {
        runBalancerAndVerify(testnodes, threshold);
    }
}
Project: hortonworks-extension    File: TestBalancer.java
/**
 * Balancer_18
 * Start balancer with alpha-numeric threshold value
 *  (e.g., 103dsf, asd234, asfd, ASD, #$asd, 2345&, $35, %34)
 */
@Test
public void testBalancerConfiguredWithThresholdValueAlphanumeric()
        throws IOException {
    List<DNClient> testnodes = getAllDatanodes();
    final String[] THRESHOLD_ALPHA_DATA = {
        "103dsf", "asd234", "asfd", "ASD", "#$asd", "2345&", "$35", "%34", 
        "0x64", "0xde", "0xad", "0xbe", "0xef"
    };
    for(String threshold: THRESHOLD_ALPHA_DATA) {
        runBalancerAndVerify(testnodes,threshold);
    }
}
Project: hortonworks-extension    File: TestBalancer.java
/**
 * Balancer_22
 * Running the balancer with half the data nodes not running
 */
@Test
public void testBalancerWithOnlyHalfOfDataNodesRunning()
        throws IOException {
    List<DNClient> datanodes = getAllDatanodes();
    int testnodeCount = (int) Math.floor(datanodes.size() * 0.5);
    List<DNClient> testnodes = reserveDatanodesForTest(testnodeCount);
    runBalancerAndVerify(testnodes);
}
Project: hortonworks-extension    File: TestBalancer.java
/**
 * NamenodeProtocolTest_05
 * Get blocks from a non-existent datanode.
 */
@Test
public void testNamenodeProtocolGetBlocksFromNonexistentDatanode()
        throws IOException {
    final short replication = 1;
    Path balancerTempDir = null;
    try {
        // reserve 2 nodes for test
        List<DNClient> testnodes = reserveDatanodesForTest(2);
        shutdownNonTestNodes(testnodes);

        DNClient testnode1 = testnodes.get(0);
        DNClient testnode2 = testnodes.get(1);

        // write some blocks with replication factor of 1
        balancerTempDir = makeTempDir();
        generateFileSystemLoad(20, replication);

        // get block locations from NN
        NNClient namenode = dfsCluster.getNNClient();
        // TODO extend namenode to get block locations
        //namenode.get

        // shutdown 1 node
        stopDatanode(testnode1);

        // attempt to retrieve blocks from the dead node; we should fail
    } finally {
        // cleanup: remove test data from DFS [rmr ~/balancer-temp]
        LOG.info("clean off test data from DFS [rmr ~/balancer-temp]");
        try {
            deleteTempDir(balancerTempDir);
        } catch (Exception e) {
            LOG.warn("problem cleaning up temp dir", e);
        }
    }
}