Java 类 org.apache.hadoop.hdfs.TestHDFSServerPorts 实例源码

项目:hadoop-EAR    文件:TestMRServerPorts.java   
/**
 * Configures the data-node listen addresses to the test host constants
 * with port 0 so the OS assigns free ephemeral ports (the "0" suffix is
 * the port — presumably the host constants end with ':'; confirm in
 * TestHDFSServerPorts).
 */
private void setDataNodePorts(Configuration conf) {
  final String dataAddr = TestHDFSServerPorts.NAME_NODE_HOST + "0";
  final String httpAddr = TestHDFSServerPorts.NAME_NODE_HTTP_HOST + "0";
  conf.set("dfs.datanode.address", dataAddr);
  conf.set("dfs.datanode.http.address", httpAddr);
  conf.set("dfs.datanode.ipc.address", dataAddr);
}
项目:hadoop-EAR    文件:TestMRServerPorts.java   
/**
 * Verify JobTracker port usage: the JT must refuse to start when its RPC
 * or HTTP address collides with a port the name-node already holds, and
 * must start once both addresses are free.
 */
public void testJobTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(1, hdfs.getConfig());

    // Case 1: JT RPC address set to the name-node's URI -> must not start.
    JobConf jtConf = new JobConf(hdfs.getConfig());
    jtConf.set("mapred.job.tracker",
        FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    jtConf.set("mapred.job.tracker.http.address",
        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean canStart = canStartJobTracker(jtConf);
    assertFalse(canStart); // port collision on RPC address

    // Case 2: JT HTTP address set to the name-node's HTTP port -> must not start.
    jtConf.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    jtConf.set("mapred.job.tracker.http.address",
        hdfs.getConfig().get("dfs.http.address"));
    canStart = canStartJobTracker(jtConf);
    assertFalse(canStart); // port collision on HTTP address

    // Case 3: both addresses use free ports -> must start.
    jtConf.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    jtConf.set("mapred.job.tracker.http.address",
        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    canStart = canStartJobTracker(jtConf);
    assertTrue(canStart);

  } finally {
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
项目:hadoop-on-lustre    文件:TestMRServerPorts.java   
/**
 * Configures the data-node listen addresses to the test host constants
 * with port 0 so the OS assigns free ephemeral ports (the "0" suffix is
 * the port — presumably the host constants end with ':'; confirm in
 * TestHDFSServerPorts).
 */
private void setDataNodePorts(Configuration conf) {
  final String dataAddr = TestHDFSServerPorts.NAME_NODE_HOST + "0";
  final String httpAddr = TestHDFSServerPorts.NAME_NODE_HTTP_HOST + "0";
  conf.set("dfs.datanode.address", dataAddr);
  conf.set("dfs.datanode.http.address", httpAddr);
  conf.set("dfs.datanode.ipc.address", dataAddr);
}
项目:hadoop-on-lustre    文件:TestMRServerPorts.java   
/**
 * Verify JobTracker port usage: the JT must refuse to start when its RPC
 * or HTTP address collides with a port the name-node already holds, and
 * must start once both addresses are free.
 */
public void testJobTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(1, hdfs.getConfig());

    // Case 1: JT RPC address set to the name-node's URI -> must not start.
    JobConf jtConf = new JobConf(hdfs.getConfig());
    jtConf.set("mapred.job.tracker",
        FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    jtConf.set("mapred.job.tracker.http.address",
        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean canStart = canStartJobTracker(jtConf);
    assertFalse(canStart); // port collision on RPC address

    // Case 2: JT HTTP address set to the name-node's HTTP port -> must not start.
    jtConf.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    jtConf.set("mapred.job.tracker.http.address",
        hdfs.getConfig().get("dfs.http.address"));
    canStart = canStartJobTracker(jtConf);
    assertFalse(canStart); // port collision on HTTP address

    // Case 3: both addresses use free ports -> must start.
    jtConf.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    jtConf.set("mapred.job.tracker.http.address",
        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    canStart = canStartJobTracker(jtConf);
    assertTrue(canStart);

  } finally {
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
项目:RDFS    文件:TestMRServerPorts.java   
/**
 * Configures the data-node listen addresses to the test host constants
 * with port 0 so the OS assigns free ephemeral ports (the "0" suffix is
 * the port — presumably the host constants end with ':'; confirm in
 * TestHDFSServerPorts).
 */
private void setDataNodePorts(Configuration conf) {
  final String dataAddr = TestHDFSServerPorts.NAME_NODE_HOST + "0";
  final String httpAddr = TestHDFSServerPorts.NAME_NODE_HTTP_HOST + "0";
  conf.set("dfs.datanode.address", dataAddr);
  conf.set("dfs.datanode.http.address", httpAddr);
  conf.set("dfs.datanode.ipc.address", dataAddr);
}
项目:RDFS    文件:TestMRServerPorts.java   
/**
 * Verify JobTracker port usage: the JT must refuse to start when its RPC
 * or HTTP address collides with a port the name-node already holds, and
 * must start once both addresses are free.
 */
public void testJobTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(1, hdfs.getConfig());

    // Case 1: JT RPC address set to the name-node's URI -> must not start.
    JobConf jtConf = new JobConf(hdfs.getConfig());
    jtConf.set("mapred.job.tracker",
        FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    jtConf.set("mapred.job.tracker.http.address",
        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean canStart = canStartJobTracker(jtConf);
    assertFalse(canStart); // port collision on RPC address

    // Case 2: JT HTTP address set to the name-node's HTTP port -> must not start.
    jtConf.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    jtConf.set("mapred.job.tracker.http.address",
        hdfs.getConfig().get("dfs.http.address"));
    canStart = canStartJobTracker(jtConf);
    assertFalse(canStart); // port collision on HTTP address

    // Case 3: both addresses use free ports -> must start.
    jtConf.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    jtConf.set("mapred.job.tracker.http.address",
        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    canStart = canStartJobTracker(jtConf);
    assertTrue(canStart);

  } finally {
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
项目:hadoop-0.20    文件:TestMRServerPorts.java   
/**
 * Configures the data-node listen addresses to the test host constants
 * with port 0 so the OS assigns free ephemeral ports (the "0" suffix is
 * the port — presumably the host constants end with ':'; confirm in
 * TestHDFSServerPorts).
 */
private void setDataNodePorts(Configuration conf) {
  final String dataAddr = TestHDFSServerPorts.NAME_NODE_HOST + "0";
  final String httpAddr = TestHDFSServerPorts.NAME_NODE_HTTP_HOST + "0";
  conf.set("dfs.datanode.address", dataAddr);
  conf.set("dfs.datanode.http.address", httpAddr);
  conf.set("dfs.datanode.ipc.address", dataAddr);
}
项目:hadoop-0.20    文件:TestMRServerPorts.java   
/**
 * Verify JobTracker port usage: the JT must refuse to start when its RPC
 * or HTTP address collides with a port the name-node already holds, and
 * must start once both addresses are free.
 */
public void testJobTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(1, hdfs.getConfig());

    // Case 1: JT RPC address set to the name-node's URI -> must not start.
    JobConf jtConf = new JobConf(hdfs.getConfig());
    jtConf.set("mapred.job.tracker",
        FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    jtConf.set("mapred.job.tracker.http.address",
        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean canStart = canStartJobTracker(jtConf);
    assertFalse(canStart); // port collision on RPC address

    // Case 2: JT HTTP address set to the name-node's HTTP port -> must not start.
    jtConf.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    jtConf.set("mapred.job.tracker.http.address",
        hdfs.getConfig().get("dfs.http.address"));
    canStart = canStartJobTracker(jtConf);
    assertFalse(canStart); // port collision on HTTP address

    // Case 3: both addresses use free ports -> must start.
    jtConf.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    jtConf.set("mapred.job.tracker.http.address",
        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    canStart = canStartJobTracker(jtConf);
    assertTrue(canStart);

  } finally {
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
项目:hortonworks-extension    文件:TestMRServerPorts.java   
/**
 * Configures the data-node listen addresses to the test host constants
 * with port 0 so the OS assigns free ephemeral ports (the "0" suffix is
 * the port — presumably the host constants end with ':'; confirm in
 * TestHDFSServerPorts).
 */
private void setDataNodePorts(Configuration conf) {
  final String dataAddr = TestHDFSServerPorts.NAME_NODE_HOST + "0";
  final String httpAddr = TestHDFSServerPorts.NAME_NODE_HTTP_HOST + "0";
  conf.set("dfs.datanode.address", dataAddr);
  conf.set("dfs.datanode.http.address", httpAddr);
  conf.set("dfs.datanode.ipc.address", dataAddr);
}
项目:hortonworks-extension    文件:TestMRServerPorts.java   
/**
 * Verify JobTracker port usage: the JT must refuse to start when its RPC
 * or HTTP address collides with a port the name-node already holds, and
 * must start once both addresses are free.
 */
public void testJobTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(1, hdfs.getConfig());

    // Case 1: JT RPC address set to the name-node's URI -> must not start.
    JobConf jtConf = new JobConf(hdfs.getConfig());
    jtConf.set("mapred.job.tracker",
        FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    jtConf.set("mapred.job.tracker.http.address",
        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean canStart = canStartJobTracker(jtConf);
    assertFalse(canStart); // port collision on RPC address

    // Case 2: JT HTTP address set to the name-node's HTTP port -> must not start.
    jtConf.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    jtConf.set("mapred.job.tracker.http.address",
        hdfs.getConfig().get("dfs.http.address"));
    canStart = canStartJobTracker(jtConf);
    assertFalse(canStart); // port collision on HTTP address

    // Case 3: both addresses use free ports -> must start.
    jtConf.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    jtConf.set("mapred.job.tracker.http.address",
        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    canStart = canStartJobTracker(jtConf);
    assertTrue(canStart);

  } finally {
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
项目:hortonworks-extension    文件:TestMRServerPorts.java   
/**
 * Configures the data-node listen addresses to the test host constants
 * with port 0 so the OS assigns free ephemeral ports (the "0" suffix is
 * the port — presumably the host constants end with ':'; confirm in
 * TestHDFSServerPorts).
 */
private void setDataNodePorts(Configuration conf) {
  final String dataAddr = TestHDFSServerPorts.NAME_NODE_HOST + "0";
  final String httpAddr = TestHDFSServerPorts.NAME_NODE_HTTP_HOST + "0";
  conf.set("dfs.datanode.address", dataAddr);
  conf.set("dfs.datanode.http.address", httpAddr);
  conf.set("dfs.datanode.ipc.address", dataAddr);
}
项目:hortonworks-extension    文件:TestMRServerPorts.java   
/**
 * Verify JobTracker port usage: the JT must refuse to start when its RPC
 * or HTTP address collides with a port the name-node already holds, and
 * must start once both addresses are free.
 */
public void testJobTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(1, hdfs.getConfig());

    // Case 1: JT RPC address set to the name-node's URI -> must not start.
    JobConf jtConf = new JobConf(hdfs.getConfig());
    jtConf.set("mapred.job.tracker",
        FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    jtConf.set("mapred.job.tracker.http.address",
        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean canStart = canStartJobTracker(jtConf);
    assertFalse(canStart); // port collision on RPC address

    // Case 2: JT HTTP address set to the name-node's HTTP port -> must not start.
    jtConf.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    jtConf.set("mapred.job.tracker.http.address",
        hdfs.getConfig().get("dfs.http.address"));
    canStart = canStartJobTracker(jtConf);
    assertFalse(canStart); // port collision on HTTP address

    // Case 3: both addresses use free ports -> must start.
    jtConf.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    jtConf.set("mapred.job.tracker.http.address",
        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    canStart = canStartJobTracker(jtConf);
    assertTrue(canStart);

  } finally {
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
项目:hadoop-gpu    文件:TestMRServerPorts.java   
/**
 * Configures the data-node listen addresses to the test host constants
 * with port 0 so the OS assigns free ephemeral ports (the "0" suffix is
 * the port — presumably the host constants end with ':'; confirm in
 * TestHDFSServerPorts).
 */
private void setDataNodePorts(Configuration conf) {
  final String dataAddr = TestHDFSServerPorts.NAME_NODE_HOST + "0";
  final String httpAddr = TestHDFSServerPorts.NAME_NODE_HTTP_HOST + "0";
  conf.set("dfs.datanode.address", dataAddr);
  conf.set("dfs.datanode.http.address", httpAddr);
  conf.set("dfs.datanode.ipc.address", dataAddr);
}
项目:hadoop-gpu    文件:TestMRServerPorts.java   
/**
 * Verify JobTracker port usage: the JT must refuse to start when its RPC
 * or HTTP address collides with a port the name-node already holds, and
 * must start once both addresses are free.
 */
public void testJobTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(1, hdfs.getConfig());

    // Case 1: JT RPC address set to the name-node's URI -> must not start.
    JobConf jtConf = new JobConf(hdfs.getConfig());
    jtConf.set("mapred.job.tracker",
        FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    jtConf.set("mapred.job.tracker.http.address",
        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean canStart = canStartJobTracker(jtConf);
    assertFalse(canStart); // port collision on RPC address

    // Case 2: JT HTTP address set to the name-node's HTTP port -> must not start.
    jtConf.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    jtConf.set("mapred.job.tracker.http.address",
        hdfs.getConfig().get("dfs.http.address"));
    canStart = canStartJobTracker(jtConf);
    assertFalse(canStart); // port collision on HTTP address

    // Case 3: both addresses use free ports -> must start.
    jtConf.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
    jtConf.set("mapred.job.tracker.http.address",
        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    canStart = canStartJobTracker(jtConf);
    assertTrue(canStart);

  } finally {
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
项目:hadoop-EAR    文件:TestMRServerPorts.java   
/**
 * Verify TaskTracker port usage (the original comment said "JobTracker",
 * but this test exercises the task tracker's report/HTTP addresses):
 * binding either to a port the name-node already holds must fail;
 * free ports must succeed.
 */
public void testTaskTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  JobTracker jt = null;
  JTRunner runner = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(2, hdfs.getConfig());

    JobConf conf2 = new JobConf(hdfs.getConfig());
    runner = new JTRunner();
    jt = startJobTracker(conf2, runner);

    // task tracker report address on the name-node's port: should fail
    conf2.set("mapred.task.tracker.report.address",
              FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail

    // task tracker http server on the name-node's http port: should fail
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      hdfs.getConfig().get("dfs.http.address"));
    started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail again

    // both addresses different from the name-node ones: should start
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    started = canStartTaskTracker(conf2);
    assertTrue(started); // should start now
  } catch (IOException ioe) {
    // HACK!  we know this message isn't a problem, but it's polluting our
    // daily build test results.  Just ignore it for now...
    if (ioe.getMessage().matches("Cannot delete.*because it's outside of.*")) {
      System.out.println("Ignoring: " + ioe.getMessage());
    } else {
      throw ioe;
    }
  } finally {
    // Tear down in reverse start order; runner is non-null whenever jt is,
    // since it is assigned before startJobTracker returns.
    if (jt != null) {
      jt.fs.close();
      jt.stopTracker();
      runner.interrupt();
      runner.join();
    }
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
项目:hadoop-on-lustre    文件:TestMRServerPorts.java   
/**
 * Verify TaskTracker port usage (the original comment said "JobTracker",
 * but this test exercises the task tracker's report/HTTP addresses):
 * binding either to a port the name-node already holds must fail;
 * free ports must succeed.
 */
public void testTaskTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  JobTracker jt = null;
  JTRunner runner = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(2, hdfs.getConfig());

    JobConf conf2 = new JobConf(hdfs.getConfig());
    runner = new JTRunner();
    jt = startJobTracker(conf2, runner);

    // task tracker report address on the name-node's port: should fail
    conf2.set("mapred.task.tracker.report.address",
              FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail

    // task tracker http server on the name-node's http port: should fail
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      hdfs.getConfig().get("dfs.http.address"));
    started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail again

    // both addresses different from the name-node ones: should start
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    started = canStartTaskTracker(conf2);
    assertTrue(started); // should start now
  } finally {
    // Tear down in reverse start order; runner is non-null whenever jt is,
    // since it is assigned before startJobTracker returns.
    if (jt != null) {
      jt.fs.close();
      jt.stopTracker();
      runner.interrupt();
      runner.join();
    }
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
项目:RDFS    文件:TestMRServerPorts.java   
/**
 * Verify TaskTracker port usage (the original comment said "JobTracker",
 * but this test exercises the task tracker's report/HTTP addresses):
 * binding either to a port the name-node already holds must fail;
 * free ports must succeed.
 */
public void testTaskTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  JobTracker jt = null;
  JTRunner runner = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(2, hdfs.getConfig());

    JobConf conf2 = new JobConf(hdfs.getConfig());
    runner = new JTRunner();
    jt = startJobTracker(conf2, runner);

    // task tracker report address on the name-node's port: should fail
    conf2.set("mapred.task.tracker.report.address",
              FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail

    // task tracker http server on the name-node's http port: should fail
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      hdfs.getConfig().get("dfs.http.address"));
    started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail again

    // both addresses different from the name-node ones: should start
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    started = canStartTaskTracker(conf2);
    assertTrue(started); // should start now
  } catch (IOException ioe) {
    // HACK!  we know this message isn't a problem, but it's polluting our
    // daily build test results.  Just ignore it for now...
    if (ioe.getMessage().matches("Cannot delete.*because it's outside of.*")) {
      System.out.println("Ignoring: " + ioe.getMessage());
    } else {
      throw ioe;
    }
  } finally {
    // Tear down in reverse start order; runner is non-null whenever jt is,
    // since it is assigned before startJobTracker returns.
    if (jt != null) {
      jt.fs.close();
      jt.stopTracker();
      runner.interrupt();
      runner.join();
    }
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
项目:hadoop-0.20    文件:TestMRServerPorts.java   
/**
 * Verify TaskTracker port usage (the original comment said "JobTracker",
 * but this test exercises the task tracker's report/HTTP addresses):
 * binding either to a port the name-node already holds must fail;
 * free ports must succeed.
 */
public void testTaskTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  JobTracker jt = null;
  JTRunner runner = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(2, hdfs.getConfig());

    JobConf conf2 = new JobConf(hdfs.getConfig());
    runner = new JTRunner();
    jt = startJobTracker(conf2, runner);

    // task tracker report address on the name-node's port: should fail
    conf2.set("mapred.task.tracker.report.address",
              FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail

    // task tracker http server on the name-node's http port: should fail
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      hdfs.getConfig().get("dfs.http.address"));
    started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail again

    // both addresses different from the name-node ones: should start
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    started = canStartTaskTracker(conf2);
    assertTrue(started); // should start now
  } finally {
    // Tear down in reverse start order; runner is non-null whenever jt is,
    // since it is assigned before startJobTracker returns.
    if (jt != null) {
      jt.fs.close();
      jt.stopTracker();
      runner.interrupt();
      runner.join();
    }
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
项目:hortonworks-extension    文件:TestMRServerPorts.java   
/**
 * Verify TaskTracker port usage (the original comment said "JobTracker",
 * but this test exercises the task tracker's report/HTTP addresses):
 * binding either to a port the name-node already holds must fail;
 * free ports must succeed.
 */
public void testTaskTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  JobTracker jt = null;
  JTRunner runner = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(2, hdfs.getConfig());

    JobConf conf2 = new JobConf(hdfs.getConfig());
    runner = new JTRunner();
    jt = startJobTracker(conf2, runner);

    // task tracker report address on the name-node's port: should fail
    conf2.set("mapred.task.tracker.report.address",
              FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail

    // task tracker http server on the name-node's http port: should fail
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      hdfs.getConfig().get("dfs.http.address"));
    started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail again

    // both addresses different from the name-node ones: should start
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    started = canStartTaskTracker(conf2);
    assertTrue(started); // should start now
  } finally {
    // Tear down in reverse start order; runner is non-null whenever jt is,
    // since it is assigned before startJobTracker returns.
    if (jt != null) {
      jt.fs.close();
      jt.stopTracker();
      runner.interrupt();
      runner.join();
    }
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
项目:hortonworks-extension    文件:TestMRServerPorts.java   
/**
 * Verify TaskTracker port usage (the original comment said "JobTracker",
 * but this test exercises the task tracker's report/HTTP addresses):
 * binding either to a port the name-node already holds must fail;
 * free ports must succeed.
 */
public void testTaskTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  JobTracker jt = null;
  JTRunner runner = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(2, hdfs.getConfig());

    JobConf conf2 = new JobConf(hdfs.getConfig());
    runner = new JTRunner();
    jt = startJobTracker(conf2, runner);

    // task tracker report address on the name-node's port: should fail
    conf2.set("mapred.task.tracker.report.address",
              FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail

    // task tracker http server on the name-node's http port: should fail
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      hdfs.getConfig().get("dfs.http.address"));
    started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail again

    // both addresses different from the name-node ones: should start
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    started = canStartTaskTracker(conf2);
    assertTrue(started); // should start now
  } finally {
    // Tear down in reverse start order; runner is non-null whenever jt is,
    // since it is assigned before startJobTracker returns.
    if (jt != null) {
      jt.fs.close();
      jt.stopTracker();
      runner.interrupt();
      runner.join();
    }
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}
项目:hadoop-gpu    文件:TestMRServerPorts.java   
/**
 * Verify TaskTracker port usage (the original comment said "JobTracker",
 * but this test exercises the task tracker's report/HTTP addresses):
 * binding either to a port the name-node already holds must fail;
 * free ports must succeed.
 */
public void testTaskTrackerPorts() throws Exception {
  NameNode nn = null;
  DataNode dn = null;
  JobTracker jt = null;
  JTRunner runner = null;
  try {
    nn = hdfs.startNameNode();
    setDataNodePorts(hdfs.getConfig());
    dn = hdfs.startDataNode(2, hdfs.getConfig());

    JobConf conf2 = new JobConf(hdfs.getConfig());
    runner = new JTRunner();
    jt = startJobTracker(conf2, runner);

    // task tracker report address on the name-node's port: should fail
    conf2.set("mapred.task.tracker.report.address",
              FileSystem.getDefaultUri(hdfs.getConfig()).toString());
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    boolean started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail

    // task tracker http server on the name-node's http port: should fail
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      hdfs.getConfig().get("dfs.http.address"));
    started = canStartTaskTracker(conf2);
    assertFalse(started); // should fail again

    // both addresses different from the name-node ones: should start
    conf2.set("mapred.task.tracker.report.address",
      TestHDFSServerPorts.NAME_NODE_HOST + 0);
    conf2.set("mapred.task.tracker.http.address",
      TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
    started = canStartTaskTracker(conf2);
    assertTrue(started); // should start now
  } finally {
    // Tear down in reverse start order; runner is non-null whenever jt is,
    // since it is assigned before startJobTracker returns.
    if (jt != null) {
      jt.fs.close();
      jt.stopTracker();
      runner.interrupt();
      runner.join();
    }
    hdfs.stopDataNode(dn);
    hdfs.stopNameNode(nn);
  }
}