Java class org.apache.hadoop.mapreduce.v2.app.AppContext usage examples

Project: hadoop    File: TestJobImpl.java
private static StubbedJob createStubbedJob(Configuration conf,
    Dispatcher dispatcher, int numSplits, AppContext appContext) {
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  if (appContext == null) {
    appContext = mock(AppContext.class);
    when(appContext.hasSuccessfullyUnregistered()).thenReturn(true);
  }
  StubbedJob job = new StubbedJob(jobId,
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0),
      conf, dispatcher.getEventHandler(), true, "somebody", numSplits, appContext);
  dispatcher.register(JobEventType.class, job);
  EventHandler mockHandler = mock(EventHandler.class);
  dispatcher.register(TaskEventType.class, mockHandler);
  dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class,
      mockHandler);
  dispatcher.register(JobFinishEvent.Type.class, mockHandler);
  return job;
}
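
A note on the pattern: many snippets on this page stub AppContext the same way createStubbedJob does above, by creating a Mockito mock and wiring up only the getters the code under test will touch. A minimal, self-contained sketch of that idiom (the class name and stubbed values below are illustrative, not from any of the projects listed here):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class MockedAppContextSketch {
  // Builds an AppContext mock stubbed just enough for a unit test.
  static AppContext newMockedContext() {
    ApplicationId appId = ApplicationId.newInstance(1234567890000L, 1);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    AppContext ctx = mock(AppContext.class);
    when(ctx.getApplicationID()).thenReturn(appId);
    when(ctx.getApplicationAttemptId()).thenReturn(attemptId);
    // Pretend the AM already unregistered cleanly, as createStubbedJob does.
    when(ctx.hasSuccessfullyUnregistered()).thenReturn(true);
    return ctx;
  }
}
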
Project: hadoop    File: DefaultSpeculator.java
public DefaultSpeculator(Configuration conf, AppContext context,
    TaskRuntimeEstimator estimator, Clock clock) {
  super(DefaultSpeculator.class.getName());

  this.conf = conf;
  this.context = context;
  this.estimator = estimator;
  this.clock = clock;
  this.eventHandler = context.getEventHandler();
  this.soonestRetryAfterNoSpeculate =
      conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE,
              MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_NO_SPECULATE);
  this.soonestRetryAfterSpeculate =
      conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_SPECULATE,
              MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_SPECULATE);
  this.proportionRunningTasksSpeculatable =
      conf.getDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS,
              MRJobConfig.DEFAULT_SPECULATIVECAP_RUNNING_TASKS);
  this.proportionTotalTasksSpeculatable =
      conf.getDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS,
              MRJobConfig.DEFAULT_SPECULATIVECAP_TOTAL_TASKS);
  this.minimumAllowedSpeculativeTasks =
      conf.getInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS,
              MRJobConfig.DEFAULT_SPECULATIVE_MINIMUM_ALLOWED_TASKS);
}
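
The constructor above reads five speculation knobs from the job Configuration. For reference, a short sketch of setting the same MRJobConfig keys on the submitting side (the numeric values are placeholders, not tuning advice):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class SpeculationConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Milliseconds before the speculator re-checks after (not) speculating.
    conf.setLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE, 1000L);
    conf.setLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_SPECULATE, 15000L);
    // Caps on the fraction of running/total tasks that may be speculated.
    conf.setDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS, 0.1);
    conf.setDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS, 0.01);
    // Floor so small jobs can still speculate at least this many tasks.
    conf.setInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS, 10);
  }
}
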
Project: hadoop    File: JobCounterInfo.java
public JobCounterInfo(AppContext ctx, Job job) {
  getCounters(ctx, job);
  counterGroup = new ArrayList<CounterGroupInfo>();
  this.id = MRApps.toString(job.getID());

  if (total != null) {
    for (CounterGroup g : total) {
      if (g != null) {
        CounterGroup mg = map == null ? null : map.getGroup(g.getName());
        CounterGroup rg = reduce == null ? null : reduce
          .getGroup(g.getName());

        CounterGroupInfo cginfo = new CounterGroupInfo(g.getName(), g,
          mg, rg);
        counterGroup.add(cginfo);
      }
    }
  }
}
Project: hadoop    File: TestHsWebServicesAttempts.java
@Override
protected void configureServlets() {

  appContext = new MockHistoryContext(0, 1, 2, 1);
  webApp = mock(HsWebApp.class);
  when(webApp.name()).thenReturn("hsmockwebapp");

  bind(JAXBContextResolver.class);
  bind(HsWebServices.class);
  bind(GenericExceptionHandler.class);
  bind(WebApp.class).toInstance(webApp);
  bind(AppContext.class).toInstance(appContext);
  bind(HistoryContext.class).toInstance(appContext);
  bind(Configuration.class).toInstance(conf);

  serve("/*").with(GuiceContainer.class);
}
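
This Guice wiring recurs throughout the web-service tests on this page: bind the fixture instances, then hand all servlet traffic to Jersey's GuiceContainer. A stripped-down sketch of just that idiom (the class and method names here are illustrative):

import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import org.apache.hadoop.mapreduce.v2.app.AppContext;

public class GuiceWiringSketch {
  public static Injector wire(final AppContext appContext) {
    return Guice.createInjector(new ServletModule() {
      @Override
      protected void configureServlets() {
        // Every injected consumer sees the same pre-built fixture.
        bind(AppContext.class).toInstance(appContext);
        // Route all servlet traffic through Jersey's Guice adapter.
        serve("/*").with(GuiceContainer.class);
      }
    });
  }
}
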
Project: hadoop    File: TestLocalContainerAllocator.java
private static AppContext createAppContext() {
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  ApplicationAttemptId attemptId =
      ApplicationAttemptId.newInstance(appId, 1);
  Job job = mock(Job.class);
  @SuppressWarnings("rawtypes")
  EventHandler eventHandler = mock(EventHandler.class);
  AppContext ctx = mock(AppContext.class);
  when(ctx.getApplicationID()).thenReturn(appId);
  when(ctx.getApplicationAttemptId()).thenReturn(attemptId);
  when(ctx.getJob(isA(JobId.class))).thenReturn(job);
  when(ctx.getClusterInfo()).thenReturn(
    new ClusterInfo(Resource.newInstance(10240, 1, 0)));
  when(ctx.getEventHandler()).thenReturn(eventHandler);
  return ctx;
}
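
For orientation, a tiny usage sketch written as if it sat in the same test class (createAppContext() is private to TestLocalContainerAllocator, so it is only callable there; assumes JUnit's assertEquals is statically imported):

@Test
public void stubbedGettersReturnWiredValues() throws Exception {
  AppContext ctx = createAppContext(); // the helper above
  // The mock hands back exactly what was wired in via when(...).thenReturn(...).
  assertEquals(1, ctx.getApplicationID().getId());
  assertEquals(1, ctx.getApplicationAttemptId().getAttemptId());
}
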
Project: hadoop    File: TestAMWebServices.java
public void verifyAMInfoXML(String xml, AppContext ctx) throws Exception {
  DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
  DocumentBuilder db = dbf.newDocumentBuilder();
  InputSource is = new InputSource();
  is.setCharacterStream(new StringReader(xml));
  Document dom = db.parse(is);
  NodeList nodes = dom.getElementsByTagName("info");
  assertEquals("incorrect number of elements", 1, nodes.getLength());

  for (int i = 0; i < nodes.getLength(); i++) {
    Element element = (Element) nodes.item(i);
    verifyAMInfoGeneric(ctx,
        WebServicesTestUtils.getXmlString(element, "appId"),
        WebServicesTestUtils.getXmlString(element, "user"),
        WebServicesTestUtils.getXmlString(element, "name"),
        WebServicesTestUtils.getXmlLong(element, "startedOn"),
        WebServicesTestUtils.getXmlLong(element, "elapsedTime"));
  }
}
Project: hadoop    File: TestAMWebApp.java
@Test public void testSingleTaskCounterView() {
  AppContext appContext = new MockAppContext(0, 1, 1, 2);
  Map<String, String> params = getTaskParams(appContext);
  params.put(AMParams.COUNTER_GROUP, 
      "org.apache.hadoop.mapreduce.FileSystemCounter");
  params.put(AMParams.COUNTER_NAME, "HDFS_WRITE_OPS");

  // remove counters from one task attempt
  // to test handling of missing counters
  TaskId taskID = MRApps.toTaskID(params.get(AMParams.TASK_ID));
  Job job = appContext.getJob(taskID.getJobId());
  Task task = job.getTask(taskID);
  TaskAttempt attempt = task.getAttempts().values().iterator().next();
  attempt.getReport().setCounters(null);

  WebAppTests.testPage(SingleCounterPage.class, AppContext.class,
                       appContext, params);
}
Project: hadoop    File: TestRMContainerAllocator.java
private static AppContext createAppContext(
    ApplicationAttemptId appAttemptId, Job job) {
  AppContext context = mock(AppContext.class);
  ApplicationId appId = appAttemptId.getApplicationId();
  when(context.getApplicationID()).thenReturn(appId);
  when(context.getApplicationAttemptId()).thenReturn(appAttemptId);
  when(context.getJob(isA(JobId.class))).thenReturn(job);
  when(context.getClusterInfo()).thenReturn(
    new ClusterInfo(Resource.newInstance(10240, 1)));
  when(context.getEventHandler()).thenReturn(new EventHandler() {
    @Override
    public void handle(Event event) {
      // Only capture interesting events.
      if (event instanceof TaskAttemptContainerAssignedEvent) {
        events.add((TaskAttemptContainerAssignedEvent) event);
      } else if (event instanceof TaskAttemptKillEvent) {
        taskAttemptKillEvents.add((TaskAttemptKillEvent)event);
      } else if (event instanceof JobUpdatedNodesEvent) {
        jobUpdatedNodeEvents.add((JobUpdatedNodesEvent)event);
      } else if (event instanceof JobEvent) {
        jobEvents.add((JobEvent)event);
      }
    }
  });
  return context;
}
Project: hadoop    File: TestHsWebServices.java
@Override
protected void configureServlets() {

  appContext = new MockHistoryContext(0, 1, 1, 1);
  JobHistory jobHistoryService = new JobHistory();
  HistoryContext historyContext = (HistoryContext) jobHistoryService;
  webApp = new HsWebApp(historyContext);

  bind(JAXBContextResolver.class);
  bind(HsWebServices.class);
  bind(GenericExceptionHandler.class);
  bind(WebApp.class).toInstance(webApp);
  bind(AppContext.class).toInstance(appContext);
  bind(HistoryContext.class).toInstance(appContext);
  bind(Configuration.class).toInstance(conf);

  serve("/*").with(GuiceContainer.class);
}
Project: hadoop    File: TestJobImpl.java
@Test
public void testTransitionsAtFailed() throws IOException {
  Configuration conf = new Configuration();
  AsyncDispatcher dispatcher = new AsyncDispatcher();
  dispatcher.init(conf);
  dispatcher.start();

  OutputCommitter committer = mock(OutputCommitter.class);
  doThrow(new IOException("forcefail"))
    .when(committer).setupJob(any(JobContext.class));
  CommitterEventHandler commitHandler =
      createCommitterEventHandler(dispatcher, committer);
  commitHandler.init(conf);
  commitHandler.start();

  AppContext mockContext = mock(AppContext.class);
  when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
  JobImpl job = createStubbedJob(conf, dispatcher, 2, mockContext);
  JobId jobId = job.getID();
  job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
  assertJobState(job, JobStateInternal.INITED);
  job.handle(new JobStartEvent(jobId));
  assertJobState(job, JobStateInternal.FAILED);

  job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_COMPLETED));
  assertJobState(job, JobStateInternal.FAILED);
  job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
  assertJobState(job, JobStateInternal.FAILED);
  job.handle(new JobEvent(jobId, JobEventType.JOB_MAP_TASK_RESCHEDULED));
  assertJobState(job, JobStateInternal.FAILED);
  job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE));
  assertJobState(job, JobStateInternal.FAILED);
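  // Internally the job is FAILED, but until the AM has successfully
  // unregistered from the RM the externally visible state stays RUNNING.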
  Assert.assertEquals(JobState.RUNNING, job.getState());
  when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
  Assert.assertEquals(JobState.FAILED, job.getState());

  dispatcher.stop();
  commitHandler.stop();
}
Project: hadoop    File: TestJobImpl.java
private static CommitterEventHandler createCommitterEventHandler(
    Dispatcher dispatcher, OutputCommitter committer) {
  final SystemClock clock = new SystemClock();
  AppContext appContext = mock(AppContext.class);
  when(appContext.getEventHandler()).thenReturn(
      dispatcher.getEventHandler());
  when(appContext.getClock()).thenReturn(clock);
  RMHeartbeatHandler heartbeatHandler = new RMHeartbeatHandler() {
    @Override
    public long getLastHeartbeatTime() {
      return clock.getTime();
    }
    @Override
    public void runOnNextHeartbeat(Runnable callback) {
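      // Tests have no real RM heartbeat, so fire the callback inline.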
      callback.run();
    }
  };
  ApplicationAttemptId id = 
    ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
  when(appContext.getApplicationID()).thenReturn(id.getApplicationId());
  when(appContext.getApplicationAttemptId()).thenReturn(id);
  CommitterEventHandler handler =
      new CommitterEventHandler(appContext, committer, heartbeatHandler);
  dispatcher.register(CommitterEventType.class, handler);
  return handler;
}
Project: hadoop    File: TestJobImpl.java
public StubbedJob(JobId jobId, ApplicationAttemptId applicationAttemptId,
    Configuration conf, EventHandler eventHandler, boolean newApiCommitter,
    String user, int numSplits, AppContext appContext) {
  super(jobId, applicationAttemptId, conf, eventHandler,
      null, new JobTokenSecretManager(), new Credentials(),
      new SystemClock(), Collections.<TaskId, TaskInfo> emptyMap(),
      MRAppMetrics.create(), null, newApiCommitter, user,
      System.currentTimeMillis(), null, appContext, null, null);

  initTransition = getInitTransition(numSplits);
  localFactory = stateMachineFactory.addTransition(JobStateInternal.NEW,
        EnumSet.of(JobStateInternal.INITED, JobStateInternal.FAILED),
        JobEventType.JOB_INIT,
        // This is abusive.
        initTransition);

  // This "this leak" is okay because the retained pointer is in an
  //  instance variable.
  localStateMachine = localFactory.make(this);
}
Project: hadoop    File: TestHSWebApp.java
@Test
public void testLogsView1() throws IOException {
  LOG.info("HsLogsPage");
  Injector injector =
      WebAppTests.testPage(AggregatedLogsPage.class, AppContext.class,
          new MockAppContext(0, 1, 1, 1));
  PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
  verify(spyPw).write("Cannot get container logs without a ContainerId");
  verify(spyPw).write("Cannot get container logs without a NodeId");
  verify(spyPw).write("Cannot get container logs without an app owner");
}
Project: hadoop    File: LocalContainerLauncher.java
public LocalContainerLauncher(AppContext context,
                              TaskUmbilicalProtocol umbilical) {
  super(LocalContainerLauncher.class.getName());
  this.context = context;
  this.umbilical = umbilical;
      // umbilical:  MRAppMaster creates (taskAttemptListener), passes to us
      // (TODO/FIXME:  pointless to use RPC to talk to self; should create
      // LocalTaskAttemptListener or similar:  implement umbilical protocol
      // but skip RPC stuff)

  try {
    curFC = FileContext.getFileContext(curDir.toURI());
  } catch (UnsupportedFileSystemException ufse) {
    LOG.error("Local filesystem " + curDir.toURI().toString()
              + " is unsupported?? (should never happen)");
  }

  // Save list of files/dirs that are supposed to be present so can delete
  // any extras created by one task before starting subsequent task.  Note
  // that there's no protection against deleted or renamed localization;
  // users who do that get what they deserve (and will have to disable
  // uberization in order to run correctly).
  File[] curLocalFiles = curDir.listFiles();
  localizedFiles = new HashSet<File>(curLocalFiles.length);
  for (int j = 0; j < curLocalFiles.length; ++j) {
    localizedFiles.add(curLocalFiles[j]);
  }

  // Relocalization note/future FIXME (per chrisdo, 20110315):  At moment,
  // full localization info is in AppSubmissionContext passed from client to
  // RM and then to NM for AM-container launch:  no difference between AM-
  // localization and MapTask- or ReduceTask-localization, so can assume all
  // OK.  Longer-term, will need to override uber-AM container-localization
  // request ("needed resources") with union of regular-AM-resources + task-
  // resources (and, if maps and reduces ever differ, then union of all three
  // types), OR will need localizer service/API that uber-AM can request
  // after running (e.g., "localizeForTask()" or "localizeForMapTask()").
}
Project: hadoop    File: MapTaskAttemptImpl.java
public MapTaskAttemptImpl(TaskId taskId, int attempt, 
    EventHandler eventHandler, Path jobFile, 
    int partition, TaskSplitMetaInfo splitInfo, JobConf conf,
    TaskAttemptListener taskAttemptListener, 
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    AppContext appContext) {
  super(taskId, attempt, eventHandler, 
      taskAttemptListener, jobFile, partition, conf, splitInfo.getLocations(),
      jobToken, credentials, clock, appContext);
  this.splitInfo = splitInfo;
}
Project: hadoop    File: TestHSWebApp.java
@Test
public void testTaskView() {
  LOG.info("HsTaskPage");
  AppContext appContext = new MockAppContext(0, 1, 1, 1);
  Map<String, String> params = TestAMWebApp.getTaskParams(appContext);
  WebAppTests
      .testPage(HsTaskPage.class, AppContext.class, appContext, params);
}
Project: hadoop    File: TaskSpeculationPredicate.java
boolean canSpeculate(AppContext context, TaskId taskID) {
  // This class rejects speculating any task that already has speculations,
  //  or isn't running.
  //  Subclasses should call TaskSpeculationPredicate.canSpeculate(...) , but
  //  can be even more restrictive.
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);
  Task task = job.getTask(taskID);
  return task.getAttempts().size() == 1;
}
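
The comment invites subclasses to layer extra restrictions on top of this base check. A hypothetical example (the map-only rule is purely illustrative; it sits in the same package so it can override the package-private method):

package org.apache.hadoop.mapreduce.v2.app.speculate;

import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;

// Hypothetical predicate that additionally refuses to speculate reduces.
class MapOnlySpeculationPredicate extends TaskSpeculationPredicate {
  @Override
  boolean canSpeculate(AppContext context, TaskId taskID) {
    return super.canSpeculate(context, taskID)
        && taskID.getTaskType() == TaskType.MAP;
  }
}
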
Project: hadoop    File: TestHSWebApp.java
@Test public void testAttemptsWithJobView() {
  LOG.info("HsAttemptsPage with data");
  MockAppContext ctx = new MockAppContext(0, 1, 1, 1);
  JobId id = ctx.getAllJobs().keySet().iterator().next();
  Map<String, String> params = new HashMap<String,String>();
  params.put(JOB_ID, id.toString());
  params.put(TASK_TYPE, "m");
  params.put(ATTEMPT_STATE, "SUCCESSFUL");
  WebAppTests.testPage(HsAttemptsPage.class, AppContext.class,
      ctx, params);
}
Project: hadoop    File: LocalContainerAllocator.java
public LocalContainerAllocator(ClientService clientService,
    AppContext context, String nmHost, int nmPort, int nmHttpPort,
    ContainerId cId) {
  super(clientService, context);
  this.eventHandler = context.getEventHandler();
  this.nmHost = nmHost;
  this.nmPort = nmPort;
  this.nmHttpPort = nmHttpPort;
  this.containerId = cId;
}
Project: hadoop    File: AppInfo.java
public AppInfo(App app, AppContext context) {
  this.appId = context.getApplicationID().toString();
  this.name = context.getApplicationName().toString();
  this.user = context.getUser().toString();
  this.startedOn = context.getStartTime();
  this.elapsedTime = Times.elapsed(this.startedOn, 0);
}
Project: hadoop    File: TestHSWebApp.java
@Test public void testAppControllerIndex() {
  MockAppContext ctx = new MockAppContext(0, 1, 1, 1);
  Injector injector = WebAppTests.createMockInjector(AppContext.class, ctx);
  HsController controller = injector.getInstance(HsController.class);
  controller.index();
  assertEquals(ctx.getApplicationID().toString(), controller.get(APP_ID,""));
}
Project: hadoop    File: MapTaskImpl.java
public MapTaskImpl(JobId jobId, int partition, EventHandler eventHandler,
    Path remoteJobConfFile, JobConf conf,
    TaskSplitMetaInfo taskSplitMetaInfo,
    TaskAttemptListener taskAttemptListener,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
  super(jobId, TaskType.MAP, partition, eventHandler, remoteJobConfFile,
      conf, taskAttemptListener, jobToken, credentials, clock,
      appAttemptId, metrics, appContext);
  this.taskSplitMetaInfo = taskSplitMetaInfo;
}
Project: hadoop    File: ReduceTaskImpl.java
public ReduceTaskImpl(JobId jobId, int partition,
    EventHandler eventHandler, Path jobFile, JobConf conf,
    int numMapTasks, TaskAttemptListener taskAttemptListener,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
  super(jobId, TaskType.REDUCE, partition, eventHandler, jobFile, conf,
      taskAttemptListener, jobToken, credentials, clock,
      appAttemptId, metrics, appContext);
  this.numMapTasks = numMapTasks;
}
Project: hadoop    File: RMCommunicator.java
public RMCommunicator(ClientService clientService, AppContext context) {
  super("RMCommunicator");
  this.clientService = clientService;
  this.context = context;
  this.eventHandler = context.getEventHandler();
  this.applicationId = context.getApplicationID();
  this.stopped = new AtomicBoolean(false);
  this.heartbeatCallbacks = new ConcurrentLinkedQueue<Runnable>();
  this.schedulerResourceTypes = EnumSet.of(SchedulerResourceTypes.MEMORY);
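  // Default to memory-only scheduling; the set may be widened after
  // registering with the RM.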
}
Project: hadoop    File: TestTaskAttemptListenerImpl.java
public MockTaskAttemptListenerImpl(AppContext context,
    JobTokenSecretManager jobTokenSecretManager,
    RMHeartbeatHandler rmHeartbeatHandler,
    TaskHeartbeatHandler hbHandler) {
  super(context, jobTokenSecretManager, rmHeartbeatHandler, null);
  this.taskHeartbeatHandler = hbHandler;
}
Project: hadoop    File: TestAMWebServicesTasks.java
@Override
protected void configureServlets() {

  appContext = new MockAppContext(0, 1, 2, 1);
  bind(JAXBContextResolver.class);
  bind(AMWebServices.class);
  bind(GenericExceptionHandler.class);
  bind(AppContext.class).toInstance(appContext);
  bind(Configuration.class).toInstance(conf);

  serve("/*").with(GuiceContainer.class);
}
Project: hadoop    File: TestBlocks.java
/**
 * Test rendering for ConfBlock
 */
@Test
public void testConfigurationBlock() throws Exception {
  AppContext ctx = mock(AppContext.class);
  Job job = mock(Job.class);
  Path path = new Path("conf");
  Configuration configuration = new Configuration();
  configuration.set("Key for test", "Value for test");
  when(job.getConfFile()).thenReturn(path);
  when(job.loadConfFile()).thenReturn(configuration);

  when(ctx.getJob(any(JobId.class))).thenReturn(job);
  ConfBlockForTest configurationBlock = new ConfBlockForTest(ctx);
  PrintWriter pWriter = new PrintWriter(data);
  Block html = new BlockForTest(new HtmlBlockForTest(), pWriter, 0, false);

  configurationBlock.render(html);
  pWriter.flush();
  assertTrue(data.toString().contains(
          "Sorry, can't do anything without a JobID"));

  configurationBlock.addParameter(AMParams.JOB_ID, "job_01_01");
  data.reset();
  configurationBlock.render(html);
  pWriter.flush();
  assertTrue(data.toString().contains("Key for test"));

  assertTrue(data.toString().contains("Value for test"));

}
Project: hadoop    File: TestHSWebApp.java
@Test
public void testLogsViewSingle() throws IOException {
  LOG.info("HsLogsPage with params for single log and data limits");
  MockAppContext ctx = new MockAppContext(0, 1, 1, 1);
  Map<String, String> params = new HashMap<String, String>();

  final Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);

  params.put("start", "-2048");
  params.put("end", "-1024");
  params.put(CONTAINER_LOG_TYPE, "syslog");
  params.put(CONTAINER_ID, MRApp.newContainerId(1, 1, 333, 1)
      .toString());
  params.put(NM_NODENAME,
      NodeId.newInstance(MockJobs.NM_HOST, MockJobs.NM_PORT).toString());
  params.put(ENTITY_STRING, "container_10_0001_01_000001");
  params.put(APP_OWNER, "owner");

  Injector injector =
      WebAppTests.testPage(AggregatedLogsPage.class, AppContext.class, ctx,
          params, new AbstractModule() {
        @Override
        protected void configure() {
          bind(Configuration.class).toInstance(conf);
        }
      });
  PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
  verify(spyPw).write(
      "Logs not available for container_10_0001_01_000001."
          + " Aggregation may not be complete, "
          + "Check back later or try the nodemanager at "
          + MockJobs.NM_HOST + ":" + MockJobs.NM_PORT);
}
Project: hadoop    File: TestHsWebServicesJobs.java
@Test
public void testJobCountersForKilledJob() throws Exception {
  WebResource r = resource();
  appContext = new MockHistoryContext(0, 1, 1, 1, true);
  injector = Guice.createInjector(new ServletModule() {
    @Override
    protected void configureServlets() {

      webApp = mock(HsWebApp.class);
      when(webApp.name()).thenReturn("hsmockwebapp");

      bind(JAXBContextResolver.class);
      bind(HsWebServices.class);
      bind(GenericExceptionHandler.class);
      bind(WebApp.class).toInstance(webApp);
      bind(AppContext.class).toInstance(appContext);
      bind(HistoryContext.class).toInstance(appContext);
      bind(Configuration.class).toInstance(conf);

      serve("/*").with(GuiceContainer.class);
    }
  });

  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);

    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("counters/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    WebServicesTestUtils.checkStringMatch("id", MRApps.toString(id),
        info.getString("id"));
    assertTrue("Job shouldn't contain any counters", info.length() == 1);
  }
}
Project: hadoop    File: TestAMWebServicesJobs.java
@Override
protected void configureServlets() {

  appContext = new MockAppContext(0, 1, 2, 1);
  bind(JAXBContextResolver.class);
  bind(AMWebServices.class);
  bind(GenericExceptionHandler.class);
  bind(AppContext.class).toInstance(appContext);
  bind(Configuration.class).toInstance(conf);

  serve("/*").with(GuiceContainer.class);
}
Project: hadoop    File: TestAMWebServices.java
@Override
protected void configureServlets() {

  appContext = new MockAppContext(0, 1, 1, 1);
  appContext.setBlacklistedNodes(Sets.newHashSet("badnode1", "badnode2"));

  bind(JAXBContextResolver.class);
  bind(AMWebServices.class);
  bind(GenericExceptionHandler.class);
  bind(AppContext.class).toInstance(appContext);
  bind(Configuration.class).toInstance(conf);

  serve("/*").with(GuiceContainer.class);
}
Project: hadoop    File: TestAMWebServices.java
public void verifyAMInfo(JSONObject info, AppContext ctx)
    throws JSONException {
  assertEquals("incorrect number of elements", 5, info.length());

  verifyAMInfoGeneric(ctx, info.getString("appId"), info.getString("user"),
      info.getString("name"), info.getLong("startedOn"),
      info.getLong("elapsedTime"));
}
Project: hadoop    File: TestAMWebServices.java
public void verifyAMInfoGeneric(AppContext ctx, String id, String user,
    String name, long startedOn, long elapsedTime) {

  WebServicesTestUtils.checkStringMatch("id", ctx.getApplicationID()
      .toString(), id);
  WebServicesTestUtils.checkStringMatch("user", ctx.getUser().toString(),
      user);
  WebServicesTestUtils.checkStringMatch("name", ctx.getApplicationName(),
      name);

  assertEquals("startedOn incorrect", ctx.getStartTime(), startedOn);
  assertTrue("elapsedTime not greater then 0", (elapsedTime > 0));

}
Project: hadoop    File: TestAMWebApp.java
@Test public void testAppControllerIndex() {
  AppContext ctx = new MockAppContext(0, 1, 1, 1);
  Injector injector = WebAppTests.createMockInjector(AppContext.class, ctx);
  AppController controller = injector.getInstance(AppController.class);
  controller.index();
  assertEquals(ctx.getApplicationID().toString(), controller.get(APP_ID,""));
}
Project: hadoop    File: TestHSWebApp.java
@Test public void testAttemptsView() {
  LOG.info("HsAttemptsPage");
  AppContext appContext = new MockAppContext(0, 1, 1, 1);
  Map<String, String> params = TestAMWebApp.getTaskParams(appContext);
  WebAppTests.testPage(HsAttemptsPage.class, AppContext.class,
                       appContext, params);
}
Project: hadoop    File: TestAMWebApp.java
@Test public void testTaskView() {
  AppContext appContext = new MockAppContext(0, 1, 1, 1);
  Map<String, String> params = getTaskParams(appContext);
  App app = new App(appContext);
  app.setJob(appContext.getAllJobs().values().iterator().next());
  app.setTask(app.getJob().getTasks().values().iterator().next());
  WebAppTests.testPage(TaskPage.class, App.class, app, params);
}
Project: hadoop    File: TestHsWebServices.java
public void verifyHSInfo(JSONObject info, AppContext ctx)
    throws JSONException {
  assertEquals("incorrect number of elements", 4, info.length());

  verifyHsInfoGeneric(info.getString("hadoopVersionBuiltOn"),
      info.getString("hadoopBuildVersion"), info.getString("hadoopVersion"),
      info.getLong("startedOn"));
}
Project: hadoop    File: TestAMWebApp.java
public static Map<String, String> getTaskParams(AppContext appContext) {
  JobId jobId = appContext.getAllJobs().entrySet().iterator().next().getKey();
  Entry<TaskId, Task> e =
      appContext.getJob(jobId).getTasks().entrySet().iterator().next();
  Map<String, String> params = new HashMap<String, String>();
  params.put(AMParams.JOB_ID, MRApps.toString(jobId));
  params.put(AMParams.TASK_ID, MRApps.toString(e.getKey()));
  params.put(AMParams.TASK_TYPE, MRApps.taskSymbol(e.getValue().getType()));
  return params;
}
Project: hadoop    File: TestAMWebApp.java
@Test public void testSingleCounterView() {
  AppContext appContext = new MockAppContext(0, 1, 1, 1);
  Job job = appContext.getAllJobs().values().iterator().next();
  // add a failed task to the job without any counters
  Task failedTask = MockJobs.newTask(job.getID(), 2, 1, true);
  Map<TaskId,Task> tasks = job.getTasks();
  tasks.put(failedTask.getID(), failedTask);
  Map<String, String> params = getJobParams(appContext);
  params.put(AMParams.COUNTER_GROUP, 
      "org.apache.hadoop.mapreduce.FileSystemCounter");
  params.put(AMParams.COUNTER_NAME, "HDFS_WRITE_OPS");
  WebAppTests.testPage(SingleCounterPage.class, AppContext.class,
                       appContext, params);
}
Project: hadoop    File: HsWebApp.java
@Override
public void setup() {
  bind(HsWebServices.class);
  bind(JAXBContextResolver.class);
  bind(GenericExceptionHandler.class);
  bind(AppContext.class).toInstance(history);
  bind(HistoryContext.class).toInstance(history);
  route("/", HsController.class);
  route("/app", HsController.class);
  route(pajoin("/job", JOB_ID), HsController.class, "job");
  route(pajoin("/conf", JOB_ID), HsController.class, "conf");
  route(pajoin("/jobcounters", JOB_ID), HsController.class, "jobCounters");
  route(pajoin("/singlejobcounter",JOB_ID, COUNTER_GROUP, COUNTER_NAME),
      HsController.class, "singleJobCounter");
  route(pajoin("/tasks", JOB_ID, TASK_TYPE), HsController.class, "tasks");
  route(pajoin("/attempts", JOB_ID, TASK_TYPE, ATTEMPT_STATE),
      HsController.class, "attempts");
  route(pajoin("/task", TASK_ID), HsController.class, "task");
  route(pajoin("/taskcounters", TASK_ID), HsController.class, "taskCounters");
  route(pajoin("/singletaskcounter",TASK_ID, COUNTER_GROUP, COUNTER_NAME),
      HsController.class, "singleTaskCounter");
  route("/about", HsController.class, "about");
  route(pajoin("/logs", NM_NODENAME, CONTAINER_ID, ENTITY_STRING, APP_OWNER,
      CONTAINER_LOG_TYPE), HsController.class, "logs");
  route(pajoin("/nmlogs", NM_NODENAME, CONTAINER_ID, ENTITY_STRING, APP_OWNER,
      CONTAINER_LOG_TYPE), HsController.class, "nmlogs");
}