Java class org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction — example usage source code

项目:hadoop-2.6.0-cdh5.4.3    文件:TestCMExceptionDuringRunJob.java   
/**
 * Submits a sleep job (5 maps, 1 reduce) held under test-harness control and
 * asserts it starts; optionally releases all tasks and waits for completion.
 *
 * @param signalJob when true, signal every held task and assert the job stops
 * @return the ID of the submitted job
 * @throws Exception on submission or RPC failure
 */
public JobID runSleepJob(boolean signalJob) throws Exception{
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(conf);
  conf = sleepJob.setupJobConf(5, 1, 100, 5, 100, 5);
  JobConf jobConf = new JobConf(conf);
  // Keep the job under harness control until all verification is done.
  FinishTaskControlAction.configureControlActionForJob(conf);
  // Submit the job and fetch its server-side info.
  RunningJob runningJob = cluster.getJTClient().getClient().submitJob(jobConf);
  JobID id = runningJob.getID();
  JobInfo info = remoteJTClient.getJobInfo(id);
  LOG.info("jInfo is :" + info);
  Assert.assertTrue("Job has not started even after a minute",
      cluster.getJTClient().isJobStarted(id));

  if (signalJob) {
    // Release every held task, then wait for the job to stop.
    cluster.signalAllTasks(id);
    Assert.assertTrue("Job has not stopped yet",
        cluster.getJTClient().isJobStopped(id));
  }
  return id;
}
项目:hadoop-on-lustre    文件:TestCMExceptionDuringRunJob.java   
/**
 * Submits a sleep job (5 maps, 1 reduce) held under test-harness control and
 * asserts it starts; optionally releases all tasks and waits for completion.
 *
 * @param signalJob when true, signal every held task and assert the job stops
 * @return the ID of the submitted job
 * @throws Exception on submission or RPC failure
 */
public JobID runSleepJob(boolean signalJob) throws Exception{
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(conf);
  conf = sleepJob.setupJobConf(5, 1, 100, 5, 100, 5);
  JobConf jobConf = new JobConf(conf);
  // Keep the job under harness control until all verification is done.
  FinishTaskControlAction.configureControlActionForJob(conf);
  // Submit the job and fetch its server-side info.
  RunningJob runningJob = cluster.getJTClient().getClient().submitJob(jobConf);
  JobID id = runningJob.getID();
  JobInfo info = remoteJTClient.getJobInfo(id);
  LOG.info("jInfo is :" + info);
  Assert.assertTrue("Job has not started even after a minute",
      cluster.getJTClient().isJobStarted(id));

  if (signalJob) {
    // Release every held task, then wait for the job to stop.
    cluster.signalAllTasks(id);
    Assert.assertTrue("Job has not stopped yet",
        cluster.getJTClient().isJobStopped(id));
  }
  return id;
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestCMExceptionDuringRunJob.java   
/**
 * Submits a sleep job (5 maps, 1 reduce) held under test-harness control and
 * asserts it starts; optionally releases all tasks and waits for completion.
 *
 * @param signalJob when true, signal every held task and assert the job stops
 * @return the ID of the submitted job
 * @throws Exception on submission or RPC failure
 */
public JobID runSleepJob(boolean signalJob) throws Exception{
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(conf);
  conf = sleepJob.setupJobConf(5, 1, 100, 5, 100, 5);
  JobConf jobConf = new JobConf(conf);
  // Keep the job under harness control until all verification is done.
  FinishTaskControlAction.configureControlActionForJob(conf);
  // Submit the job and fetch its server-side info.
  RunningJob runningJob = cluster.getJTClient().getClient().submitJob(jobConf);
  JobID id = runningJob.getID();
  JobInfo info = remoteJTClient.getJobInfo(id);
  LOG.info("jInfo is :" + info);
  Assert.assertTrue("Job has not started even after a minute",
      cluster.getJTClient().isJobStarted(id));

  if (signalJob) {
    // Release every held task, then wait for the job to stop.
    cluster.signalAllTasks(id);
    Assert.assertTrue("Job has not stopped yet",
        cluster.getJTClient().isJobStopped(id));
  }
  return id;
}
项目:hortonworks-extension    文件:TestCMExceptionDuringRunJob.java   
/**
 * Submits a sleep job (5 maps, 1 reduce) held under test-harness control and
 * asserts it starts; optionally releases all tasks and waits for completion.
 *
 * @param signalJob when true, signal every held task and assert the job stops
 * @return the ID of the submitted job
 * @throws Exception on submission or RPC failure
 */
public JobID runSleepJob(boolean signalJob) throws Exception{
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(conf);
  conf = sleepJob.setupJobConf(5, 1, 100, 5, 100, 5);
  JobConf jobConf = new JobConf(conf);
  // Keep the job under harness control until all verification is done.
  FinishTaskControlAction.configureControlActionForJob(conf);
  // Submit the job and fetch its server-side info.
  RunningJob runningJob = cluster.getJTClient().getClient().submitJob(jobConf);
  JobID id = runningJob.getID();
  JobInfo info = remoteJTClient.getJobInfo(id);
  LOG.info("jInfo is :" + info);
  Assert.assertTrue("Job has not started even after a minute",
      cluster.getJTClient().isJobStarted(id));

  if (signalJob) {
    // Release every held task, then wait for the job to stop.
    cluster.signalAllTasks(id);
    Assert.assertTrue("Job has not stopped yet",
        cluster.getJTClient().isJobStopped(id));
  }
  return id;
}
项目:hortonworks-extension    文件:TestCMExceptionDuringRunJob.java   
/**
 * Submits a sleep job (5 maps, 1 reduce) held under test-harness control and
 * asserts it starts; optionally releases all tasks and waits for completion.
 *
 * @param signalJob when true, signal every held task and assert the job stops
 * @return the ID of the submitted job
 * @throws Exception on submission or RPC failure
 */
public JobID runSleepJob(boolean signalJob) throws Exception{
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(conf);
  conf = sleepJob.setupJobConf(5, 1, 100, 5, 100, 5);
  JobConf jobConf = new JobConf(conf);
  // Keep the job under harness control until all verification is done.
  FinishTaskControlAction.configureControlActionForJob(conf);
  // Submit the job and fetch its server-side info.
  RunningJob runningJob = cluster.getJTClient().getClient().submitJob(jobConf);
  JobID id = runningJob.getID();
  JobInfo info = remoteJTClient.getJobInfo(id);
  LOG.info("jInfo is :" + info);
  Assert.assertTrue("Job has not started even after a minute",
      cluster.getJTClient().isJobStarted(id));

  if (signalJob) {
    // Release every held task, then wait for the job to stop.
    cluster.signalAllTasks(id);
    Assert.assertTrue("Job has not stopped yet",
        cluster.getJTClient().isJobStopped(id));
  }
  return id;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestJobCacheDirectoriesCleanUp.java   
/**
 * Creates marker files in the task-attempt directories under the tracker's
 * mapred local dirs, then signals the held task to finish.
 *
 * @param ttClient tracker hosting the task; may be null, in which case no
 *                 files are created and no finish action is sent
 * @param taskinfo info for the task whose attempt-0 directories are used
 * @param jobId    job that owns the task
 * @return paths of the files created, or null when {@code ttClient} is null
 * @throws IOException on RPC or filesystem failure
 */
private static ArrayList <String> getTTClientMapRedLocalDirs(
    TTClient ttClient, TaskInfo taskinfo, JobID jobId) throws IOException {
  ArrayList <String> fileList = null;
  TaskID taskId = TaskID.downgrade(taskinfo.getTaskID());
  FinishTaskControlAction action = new FinishTaskControlAction(taskId);
  if (ttClient != null) {
    String localDirs[] = ttClient.getMapredLocalDirs();
    TaskAttemptID taskAttID = new TaskAttemptID(taskId, 0);
    fileList = createFilesInTaskDir(localDirs, jobId, taskAttID, ttClient);
    // BUG FIX: the finish action was previously sent outside this null
    // check, dereferencing ttClient unconditionally and throwing an NPE
    // whenever ttClient was null.
    ttClient.getProxy().sendAction(action);
  }
  return fileList;
}
项目:hadoop-on-lustre    文件:TestJobCacheDirectoriesCleanUp.java   
/**
 * Creates marker files in the task-attempt directories under the tracker's
 * mapred local dirs, then signals the held task to finish.
 *
 * @param ttClient tracker hosting the task; may be null, in which case no
 *                 files are created and no finish action is sent
 * @param taskinfo info for the task whose attempt-0 directories are used
 * @param jobId    job that owns the task
 * @return paths of the files created, or null when {@code ttClient} is null
 * @throws IOException on RPC or filesystem failure
 */
private static ArrayList <String> getTTClientMapRedLocalDirs(
    TTClient ttClient, TaskInfo taskinfo, JobID jobId) throws IOException {
  ArrayList <String> fileList = null;
  TaskID taskId = TaskID.downgrade(taskinfo.getTaskID());
  FinishTaskControlAction action = new FinishTaskControlAction(taskId);
  if (ttClient != null) {
    String localDirs[] = ttClient.getMapredLocalDirs();
    TaskAttemptID taskAttID = new TaskAttemptID(taskId, 0);
    fileList = createFilesInTaskDir(localDirs, jobId, taskAttID, ttClient);
    // BUG FIX: the finish action was previously sent outside this null
    // check, dereferencing ttClient unconditionally and throwing an NPE
    // whenever ttClient was null.
    ttClient.getProxy().sendAction(action);
  }
  return fileList;
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestJobCacheDirectoriesCleanUp.java   
/**
 * Creates marker files in the task-attempt directories under the tracker's
 * mapred local dirs, then signals the held task to finish.
 *
 * @param ttClient tracker hosting the task; may be null, in which case no
 *                 files are created and no finish action is sent
 * @param taskinfo info for the task whose attempt-0 directories are used
 * @param jobId    job that owns the task
 * @return paths of the files created, or null when {@code ttClient} is null
 * @throws IOException on RPC or filesystem failure
 */
private static ArrayList <String> getTTClientMapRedLocalDirs(
    TTClient ttClient, TaskInfo taskinfo, JobID jobId) throws IOException {
  ArrayList <String> fileList = null;
  TaskID taskId = TaskID.downgrade(taskinfo.getTaskID());
  FinishTaskControlAction action = new FinishTaskControlAction(taskId);
  if (ttClient != null) {
    String localDirs[] = ttClient.getMapredLocalDirs();
    TaskAttemptID taskAttID = new TaskAttemptID(taskId, 0);
    fileList = createFilesInTaskDir(localDirs, jobId, taskAttID, ttClient);
    // BUG FIX: the finish action was previously sent outside this null
    // check, dereferencing ttClient unconditionally and throwing an NPE
    // whenever ttClient was null.
    ttClient.getProxy().sendAction(action);
  }
  return fileList;
}
项目:hortonworks-extension    文件:TestJobCacheDirectoriesCleanUp.java   
/**
 * Creates marker files in the task-attempt directories under the tracker's
 * mapred local dirs, then signals the held task to finish.
 *
 * @param ttClient tracker hosting the task; may be null, in which case no
 *                 files are created and no finish action is sent
 * @param taskinfo info for the task whose attempt-0 directories are used
 * @param jobId    job that owns the task
 * @return paths of the files created, or null when {@code ttClient} is null
 * @throws IOException on RPC or filesystem failure
 */
private static ArrayList <String> getTTClientMapRedLocalDirs(
    TTClient ttClient, TaskInfo taskinfo, JobID jobId) throws IOException {
  ArrayList <String> fileList = null;
  TaskID taskId = TaskID.downgrade(taskinfo.getTaskID());
  FinishTaskControlAction action = new FinishTaskControlAction(taskId);
  if (ttClient != null) {
    String localDirs[] = ttClient.getMapredLocalDirs();
    TaskAttemptID taskAttID = new TaskAttemptID(taskId, 0);
    fileList = createFilesInTaskDir(localDirs, jobId, taskAttID, ttClient);
    // BUG FIX: the finish action was previously sent outside this null
    // check, dereferencing ttClient unconditionally and throwing an NPE
    // whenever ttClient was null.
    ttClient.getProxy().sendAction(action);
  }
  return fileList;
}
项目:hortonworks-extension    文件:TestJobCacheDirectoriesCleanUp.java   
/**
 * Creates marker files in the task-attempt directories under the tracker's
 * mapred local dirs, then signals the held task to finish.
 *
 * @param ttClient tracker hosting the task; may be null, in which case no
 *                 files are created and no finish action is sent
 * @param taskinfo info for the task whose attempt-0 directories are used
 * @param jobId    job that owns the task
 * @return paths of the files created, or null when {@code ttClient} is null
 * @throws IOException on RPC or filesystem failure
 */
private static ArrayList <String> getTTClientMapRedLocalDirs(
    TTClient ttClient, TaskInfo taskinfo, JobID jobId) throws IOException {
  ArrayList <String> fileList = null;
  TaskID taskId = TaskID.downgrade(taskinfo.getTaskID());
  FinishTaskControlAction action = new FinishTaskControlAction(taskId);
  if (ttClient != null) {
    String localDirs[] = ttClient.getMapredLocalDirs();
    TaskAttemptID taskAttID = new TaskAttemptID(taskId, 0);
    fileList = createFilesInTaskDir(localDirs, jobId, taskAttID, ttClient);
    // BUG FIX: the finish action was previously sent outside this null
    // check, dereferencing ttClient unconditionally and throwing an NPE
    // whenever ttClient was null.
    ttClient.getProxy().sendAction(action);
  }
  return fileList;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestFileOwner.java   
/**
 * Verifies ownership/permissions of task files created by the configured
 * task controller. Tasks are held via {@link FinishTaskControlAction} so
 * their directories persist during the checks, then released so the job
 * can finish.
 */
@Test
public void testFilePermission() throws Exception {
  wovenClient = cluster.getJTClient().getProxy();
  Configuration conf = new Configuration(cluster.getConf());
  // Hold every task under harness control until we explicitly release it.
  FinishTaskControlAction.configureControlActionForJob(conf);
  SleepJob job = new SleepJob();
  job.setConf(conf);
  conf = job.setupJobConf(1, 0, 100, 100, 100, 100);
  JobConf jconf = new JobConf(conf);
  RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
  taskController = conf.get("mapred.task.tracker.task-controller");
  // Wait for the single map task to enter the running state; only then
  // are the task directories created.
  JobInfo info = wovenClient.getJobInfo(rJob.getID());
  Assert.assertNotNull("JobInfo is null", info);
  JobID id = rJob.getID();
  while (info.runningMaps() != 1) {
    Thread.sleep(1000);
    info = wovenClient.getJobInfo(id);
  }
  TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
  for (TaskInfo tInfo : myTaskInfos) {
    if (!tInfo.isSetupOrCleanup()) {
      String[] taskTrackers = tInfo.getTaskTrackers();
      for (String taskTracker : taskTrackers) {
        TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
        TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost());
        Assert.assertNotNull("TTClient instance is null", ttCli);
        TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
        Assert.assertNotNull("TTTaskInfo is null", ttTaskInfo);
        // Poll until the attempt is actually RUNNING on this tracker.
        while (ttTaskInfo.getTaskStatus().getRunState() !=
               TaskStatus.State.RUNNING) {
          Thread.sleep(100);
          ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
        }
        testPermissionWithTaskController(ttCli, conf, info);
        // Release the held task on every tracker so the job can proceed.
        FinishTaskControlAction action = new FinishTaskControlAction(TaskID
            .downgrade(tInfo.getTaskID()));
        for (TTClient cli : cluster.getTTClients()) {
          cli.getProxy().sendAction(action);
        }
      }
    }
  }
  // Wait for job completion. (Removed a redundant wovenClient.getJobInfo
  // call whose result was immediately overwritten.)
  JobInfo jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
  while (!jInfo.getStatus().isJobComplete()) {
    Thread.sleep(100);
    jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
  }
}
项目:hadoop-on-lustre    文件:TestFileOwner.java   
/**
 * Verifies ownership/permissions of task files created by the configured
 * task controller. Tasks are held via {@link FinishTaskControlAction} so
 * their directories persist during the checks, then released so the job
 * can finish.
 */
@Test
public void testFilePermission() throws Exception {
  wovenClient = cluster.getJTClient().getProxy();
  Configuration conf = new Configuration(cluster.getConf());
  // Hold every task under harness control until we explicitly release it.
  FinishTaskControlAction.configureControlActionForJob(conf);
  SleepJob job = new SleepJob();
  job.setConf(conf);
  conf = job.setupJobConf(1, 0, 100, 100, 100, 100);
  JobConf jconf = new JobConf(conf);
  RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
  taskController = conf.get("mapred.task.tracker.task-controller");
  // Wait for the single map task to enter the running state; only then
  // are the task directories created.
  JobInfo info = wovenClient.getJobInfo(rJob.getID());
  Assert.assertNotNull("JobInfo is null", info);
  JobID id = rJob.getID();
  while (info.runningMaps() != 1) {
    Thread.sleep(1000);
    info = wovenClient.getJobInfo(id);
  }
  TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
  for (TaskInfo tInfo : myTaskInfos) {
    if (!tInfo.isSetupOrCleanup()) {
      String[] taskTrackers = tInfo.getTaskTrackers();
      for (String taskTracker : taskTrackers) {
        TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
        TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost());
        Assert.assertNotNull("TTClient instance is null", ttCli);
        TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
        Assert.assertNotNull("TTTaskInfo is null", ttTaskInfo);
        // Poll until the attempt is actually RUNNING on this tracker.
        while (ttTaskInfo.getTaskStatus().getRunState() !=
               TaskStatus.State.RUNNING) {
          Thread.sleep(100);
          ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
        }
        testPermissionWithTaskController(ttCli, conf, info);
        // Release the held task on every tracker so the job can proceed.
        FinishTaskControlAction action = new FinishTaskControlAction(TaskID
            .downgrade(tInfo.getTaskID()));
        for (TTClient cli : cluster.getTTClients()) {
          cli.getProxy().sendAction(action);
        }
      }
    }
  }
  // Wait for job completion. (Removed a redundant wovenClient.getJobInfo
  // call whose result was immediately overwritten.)
  JobInfo jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
  while (!jInfo.getStatus().isJobComplete()) {
    Thread.sleep(100);
    jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
  }
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestFileOwner.java   
/**
 * Verifies ownership/permissions of task files created by the configured
 * task controller. Tasks are held via {@link FinishTaskControlAction} so
 * their directories persist during the checks, then released so the job
 * can finish.
 */
@Test
public void testFilePermission() throws Exception {
  wovenClient = cluster.getJTClient().getProxy();
  Configuration conf = new Configuration(cluster.getConf());
  // Hold every task under harness control until we explicitly release it.
  FinishTaskControlAction.configureControlActionForJob(conf);
  SleepJob job = new SleepJob();
  job.setConf(conf);
  conf = job.setupJobConf(1, 0, 100, 100, 100, 100);
  JobConf jconf = new JobConf(conf);
  RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
  taskController = conf.get("mapred.task.tracker.task-controller");
  // Wait for the single map task to enter the running state; only then
  // are the task directories created.
  JobInfo info = wovenClient.getJobInfo(rJob.getID());
  Assert.assertNotNull("JobInfo is null", info);
  JobID id = rJob.getID();
  while (info.runningMaps() != 1) {
    Thread.sleep(1000);
    info = wovenClient.getJobInfo(id);
  }
  TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
  for (TaskInfo tInfo : myTaskInfos) {
    if (!tInfo.isSetupOrCleanup()) {
      String[] taskTrackers = tInfo.getTaskTrackers();
      for (String taskTracker : taskTrackers) {
        TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
        TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost());
        Assert.assertNotNull("TTClient instance is null", ttCli);
        TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
        Assert.assertNotNull("TTTaskInfo is null", ttTaskInfo);
        // Poll until the attempt is actually RUNNING on this tracker.
        while (ttTaskInfo.getTaskStatus().getRunState() !=
               TaskStatus.State.RUNNING) {
          Thread.sleep(100);
          ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
        }
        testPermissionWithTaskController(ttCli, conf, info);
        // Release the held task on every tracker so the job can proceed.
        FinishTaskControlAction action = new FinishTaskControlAction(TaskID
            .downgrade(tInfo.getTaskID()));
        for (TTClient cli : cluster.getTTClients()) {
          cli.getProxy().sendAction(action);
        }
      }
    }
  }
  // Wait for job completion. (Removed a redundant wovenClient.getJobInfo
  // call whose result was immediately overwritten.)
  JobInfo jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
  while (!jInfo.getStatus().isJobComplete()) {
    Thread.sleep(100);
    jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
  }
}
项目:mapreduce-fork    文件:TestFileOwner.java   
/**
 * Verifies ownership/permissions of task files created by the configured
 * task controller. Tasks are held via {@link FinishTaskControlAction} so
 * their directories persist during the checks, then released so the job
 * can finish.
 */
@Test
public void testFilePermission() throws Exception {
  wovenClient = cluster.getJTClient().getProxy();
  Configuration conf = new Configuration(cluster.getConf());
  // Hold every task under harness control until we explicitly release it.
  FinishTaskControlAction.configureControlActionForJob(conf);
  SleepJob job = new SleepJob();
  job.setConf(conf);
  Job slpJob = job.createJob(1, 0, 100, 100, 100, 100);
  // (Removed an unused local "JobConf jconf"; the job is submitted via
  // slpJob.submit(), not through a JobConf.)
  slpJob.submit();
  RunningJob rJob =
      cluster.getJTClient().getClient().getJob(
          org.apache.hadoop.mapred.JobID.downgrade(slpJob.getJobID()));
  taskController = conf.get(TTConfig.TT_TASK_CONTROLLER);
  // Wait for the single map task to enter the running state; only then
  // are the task directories created.
  JobInfo info = wovenClient.getJobInfo(rJob.getID());
  Assert.assertNotNull("JobInfo is null", info);
  JobID id = rJob.getID();
  while (info.runningMaps() != 1) {
    Thread.sleep(1000);
    info = wovenClient.getJobInfo(id);
  }
  TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
  for (TaskInfo tInfo : myTaskInfos) {
    if (!tInfo.isSetupOrCleanup()) {
      String[] taskTrackers = tInfo.getTaskTrackers();
      for (String taskTracker : taskTrackers) {
        TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
        TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost());
        Assert.assertNotNull("TTClient instance is null", ttCli);
        TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
        Assert.assertNotNull("TTTaskInfo is null", ttTaskInfo);
        // Poll until the attempt is actually RUNNING on this tracker.
        while (ttTaskInfo.getTaskStatus().getRunState() != TaskStatus.State.RUNNING) {
          Thread.sleep(100);
          ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
        }
        testPermissionWithTaskController(ttCli, conf, info);
        // Release the held task on every tracker so the job can proceed.
        FinishTaskControlAction action =
            new FinishTaskControlAction(TaskID.downgrade(tInfo.getTaskID()));
        for (TTClient cli : cluster.getTTClients()) {
          cli.getProxy().sendAction(action);
        }
      }
    }
  }
  // Wait for job completion. (Removed a redundant wovenClient.getJobInfo
  // call whose result was immediately overwritten.)
  JobInfo jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
  while (!jInfo.getStatus().isJobComplete()) {
    Thread.sleep(100);
    jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
  }
}
项目:hortonworks-extension    文件:TestFileOwner.java   
/**
 * Verifies ownership/permissions of task files created by the configured
 * task controller. Tasks are held via {@link FinishTaskControlAction} so
 * their directories persist during the checks, then released so the job
 * can finish.
 */
@Test
public void testFilePermission() throws Exception {
  wovenClient = cluster.getJTClient().getProxy();
  Configuration conf = new Configuration(cluster.getConf());
  // Hold every task under harness control until we explicitly release it.
  FinishTaskControlAction.configureControlActionForJob(conf);
  SleepJob job = new SleepJob();
  job.setConf(conf);
  conf = job.setupJobConf(1, 0, 100, 100, 100, 100);
  JobConf jconf = new JobConf(conf);
  RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
  taskController = conf.get("mapred.task.tracker.task-controller");
  // Wait for the single map task to enter the running state; only then
  // are the task directories created.
  JobInfo info = wovenClient.getJobInfo(rJob.getID());
  Assert.assertNotNull("JobInfo is null", info);
  JobID id = rJob.getID();
  while (info.runningMaps() != 1) {
    Thread.sleep(1000);
    info = wovenClient.getJobInfo(id);
  }
  TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
  for (TaskInfo tInfo : myTaskInfos) {
    if (!tInfo.isSetupOrCleanup()) {
      String[] taskTrackers = tInfo.getTaskTrackers();
      for (String taskTracker : taskTrackers) {
        TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
        TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost());
        Assert.assertNotNull("TTClient instance is null", ttCli);
        TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
        Assert.assertNotNull("TTTaskInfo is null", ttTaskInfo);
        // Poll until the attempt is actually RUNNING on this tracker.
        while (ttTaskInfo.getTaskStatus().getRunState() !=
               TaskStatus.State.RUNNING) {
          Thread.sleep(100);
          ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
        }
        testPermissionWithTaskController(ttCli, conf, info);
        // Release the held task on every tracker so the job can proceed.
        FinishTaskControlAction action = new FinishTaskControlAction(TaskID
            .downgrade(tInfo.getTaskID()));
        for (TTClient cli : cluster.getTTClients()) {
          cli.getProxy().sendAction(action);
        }
      }
    }
  }
  // Wait for job completion. (Removed a redundant wovenClient.getJobInfo
  // call whose result was immediately overwritten.)
  JobInfo jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
  while (!jInfo.getStatus().isJobComplete()) {
    Thread.sleep(100);
    jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
  }
}
项目:hortonworks-extension    文件:TestFileOwner.java   
/**
 * Verifies ownership/permissions of task files created by the configured
 * task controller. Tasks are held via {@link FinishTaskControlAction} so
 * their directories persist during the checks, then released so the job
 * can finish.
 */
@Test
public void testFilePermission() throws Exception {
  wovenClient = cluster.getJTClient().getProxy();
  Configuration conf = new Configuration(cluster.getConf());
  // Hold every task under harness control until we explicitly release it.
  FinishTaskControlAction.configureControlActionForJob(conf);
  SleepJob job = new SleepJob();
  job.setConf(conf);
  conf = job.setupJobConf(1, 0, 100, 100, 100, 100);
  JobConf jconf = new JobConf(conf);
  RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
  taskController = conf.get("mapred.task.tracker.task-controller");
  // Wait for the single map task to enter the running state; only then
  // are the task directories created.
  JobInfo info = wovenClient.getJobInfo(rJob.getID());
  Assert.assertNotNull("JobInfo is null", info);
  JobID id = rJob.getID();
  while (info.runningMaps() != 1) {
    Thread.sleep(1000);
    info = wovenClient.getJobInfo(id);
  }
  TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
  for (TaskInfo tInfo : myTaskInfos) {
    if (!tInfo.isSetupOrCleanup()) {
      String[] taskTrackers = tInfo.getTaskTrackers();
      for (String taskTracker : taskTrackers) {
        TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
        TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost());
        Assert.assertNotNull("TTClient instance is null", ttCli);
        TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
        Assert.assertNotNull("TTTaskInfo is null", ttTaskInfo);
        // Poll until the attempt is actually RUNNING on this tracker.
        while (ttTaskInfo.getTaskStatus().getRunState() !=
               TaskStatus.State.RUNNING) {
          Thread.sleep(100);
          ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
        }
        testPermissionWithTaskController(ttCli, conf, info);
        // Release the held task on every tracker so the job can proceed.
        FinishTaskControlAction action = new FinishTaskControlAction(TaskID
            .downgrade(tInfo.getTaskID()));
        for (TTClient cli : cluster.getTTClients()) {
          cli.getProxy().sendAction(action);
        }
      }
    }
  }
  // Wait for job completion. (Removed a redundant wovenClient.getJobInfo
  // call whose result was immediately overwritten.)
  JobInfo jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
  while (!jInfo.getStatus().isJobComplete()) {
    Thread.sleep(100);
    jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
  }
}