Java Class org.apache.hadoop.mapreduce.Cluster: Source Code Examples
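The snippets below, collected from open-source projects, show how org.apache.hadoop.mapreduce.Cluster is used in practice: constructing a Cluster from a Configuration, resolving the job-submission staging directory, looking up and killing jobs, and mocking the class in tests. As a minimal orientation sketch (the job ID string is a placeholder, not taken from any project below), a client typically connects and looks up a job like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;

public class ClusterExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Connect to the cluster described by the configuration
    // (fs.defaultFS, mapreduce.framework.name, and so on).
    Cluster cluster = new Cluster(conf);
    try {
      // Look up a job by ID; the ID here is a placeholder.
      Job job = cluster.getJob(JobID.forName("job_1234567890123_0001"));
      if (job != null && !job.isComplete()) {
        job.killJob();
      }
    } finally {
      cluster.close();
    }
  }
}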

Project: hadoop    File: TestCLI.java
@Test
public void testListAttemptIdsWithInvalidInputs() throws Exception {
  JobID jobId = JobID.forName(jobIdStr);
  Cluster mockCluster = mock(Cluster.class);
  Job job = mock(Job.class);
  CLI cli = spy(new CLI());

  doReturn(mockCluster).when(cli).createCluster();
  when(mockCluster.getJob(jobId)).thenReturn(job);

  int retCode_JOB_SETUP = cli.run(new String[] { "-list-attempt-ids",
      jobIdStr, "JOB_SETUP", "running" });

  int retCode_JOB_CLEANUP = cli.run(new String[] { "-list-attempt-ids",
      jobIdStr, "JOB_CLEANUP", "running" });

  int retCode_invalidTaskState = cli.run(new String[] { "-list-attempt-ids",
      jobIdStr, "REDUCE", "complete" });

  assertEquals("JOB_SETUP is an invalid input,exit code should be -1", -1,
      retCode_JOB_SETUP);
  assertEquals("JOB_CLEANUP is an invalid input,exit code should be -1", -1,
      retCode_JOB_CLEANUP);
  assertEquals("complete is an invalid input,exit code should be -1", -1,
      retCode_invalidTaskState);

}
Project: hadoop    File: TestExternalCall.java
/**
 * Tests the run and execute methods of the DistCp class: a simple file copy.
 * @throws Exception
 */
@Test
public void testCleanup() throws Exception {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");

  DistCp distcp = new DistCp(conf, null);
  String[] arg = { source.toString(), target.toString() };

  distcp.run(arg);
  Assert.assertTrue(fs.exists(target));
}
Project: hadoop    File: TestExternalCall.java
/**
 * Tests the main method of DistCp. The method should call System.exit().
 */
@Test
public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);

  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");
  try {
    String[] arg = { target.toString(), source.toString() };
    DistCp.main(arg);
    Assert.fail();
  } catch (ExitException t) {
    Assert.assertTrue(fs.exists(target));
    Assert.assertEquals(0, t.status);
    Assert.assertEquals(0,
        stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
  }
}
Project: hadoop    File: TestIntegration.java
@Test(timeout=100000)
public void testCleanup() {
  try {
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);

    DistCpOptions options = new DistCpOptions(sources, target);

    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(
            new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);

    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      Assert.assertEquals(0,
          stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
Project: aliyun-oss-hadoop-fs    File: TestCLI.java
@Test
public void testGetJob() throws Exception {
  Configuration conf = new Configuration();
  long sleepTime = 100;
  conf.setLong(MRJobConfig.MR_CLIENT_JOB_RETRY_INTERVAL, sleepTime);
  Cluster mockCluster = mock(Cluster.class);
  JobID jobId1 = JobID.forName("job_1234654654_001");
  when(mockCluster.getJob(jobId1)).thenReturn(null);

  for (int i = 0; i < 2; ++i) {
    conf.setInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, i);
    CLI cli = spy(new CLI(conf));
    cli.cluster = mockCluster;
    doReturn(mockCluster).when(cli).createCluster();
    long start = Time.monotonicNow();
    cli.getJob(jobId1);
    long end = Time.monotonicNow();
    Assert.assertTrue(end - start > (i * sleepTime));
    Assert.assertTrue(end - start < ((i + 1) * sleepTime));
  }
}
Project: aliyun-oss-hadoop-fs    File: TestExternalCall.java
/**
 * Tests the run and execute methods of the DistCp class: a simple file copy.
 * @throws Exception
 */
@Test
public void testCleanup() throws Exception {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");

  DistCp distcp = new DistCp(conf, null);
  String[] arg = { source.toString(), target.toString() };

  distcp.run(arg);
  Assert.assertTrue(fs.exists(target));
}
Project: aliyun-oss-hadoop-fs    File: TestExternalCall.java
/**
 * Tests the main method of DistCp. The method should call System.exit().
 */
@Test
public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);

  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");
  try {
    String[] arg = { target.toString(), source.toString() };
    DistCp.main(arg);
    Assert.fail();
  } catch (ExitException t) {
    Assert.assertTrue(fs.exists(target));
    Assert.assertEquals(0, t.status);
    Assert.assertEquals(0,
        stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
  }
}
Project: aliyun-oss-hadoop-fs    File: TestIntegration.java
@Test(timeout=100000)
public void testCleanup() {
  try {
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);

    DistCpOptions options = new DistCpOptions(sources, target);

    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(
            new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);

    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      Assert.assertEquals(0,
          stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
Project: big-c    File: TestExternalCall.java
/**
 * Tests the run and execute methods of the DistCp class: a simple file copy.
 * @throws Exception
 */
@Test
public void testCleanup() throws Exception {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");

  DistCp distcp = new DistCp(conf, null);
  String[] arg = { source.toString(), target.toString() };

  distcp.run(arg);
  Assert.assertTrue(fs.exists(target));
}
Project: big-c    File: TestExternalCall.java
/**
 * Tests the main method of DistCp. The method should call System.exit().
 */
@Test
public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);

  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");
  try {
    String[] arg = { target.toString(), source.toString() };
    DistCp.main(arg);
    Assert.fail();
  } catch (ExitException t) {
    Assert.assertTrue(fs.exists(target));
    Assert.assertEquals(0, t.status);
    Assert.assertEquals(0,
        stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
  }
}
Project: big-c    File: TestIntegration.java
@Test(timeout=100000)
public void testCleanup() {
  try {
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);

    DistCpOptions options = new DistCpOptions(sources, target);

    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(
            new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);

    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      Assert.assertEquals(0,
          stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestExternalCall.java
/**
 * Tests the run and execute methods of the DistCp class: a simple file copy.
 * @throws Exception
 */
@Test
public void testCleanup() throws Exception {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");

  DistCp distcp = new DistCp(conf, null);
  String[] arg = { source.toString(), target.toString() };

  distcp.run(arg);
  Assert.assertTrue(fs.exists(target));
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestExternalCall.java
/**
 * Tests the main method of DistCp. The method should call System.exit().
 */
@Test
public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);

  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");
  try {
    String[] arg = { target.toString(), source.toString() };
    DistCp.main(arg);
    Assert.fail();
  } catch (ExitException t) {
    Assert.assertTrue(fs.exists(target));
    Assert.assertEquals(0, t.status);
    Assert.assertEquals(0,
        stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestIntegration.java
@Test(timeout=100000)
public void testCleanup() {
  try {
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);

    DistCpOptions options = new DistCpOptions(sources, target);

    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(
            new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);

    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      Assert.assertEquals(0,
          stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
Project: hadoop-plus    File: TestCLI.java
@Test
public void testListAttemptIdsWithInvalidInputs() throws Exception {
  JobID jobId = JobID.forName(jobIdStr);
  Cluster mockCluster = mock(Cluster.class);
  Job job = mock(Job.class);
  CLI cli = spy(new CLI());

  doReturn(mockCluster).when(cli).createCluster();
  when(mockCluster.getJob(jobId)).thenReturn(job);

  int retCode_JOB_SETUP = cli.run(new String[] { "-list-attempt-ids",
      jobIdStr, "JOB_SETUP", "running" });
  int retCode_JOB_CLEANUP = cli.run(new String[] { "-list-attempt-ids",
      jobIdStr, "JOB_CLEANUP", "running" });

  assertEquals("JOB_SETUP is a invalid input,exit code should be -1", -1,
      retCode_JOB_SETUP);
  assertEquals("JOB_CLEANUP is a invalid input,exit code should be -1", -1,
      retCode_JOB_CLEANUP);

}
Project: hadoop-plus    File: TestExternalCall.java
/**
 * Tests the run and execute methods of the DistCp class: a simple file copy.
 * @throws Exception
 */
@Test
public void testCleanup() throws Exception {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");

  DistCp distcp = new DistCp(conf, null);
  String[] arg = { source.toString(), target.toString() };

  distcp.run(arg);
  Assert.assertTrue(fs.exists(target));
}
Project: hadoop-plus    File: TestExternalCall.java
/**
 * Tests the main method of DistCp. The method should call System.exit().
 */
@Test
public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);

  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");
  try {
    String[] arg = { target.toString(), source.toString() };
    DistCp.main(arg);
    Assert.fail();
  } catch (ExitException t) {
    Assert.assertTrue(fs.exists(target));
    Assert.assertEquals(0, t.status);
    Assert.assertEquals(0,
        stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
  }
}
Project: hadoop-plus    File: TestIntegration.java
@Test
public void testCleanup() {
  try {
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);

    DistCpOptions options = new DistCpOptions(sources, target);

    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(
            new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);

    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      Assert.assertEquals(0,
          stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
Project: hops    File: TestExternalCall.java
/**
 * Tests the run and execute methods of the DistCp class: a simple file copy.
 * @throws Exception
 */
@Test
public void testCleanup() throws Exception {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");

  DistCp distcp = new DistCp(conf, null);
  String[] arg = { source.toString(), target.toString() };

  distcp.run(arg);
  Assert.assertTrue(fs.exists(target));
}
Project: hops    File: TestExternalCall.java
/**
 * Tests the main method of DistCp. The method should call System.exit().
 */
@Test
public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);

  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");
  try {
    String[] arg = { target.toString(), source.toString() };
    DistCp.main(arg);
    Assert.fail();
  } catch (ExitException t) {
    Assert.assertTrue(fs.exists(target));
    Assert.assertEquals(0, t.status);
    Assert.assertEquals(0,
        stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
  }
}
Project: hops    File: TestIntegration.java
@Test(timeout=100000)
public void testCleanup() {
  try {
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);

    DistCpOptions options = new DistCpOptions(sources, target);

    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(
            new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);

    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      Assert.assertEquals(0,
          stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
Project: hbase    File: MapReduceBackupCopyJob.java
@Override
public void cancel(String jobId) throws IOException {
  JobID id = JobID.forName(jobId);
  Cluster cluster = new Cluster(this.getConf());
  try {
    Job job = cluster.getJob(id);
    if (job == null) {
      LOG.error("No job found for " + id);
      // should we throw an exception here instead?
      return;
    }
    if (job.isComplete() || job.isRetired()) {
      return;
    }

    job.killJob();
    LOG.debug("Killed copy job " + id);
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
Project: hadoop-TCP    File: TestCLI.java
@Test
public void testListAttemptIdsWithInvalidInputs() throws Exception {
  JobID jobId = JobID.forName(jobIdStr);
  Cluster mockCluster = mock(Cluster.class);
  Job job = mock(Job.class);
  CLI cli = spy(new CLI());

  doReturn(mockCluster).when(cli).createCluster();
  when(mockCluster.getJob(jobId)).thenReturn(job);

  int retCode_JOB_SETUP = cli.run(new String[] { "-list-attempt-ids",
      jobIdStr, "JOB_SETUP", "running" });
  int retCode_JOB_CLEANUP = cli.run(new String[] { "-list-attempt-ids",
      jobIdStr, "JOB_CLEANUP", "running" });

  assertEquals("JOB_SETUP is a invalid input,exit code should be -1", -1,
      retCode_JOB_SETUP);
  assertEquals("JOB_CLEANUP is a invalid input,exit code should be -1", -1,
      retCode_JOB_CLEANUP);

}
Project: hadoop-TCP    File: TestExternalCall.java
/**
 * Tests the run and execute methods of the DistCp class: a simple file copy.
 * @throws Exception
 */
@Test
public void testCleanup() throws Exception {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");

  DistCp distcp = new DistCp(conf, null);
  String[] arg = { source.toString(), target.toString() };

  distcp.run(arg);
  Assert.assertTrue(fs.exists(target));
}
Project: hadoop-TCP    File: TestExternalCall.java
/**
 * Tests the main method of DistCp. The method should call System.exit().
 */
@Test
public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);

  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");
  try {
    String[] arg = { target.toString(), source.toString() };
    DistCp.main(arg);
    Assert.fail();
  } catch (ExitException t) {
    Assert.assertTrue(fs.exists(target));
    Assert.assertEquals(0, t.status);
    Assert.assertEquals(0,
        stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
  }
}
Project: hadoop-TCP    File: TestIntegration.java
@Test
public void testCleanup() {
  try {
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);

    DistCpOptions options = new DistCpOptions(sources, target);

    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(
            new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);

    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      Assert.assertEquals(0,
          stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
Project: spork    File: HadoopShims.java
public static Iterator<TaskReport> getTaskReports(Job job, TaskType type) throws IOException {
    if (job.getJobConf().getBoolean(PigConfiguration.PIG_NO_TASK_REPORT, false)) {
        LOG.info("TaskReports are disabled for job: " + job.getAssignedJobID());
        return null;
    }
    Cluster cluster = new Cluster(job.getJobConf());
    try {
        org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
        if (mrJob == null) { // In local mode, mrJob will be null
            mrJob = job.getJob();
        }
        org.apache.hadoop.mapreduce.TaskReport[] reports = mrJob.getTaskReports(type);
        return DowngradeHelper.downgradeTaskReports(reports);
    } catch (InterruptedException ir) {
        throw new IOException(ir);
    }
}
Project: hardfs    File: TestCLI.java
@Test
public void testListAttemptIdsWithInvalidInputs() throws Exception {
  JobID jobId = JobID.forName(jobIdStr);
  Cluster mockCluster = mock(Cluster.class);
  Job job = mock(Job.class);
  CLI cli = spy(new CLI());

  doReturn(mockCluster).when(cli).createCluster();
  when(mockCluster.getJob(jobId)).thenReturn(job);

  int retCode_JOB_SETUP = cli.run(new String[] { "-list-attempt-ids",
      jobIdStr, "JOB_SETUP", "running" });
  int retCode_JOB_CLEANUP = cli.run(new String[] { "-list-attempt-ids",
      jobIdStr, "JOB_CLEANUP", "running" });

  assertEquals("JOB_SETUP is a invalid input,exit code should be -1", -1,
      retCode_JOB_SETUP);
  assertEquals("JOB_CLEANUP is a invalid input,exit code should be -1", -1,
      retCode_JOB_CLEANUP);

}
Project: hardfs    File: TestExternalCall.java
/**
 * Tests the run and execute methods of the DistCp class: a simple file copy.
 * @throws Exception
 */
@Test
public void testCleanup() throws Exception {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");

  DistCp distcp = new DistCp(conf, null);
  String[] arg = { source.toString(), target.toString() };

  distcp.run(arg);
  Assert.assertTrue(fs.exists(target));
}
Project: hardfs    File: TestExternalCall.java
/**
 * Tests the main method of DistCp. The method should call System.exit().
 */
@Test
public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);

  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");
  try {
    String[] arg = { target.toString(), source.toString() };
    DistCp.main(arg);
    Assert.fail();
  } catch (ExitException t) {
    Assert.assertTrue(fs.exists(target));
    Assert.assertEquals(0, t.status);
    Assert.assertEquals(0,
        stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
  }
}
Project: hardfs    File: TestIntegration.java
@Test
public void testCleanup() {
  try {
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);

    DistCpOptions options = new DistCpOptions(sources, target);

    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(
            new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);

    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      Assert.assertEquals(0,
          stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
Project: hadoop-on-lustre2    File: TestExternalCall.java
/**
 * Tests the run and execute methods of the DistCp class: a simple file copy.
 * @throws Exception
 */
@Test
public void testCleanup() throws Exception {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");

  DistCp distcp = new DistCp(conf, null);
  String[] arg = { source.toString(), target.toString() };

  distcp.run(arg);
  Assert.assertTrue(fs.exists(target));
}
Project: hadoop-on-lustre2    File: TestExternalCall.java
/**
 * Tests the main method of DistCp. The method should call System.exit().
 */
@Test
public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
  Configuration conf = getConf();

  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);

  Path source = createFile("tmp.txt");
  Path target = createFile("target.txt");
  try {
    String[] arg = { target.toString(), source.toString() };
    DistCp.main(arg);
    Assert.fail();
  } catch (ExitException t) {
    Assert.assertTrue(fs.exists(target));
    Assert.assertEquals(0, t.status);
    Assert.assertEquals(0,
        stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
  }
}
Project: hadoop-on-lustre2    File: TestIntegration.java
@Test
public void testCleanup() {
  try {
    Path sourcePath = new Path("noscheme:///file");
    List<Path> sources = new ArrayList<Path>();
    sources.add(sourcePath);

    DistCpOptions options = new DistCpOptions(sources, target);

    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(
            new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);

    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      Assert.assertEquals(0,
          stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
Project: pentaho-hadoop-shims    File: ConfigurationProxyV2Test.java
@Test(expected = YarnQueueAclsException.class)
public void testSubmitWhenUserHasNoPermissionsToSubmitJobInQueueShouldRaiseYarnQueueAclsException()
    throws IOException, InterruptedException, ClassNotFoundException {
  Mockito.spy( YarnQueueAclsVerifier.class );
  ConfigurationProxyV2 configurationProxyV2 = Mockito.mock( ConfigurationProxyV2.class );
  Cluster cluster = Mockito.mock( Cluster.class );
  Job job = Mockito.mock( Job.class );

  Mockito.when( configurationProxyV2.getJob() ).thenReturn( job );
  Mockito.when( configurationProxyV2.createClusterDescription( Mockito.any( Configuration.class ) ) ).thenReturn( cluster );
  Mockito.when( configurationProxyV2.submit() ).thenCallRealMethod();
  Mockito.when( cluster.getQueueAclsForCurrentUser() ).thenReturn( new QueueAclsInfo[]{
    new QueueAclsInfo( StringUtils.EMPTY, new String[]{
      "ANOTHER_RIGHTS"
    } ),
    new QueueAclsInfo( StringUtils.EMPTY, new String[]{})
  });

  configurationProxyV2.submit();
}
Project: pentaho-hadoop-shims    File: ConfigurationProxyV2Test.java
@Test
public void testSubmitWhenUserHasPermissionsToSubmitJobInQueueShouldExecuteSuccessfully()
    throws IOException, InterruptedException, ClassNotFoundException {
  Mockito.spy( YarnQueueAclsVerifier.class );
  ConfigurationProxyV2 configurationProxyV2 = Mockito.mock( ConfigurationProxyV2.class );
  Cluster cluster = Mockito.mock( Cluster.class );
  Job job = Mockito.mock( Job.class );

  Mockito.when( configurationProxyV2.getJob() ).thenReturn( job );
  Mockito.when( configurationProxyV2.createClusterDescription( Mockito.any( Configuration.class ) ) ).thenReturn( cluster );
  Mockito.when( configurationProxyV2.submit() ).thenCallRealMethod();
  Mockito.when( cluster.getQueueAclsForCurrentUser() ).thenReturn( new QueueAclsInfo[]{
    new QueueAclsInfo( StringUtils.EMPTY, new String[]{
      "SUBMIT_APPLICATIONS"
    } ),
    new QueueAclsInfo( StringUtils.EMPTY, new String[]{})
  });

  Assert.assertNotNull( configurationProxyV2.submit() );
}
Project: mapreduce-fork    File: TeraChecksum.java
public int run(String[] args) throws Exception {
  Job job = Job.getInstance(new Cluster(getConf()), getConf());
  if (args.length != 2) {
    usage();
    return 2;
  }
  TeraInputFormat.setInputPaths(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1]));
  job.setJobName("TeraSum");
  job.setJarByClass(TeraChecksum.class);
  job.setMapperClass(ChecksumMapper.class);
  job.setReducerClass(ChecksumReducer.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(Unsigned16.class);
  // force a single reducer
  job.setNumReduceTasks(1);
  job.setInputFormatClass(TeraInputFormat.class);
  return job.waitForCompletion(true) ? 0 : 1;
}
Project: mapreduce-fork    File: TeraGen.java
/**
 * @param args the cli arguments
 */
public int run(String[] args) 
    throws IOException, InterruptedException, ClassNotFoundException {
  Job job = Job.getInstance(new Cluster(getConf()), getConf());
  if (args.length != 2) {
    usage();
    return 2;
  }
  setNumberOfRows(job, parseHumanLong(args[0]));
  Path outputDir = new Path(args[1]);
  if (outputDir.getFileSystem(getConf()).exists(outputDir)) {
    throw new IOException("Output directory " + outputDir + 
                          " already exists.");
  }
  FileOutputFormat.setOutputPath(job, outputDir);
  job.setJobName("TeraGen");
  job.setJarByClass(TeraGen.class);
  job.setMapperClass(SortGenMapper.class);
  job.setNumReduceTasks(0);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setInputFormatClass(RangeInputFormat.class);
  job.setOutputFormatClass(TeraOutputFormat.class);
  return job.waitForCompletion(true) ? 0 : 1;
}
Project: mapreduce-fork    File: TeraValidate.java
public int run(String[] args) throws Exception {
  Job job = Job.getInstance(new Cluster(getConf()), getConf());
  if (args.length != 2) {
    usage();
    return 1;
  }
  TeraInputFormat.setInputPaths(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1]));
  job.setJobName("TeraValidate");
  job.setJarByClass(TeraValidate.class);
  job.setMapperClass(ValidateMapper.class);
  job.setReducerClass(ValidateReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  // force a single reducer
  job.setNumReduceTasks(1);
  // force a single split 
  FileInputFormat.setMinInputSplitSize(job, Long.MAX_VALUE);
  job.setInputFormatClass(TeraInputFormat.class);
  return job.waitForCompletion(true) ? 0 : 1;
}
Project: mapreduce-fork    File: TestQueueManagerWithJobTracker.java
private void checkAccessToKill(JobTracker tracker, final JobConf mrConf, 
    UserGroupInformation killer) throws IOException, InterruptedException,
    ClassNotFoundException {
  Job job = submitSleepJob(1, 1, 100, 100, false, "u1,g1",
      "p1" + NAME_SEPARATOR + "p11", conf);
  JobID jobID = job.getStatus().getJobID();
  // Ensure that the JobInProgress is initialized before we issue a kill
  // signal to the job.
  JobInProgress jip = tracker.getJob(
      org.apache.hadoop.mapred.JobID.downgrade(jobID));
  tracker.initJob(jip);
  Cluster cluster = killer.doAs(new PrivilegedExceptionAction<Cluster>() {
    public Cluster run() throws IOException {
      return new Cluster(mrConf);
    }
  });
  cluster.getJob(jobID).killJob();
  assertEquals("job not killed by " + killer,
      cluster.getJob(jobID).getStatus().getState(), (State.KILLED));
}
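A note on the mapreduce-fork examples above: Job.getInstance(new Cluster(getConf()), getConf()) comes from the old 0.21-era client API; in Hadoop 2.x and later that overload is deprecated and its Cluster argument is ignored. A minimal sketch of the current equivalent (the job name and settings below are illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobSetupExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Job.getInstance(Configuration) replaces the deprecated
    // Job.getInstance(Cluster, Configuration); the client connects
    // to the cluster lazily, at submission time.
    Job job = Job.getInstance(conf);
    job.setJobName("example");
    job.setNumReduceTasks(1);
    System.out.println("Configured job: " + job.getJobName());
  }
}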