Example source code for the Java class org.apache.hadoop.mapreduce.SleepJob

Project: hadoop    File: TestLocalJobSubmission.java
/**
 * Test the local job submission options
 * -jt local and -libjars.
 * @throws IOException
 */
@Test
public void testLocalJobLibjarsOption() throws IOException {
  Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));

  Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://testcluster");
  final String[] args = {
      "-jt" , "local", "-libjars", jarPath.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
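
The makeJar helper used above is defined elsewhere in TestLocalJobSubmission and is not shown on this page. A minimal sketch of such a helper, assuming it only needs to produce a small valid jar on the local filesystem (the entry name and contents are illustrative, not the actual test code):

// Hypothetical sketch of a makeJar helper: writes a tiny jar with one entry.
// Imports assumed: java.io.File, java.io.FileOutputStream,
// java.util.jar.JarOutputStream, java.util.zip.ZipEntry.
private Path makeJar(Path p) throws IOException {
  FileOutputStream fos = new FileOutputStream(new File(p.toString()));
  JarOutputStream jos = new JarOutputStream(fos);
  ZipEntry ze = new ZipEntry("test.jar.inside"); // entry name is illustrative
  jos.putNextEntry(ze);
  jos.write("inside the jar!".getBytes());
  jos.closeEntry();
  jos.close();
  return p;
}
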
Project: hadoop    File: CredentialsTestJob.java
public Job createJob() throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
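
createJob() only builds the Job; a Tool-style driver is what actually submits it. A sketch of how such a driver might be wired up, assuming CredentialsTestJob extends Configured and implements Tool (the run/main bodies below are illustrative, not the exact upstream code):

// Hypothetical driver around the createJob() shown above.
public int run(String[] args) throws Exception {
  Job job = createJob();
  return job.waitForCompletion(true) ? 0 : 1;
}

public static void main(String[] args) throws Exception {
  // ToolRunner parses generic options (-conf, -D, -jt, ...) before run()
  int res = ToolRunner.run(new Configuration(), new CredentialsTestJob(), args);
  System.exit(res);
}
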
Project: hadoop    File: TestBinaryTokenFile.java
/**
 * Run a distributed job with the -tokenCacheFile option and
 * verify that no exception occurs.
 * @throws IOException
 */
@Test
public void testTokenCacheFile() throws IOException {
  Configuration conf = mrCluster.getConfig();
  createBinaryTokenFile(conf);
  // provide namenode names so the job can obtain delegation tokens for them
  final String nnUri = dfsCluster.getURI(0).toString();
  conf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);

  // using argument to pass the file name
  final String[] args = {
      "-tokenCacheFile", binaryTokenFileName.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
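
createBinaryTokenFile(conf) is a helper of the same test and is not shown here. The -tokenCacheFile argument expects a file in Hadoop's binary Credentials (token storage) format; a plausible sketch of producing one, assuming HDFS delegation tokens are wanted (the renewer name is illustrative):

// Sketch: fetch HDFS delegation tokens and persist them in the binary
// token-storage format that -tokenCacheFile consumes.
// Import assumed: org.apache.hadoop.security.Credentials.
Credentials creds = new Credentials();
FileSystem fs = FileSystem.get(conf);
fs.addDelegationTokens("renewer", creds); // renewer name is illustrative
creds.writeTokenStorageFile(binaryTokenFileName, conf);
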
Project: hadoop    File: TestMRAMWithNonNormalizedCapabilities.java
/**
 * Ensure nothing is broken after normalization was
 * removed from the MRAM side.
 * @throws Exception
 */
@Test
public void testJobWithNonNormalizedCapabilities() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
              + " not found. Not running test.");
    return;
  }

  JobConf jobConf = new JobConf(mrCluster.getConfig());
  jobConf.setInt("mapreduce.map.memory.mb", 700);
  jobConf.setInt("mapred.reduce.memory.mb", 1500);

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(jobConf);
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.submit();
  boolean completed = job.waitForCompletion(true);
  Assert.assertTrue("Job should be completed", completed);
  Assert.assertEquals("Job should be finished successfully", 
                  JobStatus.State.SUCCEEDED, job.getJobState());
}
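
For reference, SleepJob.createJob takes (numMapper, numReducer, mapSleepTime, mapSleepCount, reduceSleepTime, reduceSleepCount), where each task's total sleep time is spread over its count of sleep records. The call above can therefore be read as:

Job job = sleepJob.createJob(
    3,    // numMapper: 3 map tasks
    2,    // numReducer: 2 reduce tasks
    1000, // mapSleepTime: total ms each map sleeps
    1,    // mapSleepCount: records (sleep intervals) per map
    500,  // reduceSleepTime: total ms each reduce sleeps
    1);   // reduceSleepCount: records per reduce
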
Project: aliyun-oss-hadoop-fs    File: TestLocalJobSubmission.java
/**
 * Test the local job submission options
 * -jt local and -libjars.
 * @throws IOException
 */
@Test
public void testLocalJobLibjarsOption() throws IOException {
  Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));

  Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000");
  conf.set(MRConfig.FRAMEWORK_NAME, "local");
  final String[] args = {
      "-jt" , "local", "-libjars", jarPath.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
Project: aliyun-oss-hadoop-fs    File: TestLocalJobSubmission.java
/**
 * Test local job submission with
 * intermediate data encryption enabled.
 * @throws IOException
 */
@Test
public void testLocalJobEncryptedIntermediateData() throws IOException {
  Configuration conf = new Configuration();
  conf.set(MRConfig.FRAMEWORK_NAME, "local");
  conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
  final String[] args = {
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
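
The same run can be set up without command-line parsing. A sketch of an equivalent programmatic version, using the SleepJob.createJob signature noted earlier on this page (the sleep times here are illustrative):

// Programmatic equivalent of the argument-driven run above (a sketch).
Configuration conf = new Configuration();
conf.set(MRConfig.FRAMEWORK_NAME, "local");
conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);

SleepJob sleepJob = new SleepJob();
sleepJob.setConf(conf);
Job job = sleepJob.createJob(1, 1, 1, 1, 1, 1); // 1 map, 1 reduce, 1 ms sleeps
assertTrue(job.waitForCompletion(true));
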
Project: aliyun-oss-hadoop-fs    File: CredentialsTestJob.java
public Job createJob() throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
Project: aliyun-oss-hadoop-fs    File: TestBinaryTokenFile.java
/**
 * Run a distributed job with the -tokenCacheFile option and
 * verify that no exception occurs.
 * @throws IOException
 */
@Test
public void testTokenCacheFile() throws IOException {
  Configuration conf = mrCluster.getConfig();
  createBinaryTokenFile(conf);
  // provide namenode names so the job can obtain delegation tokens for them
  final String nnUri = dfsCluster.getURI(0).toString();
  conf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);

  // using argument to pass the file name
  final String[] args = {
      "-tokenCacheFile", binaryTokenFileName.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
Project: aliyun-oss-hadoop-fs    File: TestMRAMWithNonNormalizedCapabilities.java
/**
 * Ensure nothing is broken after normalization was
 * removed from the MRAM side.
 * @throws Exception
 */
@Test
public void testJobWithNonNormalizedCapabilities() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
              + " not found. Not running test.");
    return;
  }

  JobConf jobConf = new JobConf(mrCluster.getConfig());
  jobConf.setInt("mapreduce.map.memory.mb", 700);
  jobConf.setInt("mapred.reduce.memory.mb", 1500);

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(jobConf);
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.submit();
  boolean completed = job.waitForCompletion(true);
  Assert.assertTrue("Job should be completed", completed);
  Assert.assertEquals("Job should be finished successfully", 
                  JobStatus.State.SUCCEEDED, job.getJobState());
}
Project: big-c    File: TestLocalJobSubmission.java
/**
 * Test the local job submission options
 * -jt local and -libjars.
 * @throws IOException
 */
@Test
public void testLocalJobLibjarsOption() throws IOException {
  Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));

  Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://testcluster");
  final String[] args = {
      "-jt" , "local", "-libjars", jarPath.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
Project: big-c    File: CredentialsTestJob.java
public Job createJob() throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
Project: big-c    File: TestBinaryTokenFile.java
/**
 * Run a distributed job with the -tokenCacheFile option and
 * verify that no exception occurs.
 * @throws IOException
 */
@Test
public void testTokenCacheFile() throws IOException {
  Configuration conf = mrCluster.getConfig();
  createBinaryTokenFile(conf);
  // provide namenode names so the job can obtain delegation tokens for them
  final String nnUri = dfsCluster.getURI(0).toString();
  conf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);

  // using argument to pass the file name
  final String[] args = {
      "-tokenCacheFile", binaryTokenFileName.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
Project: big-c    File: TestMRAMWithNonNormalizedCapabilities.java
/**
 * Ensure nothing is broken after normalization was
 * removed from the MRAM side.
 * @throws Exception
 */
@Test
public void testJobWithNonNormalizedCapabilities() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
              + " not found. Not running test.");
    return;
  }

  JobConf jobConf = new JobConf(mrCluster.getConfig());
  jobConf.setInt("mapreduce.map.memory.mb", 700);
  jobConf.setInt("mapred.reduce.memory.mb", 1500);

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(jobConf);
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.submit();
  boolean completed = job.waitForCompletion(true);
  Assert.assertTrue("Job should be completed", completed);
  Assert.assertEquals("Job should be finished successfully", 
                  JobStatus.State.SUCCEEDED, job.getJobState());
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestLocalJobSubmission.java
/**
 * Test the local job submission options
 * -jt local and -libjars.
 * @throws IOException
 */
@Test
public void testLocalJobLibjarsOption() throws IOException {
  Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));

  Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://testcluster");
  final String[] args = {
      "-jt" , "local", "-libjars", jarPath.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
Project: hadoop-2.6.0-cdh5.4.3    File: CredentialsTestJob.java
public Job createJob() throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestBinaryTokenFile.java
/**
 * Run a distributed job with the -tokenCacheFile option and
 * verify that no exception occurs.
 * @throws IOException
 */
@Test
public void testTokenCacheFile() throws IOException {
  Configuration conf = mrCluster.getConfig();
  createBinaryTokenFile(conf);
  // provide namenode names so the job can obtain delegation tokens for them
  final String nnUri = dfsCluster.getURI(0).toString();
  conf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);

  // using argument to pass the file name
  final String[] args = {
      "-tokenCacheFile", binaryTokenFileName.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestMRAMWithNonNormalizedCapabilities.java
/**
 * Ensure nothing is broken after normalization was
 * removed from the MRAM side.
 * @throws Exception
 */
@Test
public void testJobWithNonNormalizedCapabilities() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
              + " not found. Not running test.");
    return;
  }

  JobConf jobConf = new JobConf(mrCluster.getConfig());
  jobConf.setInt("mapreduce.map.memory.mb", 700);
  jobConf.setInt("mapred.reduce.memory.mb", 1500);

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(jobConf);
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.submit();
  boolean completed = job.waitForCompletion(true);
  Assert.assertTrue("Job should be completed", completed);
  Assert.assertEquals("Job should be finished successfully", 
                  JobStatus.State.SUCCEEDED, job.getJobState());
}
Project: hadoop-plus    File: CredentialsTestJob.java
public Job createJob() throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
Project: FlexMap    File: CredentialsTestJob.java
public Job createJob() throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
Project: FlexMap    File: TestBinaryTokenFile.java
/**
 * Run a distributed job with the -tokenCacheFile option and
 * verify that no exception occurs.
 * @throws IOException
 */
@Test
public void testTokenCacheFile() throws IOException {
  Configuration conf = mrCluster.getConfig();
  createBinaryTokenFile(conf);
  // provide namenode names so the job can obtain delegation tokens for them
  final String nnUri = dfsCluster.getURI(0).toString();
  conf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);

  // using argument to pass the file name
  final String[] args = {
      "-tokenCacheFile", binaryTokenFileName.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
Project: FlexMap    File: TestMRAMWithNonNormalizedCapabilities.java
/**
 * Ensure nothing is broken after normalization was
 * removed from the MRAM side.
 * @throws Exception
 */
@Test
public void testJobWithNonNormalizedCapabilities() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
              + " not found. Not running test.");
    return;
  }

  JobConf jobConf = new JobConf(mrCluster.getConfig());
  jobConf.setInt("mapreduce.map.memory.mb", 700);
  jobConf.setInt("mapred.reduce.memory.mb", 1500);

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(jobConf);
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.submit();
  boolean completed = job.waitForCompletion(true);
  Assert.assertTrue("Job should be completed", completed);
  Assert.assertEquals("Job should be finished successfully", 
                  JobStatus.State.SUCCEEDED, job.getJobState());
}
Project: hops    File: TestLocalJobSubmission.java
/**
 * Test the local job submission options
 * -jt local and -libjars.
 * @throws IOException
 */
@Test
public void testLocalJobLibjarsOption() throws IOException {
  Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));

  Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000");
  conf.set(MRConfig.FRAMEWORK_NAME, "local");
  final String[] args = {
      "-jt" , "local", "-libjars", jarPath.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
Project: hops    File: TestLocalJobSubmission.java
/**
 * Test local job submission with
 * intermediate data encryption enabled.
 * @throws IOException
 */
@Test
public void testLocalJobEncryptedIntermediateData() throws IOException {
  Configuration conf = new Configuration();
  conf.set(MRConfig.FRAMEWORK_NAME, "local");
  conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
  final String[] args = {
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
Project: hops    File: CredentialsTestJob.java
public Job createJob() throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
Project: hops    File: TestBinaryTokenFile.java
/**
 * Run a distributed job with the -tokenCacheFile option and
 * verify that no exception occurs.
 * @throws IOException
 */
@Test
public void testTokenCacheFile() throws IOException {
  Configuration conf = mrCluster.getConfig();
  createBinaryTokenFile(conf);
  // provide namenode names so the job can obtain delegation tokens for them
  final String nnUri = dfsCluster.getURI(0).toString();
  conf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);

  // using argument to pass the file name
  final String[] args = {
      "-tokenCacheFile", binaryTokenFileName.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
Project: hops    File: TestMRAMWithNonNormalizedCapabilities.java
/**
 * Ensure nothing is broken after normalization was
 * removed from the MRAM side.
 * @throws Exception
 */
@Test
public void testJobWithNonNormalizedCapabilities() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
              + " not found. Not running test.");
    return;
  }

  JobConf jobConf = new JobConf(mrCluster.getConfig());
  jobConf.setInt("mapreduce.map.memory.mb", 700);
  jobConf.setInt("mapred.reduce.memory.mb", 1500);

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(jobConf);
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.submit();
  boolean completed = job.waitForCompletion(true);
  Assert.assertTrue("Job should be completed", completed);
  Assert.assertEquals("Job should be finished successfully", 
                  JobStatus.State.SUCCEEDED, job.getJobState());
}
Project: hadoop-TCP    File: CredentialsTestJob.java
public Job createJob() throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
Project: hardfs    File: CredentialsTestJob.java
public Job createJob() throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
Project: hadoop-on-lustre2    File: CredentialsTestJob.java
public Job createJob() throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
Project: mapreduce-fork    File: TestSubmitJob.java
private void runJobAndVerifyFailure(JobConf jobConf, long memForMapTasks,
    long memForReduceTasks, String expectedMsg) throws Exception {
  String[] args = { "-m", "0", "-r", "0", "-mt", "0", "-rt", "0" };
  boolean throwsException = false;
  String msg = null;
  try {
    ToolRunner.run(jobConf, new SleepJob(), args);
  } catch (RemoteException re) {
    throwsException = true;
    msg = re.unwrapRemoteException().getMessage();
  }
  assertTrue(throwsException);
  assertNotNull(msg);

  String overallExpectedMsg =
      "(" + memForMapTasks + " memForMapTasks " + memForReduceTasks
          + " memForReduceTasks): " + expectedMsg;
  assertTrue("Observed message - " + msg
      + " - doesn't contain expected message - " + overallExpectedMsg, msg
      .contains(overallExpectedMsg));
}
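
A hypothetical call site for this helper (the memory values, config helper, and expected message are all illustrative; exact limit keys and messages vary by version): a job that requests more memory than the cluster allows should be rejected at submission with a RemoteException, whose message is then matched.

// Illustrative only, not upstream code.
JobConf jobConf = getClusterConf(); // however the test builds its JobConf
jobConf.setMemoryForMapTask(8 * 1024);  // request above the cluster limit
jobConf.setMemoryForReduceTask(1024);
runJobAndVerifyFailure(jobConf, 8 * 1024, 1024,
    "Exceeds the cluster's max-memory-limit");
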
Project: mapreduce-fork    File: TestJobExecutionAsDifferentUser.java
/** Ensure that SIGQUIT can be properly sent by the LinuxTaskController
 * if a task times out.
 */
public void testTimeoutStackTrace() throws Exception {
  if (!shouldRun()) {
    return;
  }

  // Run a job that should timeout and trigger a SIGQUIT.
  startCluster();
  jobOwner.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      JobConf conf = getClusterConf();
      conf.setInt(JobContext.TASK_TIMEOUT, 10000);
      conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 50);
      SleepJob sleepJob = new SleepJob();
      sleepJob.setConf(conf);
      Job job = sleepJob.createJob(1, 0, 30000, 1, 0, 0);
      job.setMaxMapAttempts(1);
      int prevNumSigQuits = MyLinuxTaskController.attemptedSigQuits;
      job.waitForCompletion(true);
      assertTrue("Did not detect a new SIGQUIT!",
          prevNumSigQuits < MyLinuxTaskController.attemptedSigQuits);
      assertEquals("A SIGQUIT attempt failed!", 0,
          MyLinuxTaskController.failedSigQuits);
      return null;
    }
  });
}
Project: mapreduce-fork    File: TestJobDirCleanup.java
private JobID runSleepJob(JobConf conf) throws Exception {
  SleepJob sleep = new SleepJob();
  sleep.setConf(conf);
  Job job = sleep.createJob(1, 10, 1000, 1, 10000, 1);
  job.waitForCompletion(true);
  return job.getJobID();
}
Project: mapreduce-fork    File: TestCluster.java
@Test
public void testJobSubmission() throws Exception {
  Configuration conf = new Configuration(cluster.getConf());
  SleepJob job = new SleepJob();
  job.setConf(conf);
  Job rJob = job.createJob(1, 1, 100, 100, 100, 100);
  rJob = cluster.getJTClient().submitAndVerifyJob(rJob);
  cluster.getJTClient().verifyJobHistory(rJob.getJobID());
}
Project: mapreduce-fork    File: TestCapacitySchedulerWithJobTracker.java
/**
 * Checks that jobs which fail initialization are removed from the
 * {@link CapacityTaskScheduler} waiting queue.
 *
 * @throws Exception
 */
public void testFailingJobInitalization() throws Exception {
  Properties schedulerProps = new Properties();
  Properties clusterProps = new Properties();
  clusterProps.put("mapred.queue.names","default");
  clusterProps.put(TTConfig.TT_MAP_SLOTS, String.valueOf(1));
  clusterProps.put(TTConfig.TT_REDUCE_SLOTS, String.valueOf(1));
  clusterProps.put(JTConfig.JT_TASKS_PER_JOB, String.valueOf(1));
  clusterProps.put(JTConfig.JT_PERSIST_JOBSTATUS, "false");
  // cluster capacity 1 maps, 1 reduces
  startCluster(1, clusterProps, schedulerProps);
  CapacityTaskScheduler scheduler = (CapacityTaskScheduler) getJobTracker()
    .getTaskScheduler();

  AbstractQueue root = scheduler.getRoot();
  root.getChildren().get(0).getQueueSchedulingContext()
      .setCapacityPercent(100);

  JobConf conf = getJobConf();
  conf.setSpeculativeExecution(false);
  conf.setNumTasksToExecutePerJvm(-1);
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(conf);
  Job job = sleepJob.createJob(3, 3, 1, 1, 1, 1);
  job.waitForCompletion(false);
  assertFalse(
    "The submitted job successfully completed",
    job.isSuccessful());

  JobQueuesManager mgr = scheduler.jobQueuesManager;
  assertEquals(
    "Failed job present in Waiting queue", 0, mgr
      .getJobQueue("default").getWaitingJobCount());
}
Project: hadoop    File: TestMRJobs.java
private void testSleepJobInternal(boolean useRemoteJar) throws Exception {
  LOG.info("\n\n\nStarting testSleepJob: useRemoteJar=" + useRemoteJar);

  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
             + " not found. Not running test.");
    return;
  }

  Configuration sleepConf = new Configuration(mrCluster.getConfig());
  // set master address to local to test that local mode is applied iff framework == local
  sleepConf.set(MRConfig.MASTER_ADDRESS, "local");

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(sleepConf);

  // job with 3 maps (10s each) and numSleepReducers reduces (5s each), 1 "record" each:
  Job job = sleepJob.createJob(3, numSleepReducers, 10000, 1, 5000, 1);

  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  if (useRemoteJar) {
    final Path localJar = new Path(
        ClassUtil.findContainingJar(SleepJob.class));
    ConfigUtil.addLink(job.getConfiguration(), "/jobjars",
        localFs.makeQualified(localJar.getParent()).toUri());
    job.setJar("viewfs:///jobjars/" + localJar.getName());
  } else {
    job.setJarByClass(SleepJob.class);
  }
  job.setMaxMapAttempts(1); // speed up failures
  job.submit();
  String trackingUrl = job.getTrackingURL();
  String jobId = job.getJobID().toString();
  boolean succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
  Assert.assertTrue("Tracking URL was " + trackingUrl +
                    " but didn't Match Job ID " + jobId ,
        trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
  verifySleepJobCounters(job);
  verifyTaskProgress(job);

  // TODO later:  add explicit "isUber()" checks of some sort (extend
  // JobStatus?)--compare against MRJobConfig.JOB_UBERTASK_ENABLE value
}
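
verifySleepJobCounters and verifyTaskProgress are helpers in the same test class and are not reproduced here. The counter checks are plausibly of this shape (a sketch, not the exact upstream assertions):

// Sketch of the kind of checks verifySleepJobCounters performs (assumed).
protected void verifySleepJobCounters(Job job) throws Exception {
  Counters counters = job.getCounters();
  Assert.assertEquals(3,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue());
  Assert.assertEquals(numSleepReducers,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue());
}
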
Project: hadoop    File: TestMRJobs.java
private void testJobClassloader(boolean useCustomClasses) throws IOException,
    InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testJobClassloader()"
      + " useCustomClasses=" + useCustomClasses);

  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
             + " not found. Not running test.");
    return;
  }
  final Configuration sleepConf = new Configuration(mrCluster.getConfig());
  // set master address to local to test that local mode is applied iff framework == local
  sleepConf.set(MRConfig.MASTER_ADDRESS, "local");
  sleepConf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, true);
  if (useCustomClasses) {
    // to test AM loading user classes such as output format class, we want
    // to blacklist them from the system classes (they need to be prepended
    // as the first match wins)
    String systemClasses = ApplicationClassLoader.SYSTEM_CLASSES_DEFAULT;
    // exclude the custom classes from system classes
    systemClasses = "-" + CustomOutputFormat.class.getName() + ",-" +
        CustomSpeculator.class.getName() + "," +
        systemClasses;
    sleepConf.set(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES,
        systemClasses);
  }
  sleepConf.set(MRJobConfig.IO_SORT_MB, TEST_IO_SORT_MB);
  sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
  sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
  sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, Level.ALL.toString());
  sleepConf.set(MRJobConfig.MAP_JAVA_OPTS, "-verbose:class");
  final SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(sleepConf);
  final Job job = sleepJob.createJob(1, 1, 10, 1, 10, 1);
  job.setMapperClass(ConfVerificationMapper.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.setJarByClass(SleepJob.class);
  job.setMaxMapAttempts(1); // speed up failures
  if (useCustomClasses) {
    // set custom output format class and speculator class
    job.setOutputFormatClass(CustomOutputFormat.class);
    final Configuration jobConf = job.getConfiguration();
    jobConf.setClass(MRJobConfig.MR_AM_JOB_SPECULATOR, CustomSpeculator.class,
        Speculator.class);
    // speculation needs to be enabled for the speculator to be loaded
    jobConf.setBoolean(MRJobConfig.MAP_SPECULATIVE, true);
  }
  job.submit();
  boolean succeeded = job.waitForCompletion(true);
  Assert.assertTrue("Job status: " + job.getStatus().getFailureInfo(),
      succeeded);
}
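
CustomOutputFormat and CustomSpeculator are inner classes of the test. The essential idea is that their constructors execute inside the job classloader, so they can verify where they were loaded from. A minimal sketch of one of them (the verification body is an assumption):

// Sketch: a user class whose constructor checks it was loaded by the
// job classloader (ApplicationClassLoader) rather than the system one.
public static class CustomOutputFormat<K, V> extends NullOutputFormat<K, V> {
  public CustomOutputFormat() {
    if (!(getClass().getClassLoader() instanceof ApplicationClassLoader)) {
      throw new IllegalStateException(
          "CustomOutputFormat was not loaded by the job classloader");
    }
  }
}
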
Project: hadoop    File: TestMRJobsWithHistoryService.java
@Test(timeout = 90000)
public void testJobHistoryData() throws IOException, InterruptedException,
    AvroRemoteException, ClassNotFoundException {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
        + " not found. Not running test.");
    return;
  }

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(mrCluster.getConfig());
  // Job with 3 maps and 2 reduces
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.waitForCompletion(true);
  Counters counterMR = job.getCounters();
  JobId jobId = TypeConverter.toYarn(job.getJobID());
  ApplicationId appID = jobId.getAppId();
  int pollElapsed = 0;
  while (true) {
    Thread.sleep(1000);
    pollElapsed += 1000;

    if (TERMINAL_RM_APP_STATES.contains(
        mrCluster.getResourceManager().getRMContext().getRMApps().get(appID)
        .getState())) {
      break;
    }

    if (pollElapsed >= 60000) {
      LOG.warn("application did not reach terminal state within 60 seconds");
      break;
    }
  }
  Assert.assertEquals(RMAppState.FINISHED, mrCluster.getResourceManager()
    .getRMContext().getRMApps().get(appID).getState());
  Counters counterHS = job.getCounters();
  // TODO: the assert below worked; need to check whether we should
  // compare each field or convert to V2 counters and compare
  LOG.info("CounterHS " + counterHS);
  LOG.info("CounterMR " + counterMR);
  Assert.assertEquals(counterHS, counterMR);

  HSClientProtocol historyClient = instantiateHistoryProxy();
  GetJobReportRequest gjReq = Records.newRecord(GetJobReportRequest.class);
  gjReq.setJobId(jobId);
  JobReport jobReport = historyClient.getJobReport(gjReq).getJobReport();
  verifyJobReport(jobReport, jobId);
}
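
instantiateHistoryProxy and verifyJobReport are helpers of the same test. Building the history client plausibly goes through YarnRPC against the configured history server address (a sketch; the exact wiring may differ):

// Sketch: build an HSClientProtocol proxy from the history server address.
// Imports assumed: org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig,
// org.apache.hadoop.net.NetUtils, org.apache.hadoop.yarn.ipc.YarnRPC.
private HSClientProtocol instantiateHistoryProxy() {
  final Configuration conf = mrCluster.getConfig();
  final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);
  final YarnRPC rpc = YarnRPC.create(conf);
  return (HSClientProtocol) rpc.getProxy(HSClientProtocol.class,
      NetUtils.createSocketAddr(serviceAddr), conf);
}
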
Project: hadoop    File: MapredTestDriver.java
public MapredTestDriver(ProgramDriver pgd) {
  this.pgd = pgd;
  try {
    pgd.addClass("testsequencefile", TestSequenceFile.class, 
    "A test for flat files of binary key value pairs.");
    pgd.addClass("threadedmapbench", ThreadedMapBenchmark.class, 
        "A map/reduce benchmark that compares the performance " + 
        "of maps with multiple spills over maps with 1 spill");
    pgd.addClass("mrbench", MRBench.class, 
        "A map/reduce benchmark that can create many small jobs");
    pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
    pgd.addClass("testsequencefileinputformat", 
        TestSequenceFileInputFormat.class, 
        "A test for sequence file input format.");
    pgd.addClass("testtextinputformat", TestTextInputFormat.class, 
        "A test for text input format.");
    pgd.addClass("testmapredsort", SortValidator.class, 
        "A map/reduce program that validates the " +
        "map-reduce framework's sort.");
    pgd.addClass("testbigmapoutput", BigMapOutput.class, 
        "A map/reduce program that works on a very big " +
        "non-splittable file and does identity map/reduce");
    pgd.addClass("loadgen", GenericMRLoadGenerator.class, 
        "Generic map/reduce load generator");
    pgd.addClass("MRReliabilityTest", ReliabilityTest.class,
        "A program that tests the reliability of the MR framework by " +
        "injecting faults/failures");
    pgd.addClass("fail", FailJob.class, "a job that always fails");
    pgd.addClass("sleep", SleepJob.class, 
                 "A job that sleeps at each map and reduce task.");
    pgd.addClass("nnbench", NNBench.class, 
        "A benchmark that stresses the namenode.");
    pgd.addClass("testfilesystem", TestFileSystem.class, 
        "A test for FileSystem read/write.");
    pgd.addClass(TestDFSIO.class.getSimpleName(), TestDFSIO.class, 
        "Distributed i/o benchmark.");
    pgd.addClass("DFSCIOTest", DFSCIOTest.class, "" +
        "Distributed i/o benchmark of libhdfs.");
    pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, 
        "Distributed checkup of the file system consistency.");
    pgd.addClass("filebench", FileBench.class, 
        "Benchmark SequenceFile(Input|Output)Format " +
        "(block,record compressed and uncompressed), " +
        "Text(Input|Output)Format (compressed and uncompressed)");
    pgd.addClass(JHLogAnalyzer.class.getSimpleName(), JHLogAnalyzer.class, 
        "Job History Log analyzer.");
    pgd.addClass(SliveTest.class.getSimpleName(), SliveTest.class, 
        "HDFS Stress Test and Live Data Verification.");
    pgd.addClass("minicluster", MiniHadoopClusterManager.class,
    "Single process HDFS and MR cluster.");
    pgd.addClass("largesorter", LargeSorter.class,
        "Large-Sort tester");
    pgd.addClass("NNloadGenerator", LoadGenerator.class,
            "Generate load on Namenode using NN loadgenerator run WITHOUT MR");
    pgd.addClass("NNloadGeneratorMR", LoadGeneratorMR.class,
        "Generate load on Namenode using NN loadgenerator run as MR job");
    pgd.addClass("NNstructureGenerator", StructureGenerator.class,
        "Generate the structure to be used by NNdataGenerator");
    pgd.addClass("NNdataGenerator", DataGenerator.class,
        "Generate the data to be used by NNloadGenerator");
  } catch(Throwable e) {
    e.printStackTrace();
  }
}
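
The registered names are dispatched through ProgramDriver, which is what makes "sleep -m 1 -r 1 -mt 1 -rt 1" runnable from the command line. A sketch of the companion entry point, assuming this Hadoop version's ProgramDriver exposes run(String[]):

// Sketch: forward command-line args to the programs registered above.
public void run(String[] argv) {
  int exitCode = -1;
  try {
    exitCode = pgd.run(argv);
  } catch (Throwable e) {
    e.printStackTrace();
  }
  System.exit(exitCode);
}

public static void main(String[] argv) {
  new MapredTestDriver().run(argv);
}
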
Project: aliyun-oss-hadoop-fs    File: TestMRJobs.java
private void testSleepJobInternal(boolean useRemoteJar) throws Exception {
  LOG.info("\n\n\nStarting testSleepJob: useRemoteJar=" + useRemoteJar);

  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
             + " not found. Not running test.");
    return;
  }

  Configuration sleepConf = new Configuration(mrCluster.getConfig());
  // set master address to local to test that local mode is applied iff framework == local
  sleepConf.set(MRConfig.MASTER_ADDRESS, "local");

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(sleepConf);

  // job with 3 maps (10s each) and numSleepReducers reduces (5s each), 1 "record" each:
  Job job = sleepJob.createJob(3, numSleepReducers, 10000, 1, 5000, 1);

  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  if (useRemoteJar) {
    final Path localJar = new Path(
        ClassUtil.findContainingJar(SleepJob.class));
    ConfigUtil.addLink(job.getConfiguration(), "/jobjars",
        localFs.makeQualified(localJar.getParent()).toUri());
    job.setJar("viewfs:///jobjars/" + localJar.getName());
  } else {
    job.setJarByClass(SleepJob.class);
  }
  job.setMaxMapAttempts(1); // speed up failures
  job.submit();
  String trackingUrl = job.getTrackingURL();
  String jobId = job.getJobID().toString();
  boolean succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
  Assert.assertTrue("Tracking URL was " + trackingUrl +
                    " but didn't Match Job ID " + jobId ,
        trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
  verifySleepJobCounters(job);
  verifyTaskProgress(job);

  // TODO later:  add explicit "isUber()" checks of some sort (extend
  // JobStatus?)--compare against MRJobConfig.JOB_UBERTASK_ENABLE value
}