Example source code for the Java class org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster

Project: hadoop    File: TestBinaryTokenFile.java   (the same setUp appears verbatim in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, hadoop-TCP, hardfs, and hadoop-on-lustre2)
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();

  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_PRINCIPAL, "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");

  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.checkExitOnShutdown(true);
  builder.numDataNodes(numSlaves);
  builder.format(true);
  builder.racks(null);
  dfsCluster = builder.build();

  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
  mrCluster.init(conf);
  mrCluster.start();

  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads(); 

  FileSystem fs = dfsCluster.getFileSystem(); 
  p1 = new Path("file1");
  p1 = fs.makeQualified(p1);
}
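
Not shown in this excerpt: the clusters started here also have to be released after the test class finishes. A minimal @AfterClass sketch, assuming the static dfsCluster and mrCluster fields that setUp() assigns:

@AfterClass
public static void tearDown() throws Exception {
  if (mrCluster != null) {
    mrCluster.stop();       // MiniMRYarnCluster is a YARN Service; stop() shuts down its RM and NMs
    mrCluster = null;
  }
  if (dfsCluster != null) {
    dfsCluster.shutdown();  // stops the NameNode and DataNodes started above
    dfsCluster = null;
  }
}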
Project: hadoop    File: TestMRAMWithNonNormalizedCapabilities.java   (the same test appears verbatim in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, hops, hadoop-TCP, hardfs, and hadoop-on-lustre2)
/**
 * To ensure nothing is broken after we removed normalization
 * from the MRAM side.
 * @throws Exception
 */
@Test
public void testJobWithNonNormalizedCapabilities() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
              + " not found. Not running test.");
    return;
  }

  JobConf jobConf = new JobConf(mrCluster.getConfig());
  jobConf.setInt("mapreduce.map.memory.mb", 700);
  jobConf.setInt("mapred.reduce.memory.mb", 1500);

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(jobConf);
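  // createJob(numMapper, numReducer, mapSleepTime, mapSleepCount, reduceSleepTime, reduceSleepCount)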
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.submit();
  boolean completed = job.waitForCompletion(true);
  Assert.assertTrue("Job should be completed", completed);
  Assert.assertEquals("Job should be finished successfully", 
                  JobStatus.State.SUCCEEDED, job.getJobState());
}
Project: hops    File: TestBinaryTokenFile.java
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();

  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_PRINCIPAL, "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");

  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.checkExitOnShutdown(true);
  builder.numDataNodes(numSlaves);
  builder.format(true);
  builder.racks(null);
  dfsCluster = builder.build();

  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs, false);
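  // Unlike the hadoop setUp above, this fork uses the three-argument constructor; the extra
  // boolean corresponds to the enableAHS flag in upstream MiniMRYarnCluster (disabled here).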
  mrCluster.init(conf);
  mrCluster.start();

  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads(); 

  FileSystem fs = dfsCluster.getFileSystem(); 
  p1 = new Path("file1");
  p1 = fs.makeQualified(p1);
}
Project: hadoop    File: TestMRAMWithNonNormalizedCapabilities.java   (the same tearDown appears verbatim in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, and FlexMap)
@After
public void tearDown() {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
        + " not found. Not running test.");
    return;
  }

  if (mrCluster != null) {
    mrCluster.stop();
  }
}
Project: hadoop    File: TestMRCJCSocketFactory.java   (the same helper appears verbatim in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, FlexMap, and as TestSocketFactory.java in hadoop-plus)
private void stopMiniMRYarnCluster(MiniMRYarnCluster miniMRYarnCluster) {
  try {
    if (miniMRYarnCluster != null)
      miniMRYarnCluster.stop();

  } catch (Exception ignored) {
    // nothing we can do
    ignored.printStackTrace();
  }
}
Project: aliyun-oss-hadoop-fs    File: MiniMRClientClusterFactory.java   (the same factory method appears verbatim in big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, and FlexMap)
public static MiniMRClientCluster create(Class<?> caller, String identifier,
    int noOfNMs, Configuration conf) throws IOException {

  if (conf == null) {
    conf = new Configuration();
  }

  FileSystem fs = FileSystem.get(conf);

  Path testRootDir = new Path("target", identifier + "-tmpDir")
      .makeQualified(fs);
  Path appJar = new Path(testRootDir, "MRAppJar.jar");

  // Copy MRAppJar and make it private.
  Path appMasterJar = new Path(MiniMRYarnCluster.APPJAR);

  fs.copyFromLocalFile(appMasterJar, appJar);
  fs.setPermission(appJar, new FsPermission("744"));

  Job job = Job.getInstance(conf);

  job.addFileToClassPath(appJar);

  Path callerJar = new Path(JarFinder.getJar(caller));
  Path remoteCallerJar = new Path(testRootDir, callerJar.getName());
  fs.copyFromLocalFile(callerJar, remoteCallerJar);
  fs.setPermission(remoteCallerJar, new FsPermission("744"));
  job.addFileToClassPath(remoteCallerJar);

  MiniMRYarnCluster miniMRYarnCluster = new MiniMRYarnCluster(identifier,
      noOfNMs);
  job.getConfiguration().set("minimrclientcluster.caller.name",
      identifier);
  job.getConfiguration().setInt("minimrclientcluster.nodemanagers.number",
      noOfNMs);
  miniMRYarnCluster.init(job.getConfiguration());
  miniMRYarnCluster.start();

  return new MiniMRYarnClusterAdapter(miniMRYarnCluster);
}
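
For context (not part of the factory above), a caller might drive the returned MiniMRClientCluster roughly as follows. The test class and method names are illustrative placeholders; getConfig() and stop() belong to the MiniMRClientCluster interface.

// Hypothetical usage of MiniMRClientClusterFactory.create(); MyWordCountTest is a placeholder.
public static void runAgainstMiniCluster() throws Exception {
  MiniMRClientCluster cluster = MiniMRClientClusterFactory
      .create(MyWordCountTest.class, "MyWordCountTest", 1, new Configuration());
  try {
    // The returned configuration points at the mini cluster's ResourceManager and history server.
    Job job = Job.getInstance(cluster.getConfig(), "smoke-test");
    // ... set mapper/reducer and input/output paths, then job.waitForCompletion(true) ...
  } finally {
    cluster.stop();
  }
}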
Project: Halyard    File: HBaseServerTestInstance.java
public static synchronized Configuration getInstanceConfig() throws Exception {
    if (conf == null) {
        File zooRoot = File.createTempFile("hbase-zookeeper", "");
        zooRoot.delete();
        ZooKeeperServer zookeper = new ZooKeeperServer(zooRoot, zooRoot, 2000);
        ServerCnxnFactory factory = ServerCnxnFactory.createFactory(new InetSocketAddress("localhost", 0), 5000);
        factory.startup(zookeper);

        YarnConfiguration yconf = new YarnConfiguration();
        String argLine = System.getProperty("argLine");
        if (argLine != null) {
            yconf.set("yarn.app.mapreduce.am.command-opts", argLine.replace("jacoco.exec", "jacocoMR.exec"));
        }
        yconf.setBoolean(MRConfig.MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING, false);
        yconf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
        MiniMRYarnCluster miniCluster = new MiniMRYarnCluster("testCluster");
        miniCluster.init(yconf);
        yconf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);
        miniCluster.start();

        File hbaseRoot = File.createTempFile("hbase-root", "");
        hbaseRoot.delete();
        conf = HBaseConfiguration.create(miniCluster.getConfig());
        conf.set(HConstants.HBASE_DIR, hbaseRoot.toURI().toURL().toString());
        conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, factory.getLocalPort());
        conf.set("hbase.master.hostname", "localhost");
        conf.set("hbase.regionserver.hostname", "localhost");
        conf.setInt("hbase.master.info.port", -1);
        conf.set("hbase.fs.tmp.dir", new File(System.getProperty("java.io.tmpdir")).toURI().toURL().toString());
        LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
        cluster.startup();
    }
    return conf;
}
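
A caller would typically fetch this shared configuration once and open an HBase client connection from it; the sketch below is illustrative and assumes only the standard HBase client API.

    // Hypothetical test bootstrap against the shared mini HBase/YARN instance.
    Configuration conf = HBaseServerTestInstance.getInstanceConfig();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
        // create test tables here, or hand conf to MapReduce jobs submitted to the mini YARN cluster
    }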
Project: hadoop-mini-clusters    File: MRLocalCluster.java
@Override
public void start() throws Exception {
    LOG.info("MR: Starting MiniMRYarnCluster");
    configure();
    miniMRYarnCluster = new MiniMRYarnCluster(testName, numNodeManagers);
    miniMRYarnCluster.serviceInit(configuration);
    miniMRYarnCluster.init(configuration);
    miniMRYarnCluster.start();
}
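
The matching shutdown is not included in this listing; a minimal sketch, assuming only the miniMRYarnCluster field used in start() above:

// Illustrative companion to start() above (not taken from the hadoop-mini-clusters source).
public void stop() throws Exception {
    if (miniMRYarnCluster != null) {
        LOG.info("MR: Stopping MiniMRYarnCluster");
        miniMRYarnCluster.stop();
    }
}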
Project: hops    File: MrClusterTest.java
@Override
public void setUp() throws Exception {
  super.setUp();
  Configuration conf = new Configuration(getConfig());
  mrCluster = new MiniMRYarnCluster(this.getClass().getName(), numDatanode,
          false);
  conf.set("fs.defaultFS", fs.getUri().toString());
  mrCluster.init(conf);
  mrCluster.start();
}