Java code examples for class org.apache.hadoop.mapreduce.MRConfig

Project: angel    File: CounterUpdater.java
public void initialize() {
  Class<? extends ResourceCalculatorProcessTree> clazz =
      PSAgentContext
          .get()
          .getConf()
          .getClass(MRConfig.RESOURCE_CALCULATOR_PROCESS_TREE, null,
              ResourceCalculatorProcessTree.class);
  pTree =
      ResourceCalculatorProcessTree.getResourceCalculatorProcessTree(
          System.getenv().get("JVM_PID"), clazz, PSAgentContext.get().getConf());
  if (pTree != null) {
    pTree.updateProcessTree();
    initCpuCumulativeTime = pTree.getCumulativeCpuTime();
  }
  LOG.info(" Using ResourceCalculatorProcessTree : " + pTree);
}
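Note: the key read above is normally left unset, in which case getResourceCalculatorProcessTree falls back to a platform default. A minimal standalone sketch of overriding it explicitly (the class and wrapper names here are illustrative, and ProcfsBasedProcessTree is Linux-only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree;
import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;

public class ProcessTreeConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Force the /proc-based implementation instead of letting Hadoop choose.
    conf.setClass(MRConfig.RESOURCE_CALCULATOR_PROCESS_TREE,
        ProcfsBasedProcessTree.class, ResourceCalculatorProcessTree.class);
    System.out.println(conf.get(MRConfig.RESOURCE_CALCULATOR_PROCESS_TREE));
  }
}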
Project: hadoop / aliyun-oss-hadoop-fs    File: TestReporter.java
@Override
public void map(LongWritable key, Text value, Context context)
    throws IOException {
  StringBuilder sb = new StringBuilder(512);
  for (int i = 0; i < 1000; i++) {
    sb.append("a");
  }
  context.setStatus(sb.toString());
  int progressStatusLength = context.getConfiguration().getInt(
      MRConfig.PROGRESS_STATUS_LEN_LIMIT_KEY,
      MRConfig.PROGRESS_STATUS_LEN_LIMIT_DEFAULT);

  if (context.getStatus().length() > progressStatusLength) {
    throw new IOException("Status is not truncated");
  }
}
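Note: the limit consulted here is MRConfig.PROGRESS_STATUS_LEN_LIMIT_KEY, falling back to PROGRESS_STATUS_LEN_LIMIT_DEFAULT (512 in stock Hadoop), so the 1000-character status set above should come back truncated. A small sketch (wrapper class name is illustrative) of raising the limit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;

public class StatusLimitSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Permit task status strings up to 1024 characters instead of the default.
    conf.setInt(MRConfig.PROGRESS_STATUS_LEN_LIMIT_KEY, 1024);
    System.out.println(conf.getInt(MRConfig.PROGRESS_STATUS_LEN_LIMIT_KEY,
        MRConfig.PROGRESS_STATUS_LEN_LIMIT_DEFAULT));
  }
}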
Project: hadoop / aliyun-oss-hadoop-fs    File: TestBinaryTokenFile.java
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();

  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_PRINCIPAL, "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");

  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.checkExitOnShutdown(true);
  builder.numDataNodes(numSlaves);
  builder.format(true);
  builder.racks(null);
  dfsCluster = builder.build();

  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
  mrCluster.init(conf);
  mrCluster.start();

  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads(); 

  FileSystem fs = dfsCluster.getFileSystem(); 
  p1 = new Path("file1");
  p1 = fs.makeQualified(p1);
}
Project: hadoop / big-c    File: TestMapCollection.java
private static void runTest(String name, Job job) throws Exception {
  job.setNumReduceTasks(1);
  job.getConfiguration().set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
  job.getConfiguration().setInt(MRJobConfig.IO_SORT_FACTOR, 1000);
  job.getConfiguration().set("fs.defaultFS", "file:///");
  job.getConfiguration().setInt("test.mapcollection.num.maps", 1);
  job.setInputFormatClass(FakeIF.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setMapperClass(Mapper.class);
  job.setReducerClass(SpillReducer.class);
  job.setMapOutputKeyClass(KeyWritable.class);
  job.setMapOutputValueClass(ValWritable.class);
  job.setSortComparatorClass(VariableComparator.class);

  LOG.info("Running " + name);
  assertTrue("Job failed!", job.waitForCompletion(false));
}
Project: hadoop / big-c / aliyun-oss-hadoop-fs    File: LocalJobRunner.java
static void setupChildMapredLocalDirs(Task t, JobConf conf) {
  String[] localDirs = conf.getTrimmedStrings(MRConfig.LOCAL_DIR);
  String jobId = t.getJobID().toString();
  String taskId = t.getTaskID().toString();
  boolean isCleanup = t.isTaskCleanupTask();
  String user = t.getUser();
  StringBuffer childMapredLocalDir =
      new StringBuffer(localDirs[0] + Path.SEPARATOR
          + getLocalTaskDir(user, jobId, taskId, isCleanup));
  for (int i = 1; i < localDirs.length; i++) {
    childMapredLocalDir.append("," + localDirs[i] + Path.SEPARATOR
        + getLocalTaskDir(user, jobId, taskId, isCleanup));
  }
  LOG.debug(MRConfig.LOCAL_DIR + " for child : " + childMapredLocalDir);
  conf.set(MRConfig.LOCAL_DIR, childMapredLocalDir.toString());
}
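Note: the value written back to MRConfig.LOCAL_DIR is a comma-joined list, which is why it is read with getTrimmedStrings at the top of the method. A small sketch of that round trip (directory names are made up):

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;

public class LocalDirSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set(MRConfig.LOCAL_DIR, "/tmp/mr-local-a, /tmp/mr-local-b");
    // getTrimmedStrings splits on commas and strips surrounding whitespace.
    System.out.println(Arrays.toString(conf.getTrimmedStrings(MRConfig.LOCAL_DIR)));
  }
}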
Project: hadoop / aliyun-oss-hadoop-fs    File: TestJobClient.java
@Test
public void testGetClusterStatusWithLocalJobRunner() throws Exception {
  Configuration conf = new Configuration();
  conf.set(JTConfig.JT_IPC_ADDRESS, MRConfig.LOCAL_FRAMEWORK_NAME);
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
  JobClient client = new JobClient(conf);
  ClusterStatus clusterStatus = client.getClusterStatus(true);
  Collection<String> activeTrackerNames = clusterStatus
      .getActiveTrackerNames();
  Assert.assertEquals(0, activeTrackerNames.size());
  int blacklistedTrackers = clusterStatus.getBlacklistedTrackers();
  Assert.assertEquals(0, blacklistedTrackers);
  Collection<BlackListInfo> blackListedTrackersInfo = clusterStatus
      .getBlackListedTrackersInfo();
  Assert.assertEquals(0, blackListedTrackersInfo.size());
}
Project: hadoop    File: TestMRApps.java
@Test (timeout = 120000)
public void testSetClasspathWithUserPrecendence() {
  Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, true);
  Map<String, String> env = new HashMap<String, String>();
  try {
    MRApps.setClasspath(env, conf);
  } catch (Exception e) {
    fail("Got exception while setting classpath");
  }
  String env_str = env.get("CLASSPATH");
  String expectedClasspath = StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,
    Arrays.asList(ApplicationConstants.Environment.PWD.$$(), "job.jar/job.jar",
      "job.jar/classes/", "job.jar/lib/*",
      ApplicationConstants.Environment.PWD.$$() + "/*"));
  assertTrue("MAPREDUCE_JOB_USER_CLASSPATH_FIRST set, but not taking effect!",
    env_str.startsWith(expectedClasspath));
}
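Note: Environment.PWD.$$() yields the cross-platform expansion form of the variable, which the NodeManager substitutes on any OS; that is why the test enables MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM first. A sketch of the two forms (wrapper class name is illustrative):

import org.apache.hadoop.yarn.api.ApplicationConstants;

public class EnvExpansionSketch {
  public static void main(String[] args) {
    // Cross-platform form, e.g. {{PWD}}, resolved by the NodeManager.
    System.out.println(ApplicationConstants.Environment.PWD.$$());
    // Unix shell form, e.g. $PWD.
    System.out.println(ApplicationConstants.Environment.PWD.$());
  }
}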
Project: hadoop / aliyun-oss-hadoop-fs    File: TestMRApps.java
@Test (timeout = 120000)
public void testSetClasspathWithNoUserPrecendence() {
  Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, false);
  Map<String, String> env = new HashMap<String, String>();
  try {
    MRApps.setClasspath(env, conf);
  } catch (Exception e) {
    fail("Got exception while setting classpath");
  }
  String env_str = env.get("CLASSPATH");
  String expectedClasspath = StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,
    Arrays.asList("job.jar/job.jar", "job.jar/classes/", "job.jar/lib/*",
      ApplicationConstants.Environment.PWD.$$() + "/*"));
  assertTrue("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, and job.jar is not in"
    + " the classpath!", env_str.contains(expectedClasspath));
  assertFalse("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, but taking effect!",
    env_str.startsWith(expectedClasspath));
}
Project: hadoop / big-c    File: TestMRApps.java
@Test (timeout = 120000)
public void testSetClasspathWithJobClassloader() throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, true);
  Map<String, String> env = new HashMap<String, String>();
  MRApps.setClasspath(env, conf);
  String cp = env.get("CLASSPATH");
  String appCp = env.get("APP_CLASSPATH");
  assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is in the"
    + " classpath!", cp.contains("jar" + ApplicationConstants.CLASS_PATH_SEPARATOR + "job"));
  assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but PWD is in the classpath!",
    cp.contains("PWD"));
  String expectedAppClasspath = StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,
    Arrays.asList(ApplicationConstants.Environment.PWD.$$(), "job.jar/job.jar",
      "job.jar/classes/", "job.jar/lib/*",
      ApplicationConstants.Environment.PWD.$$() + "/*"));
  assertEquals("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is not in the app"
    + " classpath!", expectedAppClasspath, appCp);
}
Project: hadoop / aliyun-oss-hadoop-fs    File: Task.java
public void setConf(Configuration conf) {
  if (conf instanceof JobConf) {
    this.conf = (JobConf) conf;
  } else {
    this.conf = new JobConf(conf);
  }
  this.mapOutputFile = ReflectionUtils.newInstance(
      conf.getClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
        MROutputFiles.class, MapOutputFile.class), conf);
  this.lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
  // Add the static resolutions (this is required for JUnit tests that
  // simulate multiple nodes on a single physical node).
  String hostToResolved[] = conf.getStrings(MRConfig.STATIC_RESOLUTIONS);
  if (hostToResolved != null) {
    for (String str : hostToResolved) {
      String name = str.substring(0, str.indexOf('='));
      String resolvedName = str.substring(str.indexOf('=') + 1);
      NetUtils.addStaticResolution(name, resolvedName);
    }
  }
}
Project: hadoop    File: IFileInputStream.java
/**
 * Create a checksum input stream that verifies the IFile checksum while reading.
 * @param in The input stream to be verified for checksum.
 * @param len The length of the input stream, including the checksum bytes.
 */
public IFileInputStream(InputStream in, long len, Configuration conf) {
  this.in = in;
  this.inFd = getFileDescriptorIfAvail(in);
  sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 
      Integer.MAX_VALUE);
  checksumSize = sum.getChecksumSize();
  length = len;
  dataLength = length - checksumSize;

  conf = (conf != null) ? conf : new Configuration();
  readahead = conf.getBoolean(MRConfig.MAPRED_IFILE_READAHEAD,
      MRConfig.DEFAULT_MAPRED_IFILE_READAHEAD);
  readaheadLength = conf.getInt(MRConfig.MAPRED_IFILE_READAHEAD_BYTES,
      MRConfig.DEFAULT_MAPRED_IFILE_READAHEAD_BYTES);

  doReadahead();
}
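Note: a sketch of the two readahead knobs this constructor consumes, set to hypothetical values (the fallbacks are the DEFAULT_MAPRED_IFILE_READAHEAD* constants used above; wrapper class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;

public class IFileReadaheadSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Enable readahead on IFile streams and widen the window to 4 MiB.
    conf.setBoolean(MRConfig.MAPRED_IFILE_READAHEAD, true);
    conf.setInt(MRConfig.MAPRED_IFILE_READAHEAD_BYTES, 4 * 1024 * 1024);
  }
}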
Project: hadoop / aliyun-oss-hadoop-fs    File: TestMaster.java
@Test 
public void testGetMasterUser() {
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(MRConfig.MASTER_USER_NAME, "foo");
  conf.set(YarnConfiguration.RM_PRINCIPAL, "bar");

  // default is yarn framework  
  assertEquals(Master.getMasterUserName(conf), "bar");

  // set framework name to classic
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
  assertEquals(Master.getMasterUserName(conf), "foo");

  // change framework to yarn
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  assertEquals(Master.getMasterUserName(conf), "bar");

}
Project: hadoop / big-c / aliyun-oss-hadoop-fs    File: TestJobAclsManager.java
@Test
public void testClusterAdmins() {
  Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
  Configuration conf = new Configuration();
  String jobOwner = "testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
  conf.set(JobACL.MODIFY_JOB.getAclName(), jobOwner);
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  String clusterAdmin = "testuser2";
  conf.set(MRConfig.MR_ADMINS, clusterAdmin);

  JobACLsManager aclsManager = new JobACLsManager(conf);
  tmpJobACLs = aclsManager.constructJobACLs(conf);
  final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;

  UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
      clusterAdmin, new String[] {});

  // cluster admin should have access
  boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
      jobACLs.get(JobACL.VIEW_JOB));
  assertTrue("cluster admin should have view access", val);
  val = aclsManager.checkAccess(callerUGI, JobACL.MODIFY_JOB, jobOwner,
      jobACLs.get(JobACL.MODIFY_JOB));
  assertTrue("cluster admin should have modify access", val);
}
Project: hadoop / big-c / aliyun-oss-hadoop-fs    File: TestJobAclsManager.java
@Test
public void testAclsOff() {
  Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
  Configuration conf = new Configuration();
  String jobOwner = "testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, false);
  String noAdminUser = "testuser2";

  JobACLsManager aclsManager = new JobACLsManager(conf);
  tmpJobACLs = aclsManager.constructJobACLs(conf);
  final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;

  UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
      noAdminUser, new String[] {});
  // acls off so anyone should have access
  boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
      jobACLs.get(JobACL.VIEW_JOB));
  assertTrue("acls off so anyone should have access", val);
}
Project: hadoop    File: TestJobAclsManager.java
@Test
public void testGroups() {
  Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
  Configuration conf = new Configuration();
  String jobOwner = "testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  String user = "testuser2";
  String adminGroup = "adminGroup";
  conf.set(MRConfig.MR_ADMINS, " " + adminGroup);

  JobACLsManager aclsManager = new JobACLsManager(conf);
  tmpJobACLs = aclsManager.constructJobACLs(conf);
  final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;

  UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
      user, new String[] {adminGroup});
  // ACLs are on, but the caller is in the admin group, so access is expected
  boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
      jobACLs.get(JobACL.VIEW_JOB));
  assertTrue("user in admin group should have access", val);
}
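Note: the leading space in the MR_ADMINS value above is significant. An AccessControlList string has the form "user1,user2 group1,group2", so " adminGroup" declares no admin users and one admin group. A sketch of the same list built directly (wrapper class name is illustrative):

import org.apache.hadoop.security.authorize.AccessControlList;

public class AdminAclSketch {
  public static void main(String[] args) {
    // Empty user part, one group part; same effect as conf.set(MRConfig.MR_ADMINS, " adminGroup").
    AccessControlList acl = new AccessControlList(" adminGroup");
    System.out.println(acl.getUsers());   // []
    System.out.println(acl.getGroups());  // [adminGroup]
  }
}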
Project: hadoop    File: TestJobSplitWriter.java
@Test
public void testMaxBlockLocationsNewSplits() throws Exception {
  TEST_DIR.mkdirs();
  try {
    Configuration conf = new Configuration();
    conf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 4);
    Path submitDir = new Path(TEST_DIR.getAbsolutePath());
    FileSystem fs = FileSystem.getLocal(conf);
    FileSplit split = new FileSplit(new Path("/some/path"), 0, 1,
        new String[] { "loc1", "loc2", "loc3", "loc4", "loc5" });
    JobSplitWriter.createSplitFiles(submitDir, conf, fs,
        new FileSplit[] { split });
    JobSplit.TaskSplitMetaInfo[] infos =
        SplitMetaInfoReader.readSplitMetaInfo(new JobID(), fs, conf,
            submitDir);
    assertEquals("unexpected number of splits", 1, infos.length);
    assertEquals("unexpected number of split locations",
        4, infos[0].getLocations().length);
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
Project: hadoop / aliyun-oss-hadoop-fs    File: TestJobSplitWriter.java
@Test
public void testMaxBlockLocationsOldSplits() throws Exception {
  TEST_DIR.mkdirs();
  try {
    Configuration conf = new Configuration();
    conf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 4);
    Path submitDir = new Path(TEST_DIR.getAbsolutePath());
    FileSystem fs = FileSystem.getLocal(conf);
    org.apache.hadoop.mapred.FileSplit split =
        new org.apache.hadoop.mapred.FileSplit(new Path("/some/path"), 0, 1,
            new String[] { "loc1", "loc2", "loc3", "loc4", "loc5" });
    JobSplitWriter.createSplitFiles(submitDir, conf, fs,
        new org.apache.hadoop.mapred.InputSplit[] { split });
    JobSplit.TaskSplitMetaInfo[] infos =
        SplitMetaInfoReader.readSplitMetaInfo(new JobID(), fs, conf,
            submitDir);
    assertEquals("unexpected number of splits", 1, infos.length);
    assertEquals("unexpected number of split locations",
        4, infos[0].getLocations().length);
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
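Note: both variants cap MRConfig.MAX_BLOCK_LOCATIONS_KEY at 4, so the five locations supplied per split are truncated when the split metadata is written. A sketch of reading the effective limit, assuming MAX_BLOCK_LOCATIONS_DEFAULT as the stock fallback (wrapper class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;

public class SplitLocationLimitSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 4);
    // Locations beyond this count are dropped from the persisted split info.
    System.out.println(conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT));
  }
}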
Project: hadoop / aliyun-oss-hadoop-fs    File: TestHsWebServicesAcls.java
@Before
public void setup() throws IOException {
  this.conf = new JobConf();
  this.conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      NullGroupsProvider.class.getName());
  this.conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  Groups.getUserToGroupsMappingService(conf);
  this.ctx = buildHistoryContext(this.conf);
  WebApp webApp = mock(HsWebApp.class);
  when(webApp.name()).thenReturn("hsmockwebapp");
  this.hsWebServices = new HsWebServices(ctx, conf, webApp);
  this.hsWebServices.setResponse(mock(HttpServletResponse.class));

  Job job = ctx.getAllJobs().values().iterator().next();
  this.jobIdStr = job.getID().toString();
  Task task = job.getTasks().values().iterator().next();
  this.taskIdStr = task.getID().toString();
  this.taskAttemptIdStr =
      task.getAttempts().keySet().iterator().next().toString();
}
Project: hadoop / big-c / aliyun-oss-hadoop-fs    File: GridmixJob.java
/**
 * Sets the high ram job properties in the simulated job's configuration.
 */
@SuppressWarnings("deprecation")
static void configureHighRamProperties(Configuration sourceConf, 
                                       Configuration destConf) {
  // set the memory per map task
  scaleConfigParameter(sourceConf, destConf, 
                       MRConfig.MAPMEMORY_MB, MRJobConfig.MAP_MEMORY_MB, 
                       MRJobConfig.DEFAULT_MAP_MEMORY_MB);

  // validate and fail early
  validateTaskMemoryLimits(destConf, MRJobConfig.MAP_MEMORY_MB, 
                           JTConfig.JT_MAX_MAPMEMORY_MB);

  // set the memory per reduce task
  scaleConfigParameter(sourceConf, destConf, 
                       MRConfig.REDUCEMEMORY_MB, MRJobConfig.REDUCE_MEMORY_MB,
                       MRJobConfig.DEFAULT_REDUCE_MEMORY_MB);
  // validate and fail early
  validateTaskMemoryLimits(destConf, MRJobConfig.REDUCE_MEMORY_MB, 
                           JTConfig.JT_MAX_REDUCEMEMORY_MB);
}
Project: hadoop    File: TestMRFramework.java
@Test
public void testFramework() {
  JobConf jobConf = new JobConf();
  jobConf.set(JTConfig.JT_IPC_ADDRESS, MRConfig.LOCAL_FRAMEWORK_NAME);
  jobConf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  assertFalse("Expected 'isLocal' to be false", 
      StreamUtil.isLocalJobTracker(jobConf));

  jobConf.set(JTConfig.JT_IPC_ADDRESS, MRConfig.LOCAL_FRAMEWORK_NAME);
  jobConf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
  assertFalse("Expected 'isLocal' to be false", 
      StreamUtil.isLocalJobTracker(jobConf));

  jobConf.set(JTConfig.JT_IPC_ADDRESS, "jthost:9090");
  jobConf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
  assertTrue("Expected 'isLocal' to be true", 
      StreamUtil.isLocalJobTracker(jobConf));
}
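Note: as the assertions show, StreamUtil.isLocalJobTracker keys off MRConfig.FRAMEWORK_NAME rather than the old JobTracker address. A minimal local-mode configuration sketch (wrapper class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;

public class LocalModeSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Run the job in-process with the LocalJobRunner against the local FS.
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
    conf.set("fs.defaultFS", "file:///");
  }
}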
Project: aliyun-oss-hadoop-fs    File: TestLocalJobSubmission.java
/**
 * Test the local job submission options
 * -jt local and -libjars.
 * @throws IOException
 */
@Test
public void testLocalJobLibjarsOption() throws IOException {
  Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));

  Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000");
  conf.set(MRConfig.FRAMEWORK_NAME, "local");
  final String[] args = {
      "-jt" , "local", "-libjars", jarPath.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
Project: aliyun-oss-hadoop-fs    File: TestLocalJobSubmission.java
/**
 * Test local job submission with
 * intermediate data encryption enabled.
 * @throws IOException
 */
@Test
public void testLocalJobEncryptedIntermediateData() throws IOException {
  Configuration conf = new Configuration();
  conf.set(MRConfig.FRAMEWORK_NAME, "local");
  conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
  final String[] args = {
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}