public JobConf obtainJobConf(MiniMRCluster cluster) {
  if (cluster == null) return null;
  try {
    Object runner = cluster.getJobTrackerRunner();
    Method meth = runner.getClass().getDeclaredMethod("getJobTracker", emptyParam);
    Object tracker = meth.invoke(runner, new Object[]{});
    Method m = tracker.getClass().getDeclaredMethod("getConf", emptyParam);
    return (JobConf) m.invoke(tracker, new Object[]{});
  } catch (NoSuchMethodException nsme) {
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}
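// Note: the reflective obtainJobConf helpers in this listing reference an emptyParam
// field that is not shown in the snippets. A minimal sketch of the assumed declaration
// (hypothetical; it only needs to match the Class<?>[] vararg expected by
// getDeclaredMethod/getMethod):
private static final Class<?>[] emptyParam = new Class<?>[]{};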
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  jConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(0, 0, numSlaves,
      dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, jConf);
  createTokenFileJson();
  verifySecretKeysInJSONFile();
  createTokenFileBinary();
  verifySecretKeysInBinaryFile();
  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
  FileSystem fs = dfsCluster.getFileSystem();
  p1 = new Path("file1");
  p2 = new Path("file2");
  p1 = fs.makeQualified(p1);
}
private void startCluster(Configuration conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "build/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  dfsCluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = new MiniMRCluster(1, fileSystem.getUri().toString(), 1, null, null, new JobConf(conf));
}
@SuppressWarnings("deprecation") @Override public void start() throws Exception { System.setProperty("test.build.data", getDataDir()); LOG.info("test.build.data set to: " + getDataDir()); System.setProperty("hadoop.log.dir", getLogDir()); LOG.info("log dir set to: " + getLogDir()); // Start DFS server LOG.info("Starting DFS cluster..."); dfsCluster = new MiniDFSCluster(config, 1, true, null); if (dfsCluster.isClusterUp()) { LOG.info("Started DFS cluster on port: " + dfsCluster.getNameNodePort()); } else { LOG.error("Could not start DFS cluster"); } // Start MR server LOG.info("Starting MR cluster"); mrCluster = new MiniMRCluster(0, 0, 1, dfsCluster.getFileSystem().getUri() .toString(), 1, null, null, null, new JobConf(config)); LOG.info("Started MR cluster"); config = prepareConfiguration(mrCluster.createJobConf()); }
@Override
public JobConf obtainJobConf(MiniMRCluster cluster) {
  if (cluster == null) return null;
  try {
    Object runner = cluster.getJobTrackerRunner();
    Method meth = runner.getClass().getDeclaredMethod("getJobTracker", emptyParam);
    Object tracker = meth.invoke(runner, new Object[]{});
    Method m = tracker.getClass().getDeclaredMethod("getConf", emptyParam);
    return (JobConf) m.invoke(tracker, new Object[]{});
  } catch (NoSuchMethodException nsme) {
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  jConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(0, 0, numSlaves,
      dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, jConf);
  createTokenFileJson();
  verifySecretKeysInJSONFile();
  dfsCluster.getNameNode().getNamesystem()
      .getDelegationTokenSecretManager().startThreads();
  FileSystem fs = dfsCluster.getFileSystem();
  p1 = new Path("file1");
  p2 = new Path("file2");
  p1 = fs.makeQualified(p1);
}
/**
 * This tests the setting of the memory limit for streaming processes.
 * It launches a streaming app which allocates 10MB of memory.
 * First, the program is launched with sufficient memory and the test
 * expects it to succeed. Then the program is launched with insufficient
 * memory and is expected to fail.
 */
public void testCommandLine() {
  if (StreamUtil.isCygwin()) {
    return;
  }
  try {
    final int numSlaves = 2;
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster(conf, numSlaves, true, null);
    fs = dfs.getFileSystem();
    mr = new MiniMRCluster(numSlaves, fs.getUri().toString(), 1);
    writeInputFile(fs, inputPath);
    map = StreamUtil.makeJavaCommand(UlimitApp.class, new String[]{});
    runProgram(SET_MEMORY_LIMIT);
    fs.delete(outputPath, true);
    assertFalse("output not cleaned up", fs.exists(outputPath));
    mr.waitUntilIdle();
  } catch (IOException e) {
    fail(e.toString());
  } finally {
    mr.shutdown();
    dfs.shutdown();
  }
}
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  jConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(0, 0, numSlaves,
      dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, jConf);
  createTokenFileJson();
  verifySecretKeysInJSONFile();
  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
  FileSystem fs = dfsCluster.getFileSystem();
  p1 = new Path("file1");
  p2 = new Path("file2");
  p1 = fs.makeQualified(p1);
}
private void startCluster(boolean reStart) throws Exception {
  // Configure job queues
  String[] queueNames = {"default"};
  createQueuesConfigFile(queueNames,
      new String[] { jobSubmitter }, new String[] { qAdmin });

  JobConf conf = new JobConf();
  // Enable queue and job level authorization
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  // Enable CompletedJobStore
  FileSystem fs = FileSystem.getLocal(conf);
  if (!reStart) {
    fs.delete(TEST_DIR, true);
  }
  conf.set(JTConfig.JT_PERSIST_JOBSTATUS_DIR, fs.makeQualified(TEST_DIR).toString());
  conf.setBoolean(JTConfig.JT_PERSIST_JOBSTATUS, true);
  conf.set(JTConfig.JT_PERSIST_JOBSTATUS_HOURS, "1");

  UserGroupInformation MR_UGI = UserGroupInformation.getLoginUser();
  mr = new MiniMRCluster(0, 0, 1, "file:///", 1, null, null, MR_UGI, conf);
}
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  jConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(0, 0, numSlaves,
      dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, jConf);
  createTokenFileJson();
  verifySecretKeysInJSONFile();
  dfsCluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
  FileSystem fs = dfsCluster.getFileSystem();
  p1 = new Path("file1");
  p2 = new Path("file2");
  p1 = fs.makeQualified(p1);
}
@Before
public void setup() throws Exception {
  user1 = UserGroupInformation.createUserForTesting("alice", new String[]{"users"});
  user2 = UserGroupInformation.createUserForTesting("bob", new String[]{"users"});
  cluster = new MiniMRCluster(0, 0, 1, "file:///", 1);
}
protected void setUp() throws Exception {
  super.setUp();
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "/tmp");
  }
  int taskTrackers = 2;
  int dataNodes = 2;
  String proxyUser = System.getProperty("user.name");
  String proxyGroup = "g";
  StringBuilder sb = new StringBuilder();
  sb.append("127.0.0.1,localhost");
  for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
    sb.append(",").append(i.getCanonicalHostName());
  }

  JobConf conf = new JobConf();
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes).build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));

  String nnURI = fileSystem.getUri().toString();
  int numDirs = 1;
  String[] racks = null;
  String[] hosts = null;
  mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
public TestFileArgs() throws IOException {
  // Set up mini cluster
  conf = new Configuration();
  dfs = new MiniDFSCluster.Builder(conf).build();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().getAuthority();
  mr = new MiniMRCluster(1, namenode, 1);

  map = LS_PATH;
  FileSystem.setDefaultUri(conf, "hdfs://" + namenode);
  setTestDir(new File("/tmp/TestFileArgs"));
}
/**
 * Start the cluster and create the input file before running the actual test.
 *
 * @throws IOException
 */
@Before
public void setUp() throws IOException {
  conf = new JobConf();
  conf.setBoolean(JTConfig.JT_RETIREJOBS, false);
  conf.setBoolean(JTConfig.JT_PERSIST_JOBSTATUS, false);
  mr = new MiniMRCluster(1, "file:///", 3, null, null, conf);

  Path inFile = new Path(INPUT_FILE);
  fs = inFile.getFileSystem(mr.createJobConf());
  clean(fs);
  buildExpectedJobOutput();
}
public JobConf obtainJobConf(MiniMRCluster cluster) {
  try {
    Method meth = MiniMRCluster.class.getMethod("getJobTrackerConf", emptyParam);
    return (JobConf) meth.invoke(cluster, new Object[]{});
  } catch (NoSuchMethodException nsme) {
    return null;
  } catch (InvocationTargetException ite) {
    return null;
  } catch (IllegalAccessException iae) {
    return null;
  }
}
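// Hypothetical usage of the obtainJobConf helpers above (not part of the original
// snippets): read a setting from the running JobTracker's configuration. Assumes an
// mrCluster field and a LOG instance like the ones used in the setUp()/start() examples.
JobConf jtConf = obtainJobConf(mrCluster);
if (jtConf != null) {
  String jobTrackerAddress = jtConf.get("mapred.job.tracker");
  LOG.info("JobTracker address: " + jobTrackerAddress);
}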
public void start() throws IOException {
  File testCluster = new File(WORKING_DIRECTORY);
  if (testCluster.exists()) {
    FileUtil.deleteDirectory(testCluster);
  }
  testCluster.mkdirs();
  File testClusterData = new File(WORKING_DIRECTORY + "/data");
  File testClusterLog = new File(WORKING_DIRECTORY + "/logs");
  if (cluster == null) {
    conf = new HdfsConfiguration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testClusterData.getAbsolutePath());
    cluster = new MiniDFSCluster.Builder(conf).build();
    fs = cluster.getFileSystem();
    // set the minicluster as the default config
    HdfsUtil.setDefaultConfiguration(conf);
    System.setProperty("hadoop.log.dir", testClusterLog.getAbsolutePath());
    MiniMRCluster mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
    JobConf mrClusterConf = mrCluster.createJobConf();
    HdfsUtil.setDefaultConfiguration(new Configuration(mrClusterConf));
    System.out.println("------");
    JobClient client = new JobClient(mrClusterConf);
    ClusterStatus status = client.getClusterStatus(true);
    System.out.println(status.getActiveTrackerNames());
  }
}
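// The start() method above never stops the clusters it creates. A minimal, hypothetical
// teardown sketch (assuming the MiniMRCluster is promoted from a local variable to a
// field, e.g. mrCluster, so it can be reached here alongside the MiniDFSCluster field):
public void stop() {
  if (mrCluster != null) {
    mrCluster.shutdown();
    mrCluster = null;
  }
  if (cluster != null) {
    cluster.shutdown();
    cluster = null;
  }
}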