/**
 * Boots a mini DFS cluster plus a single-node MR client cluster for the
 * credential tests, then writes the test key material to keys.json.
 *
 * @throws Exception if either mini cluster fails to start
 */
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty("hadoop.log.dir", "logs");

  final Configuration clusterConf = new Configuration();
  dfsCluster = new MiniDFSCluster.Builder(clusterConf)
      .numDataNodes(numSlaves)
      .build();

  // jConf is snapshotted before the default FS URI is applied below,
  // matching the established ordering of this fixture.
  jConf = new JobConf(clusterConf);
  FileSystem.setDefaultUri(clusterConf,
      dfsCluster.getFileSystem().getUri().toString());

  mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1,
      jConf);
  createKeysAsJson("keys.json");
}
private void startCluster(Configuration conf) throws Exception { if (System.getProperty("hadoop.log.dir") == null) { System.setProperty("hadoop.log.dir", "target/test-dir"); } conf.set("dfs.block.access.token.enable", "false"); conf.set("dfs.permissions", "true"); conf.set("hadoop.security.authentication", "simple"); String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH, StringUtils.join(",", YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) + File.pathSeparator + classpathDir; conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp); dfsCluster = new MiniDFSCluster.Builder(conf).build(); FileSystem fileSystem = dfsCluster.getFileSystem(); fileSystem.mkdirs(new Path("/tmp")); fileSystem.mkdirs(new Path("/user")); fileSystem.mkdirs(new Path("/hadoop/mapred/system")); fileSystem.setPermission( new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx")); fileSystem.setPermission( new Path("/user"), FsPermission.valueOf("-rwxrwxrwx")); fileSystem.setPermission( new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------")); FileSystem.setDefaultUri(conf, fileSystem.getUri()); mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf); // so the minicluster conf is avail to the containers. Writer writer = new FileWriter(classpathDir + "/core-site.xml"); mrCluster.getConfig().writeXml(writer); writer.close(); }
public static void initCluster(Class<?> caller) throws IOException { Configuration conf = new Configuration(); // conf.set("mapred.queue.names", "default,q1,q2"); conf.set("mapred.queue.names", "default"); conf.set("yarn.scheduler.capacity.root.queues", "default"); conf.set("yarn.scheduler.capacity.root.default.capacity", "100.0"); conf.setBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false); conf.set(GRIDMIX_DEFAULT_QUEUE, "default"); dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true) .build();// MiniDFSCluster(conf, 3, true, null); dfs = dfsCluster.getFileSystem(); conf.set(JTConfig.JT_RETIREJOBS, "false"); mrvl = MiniMRClientClusterFactory.create(caller, 2, conf); conf = mrvl.getConfig(); String[] files = conf.getStrings(MRJobConfig.CACHE_FILES); if (files != null) { String[] timestamps = new String[files.length]; for (int i = 0; i < files.length; i++) { timestamps[i] = Long.toString(System.currentTimeMillis()); } conf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, timestamps); } }
/**
 * Boots a mini DFS cluster and a single-node MR client cluster for the
 * credential tests, then writes the test key material to keys.json.
 *
 * @throws Exception if either mini cluster fails to start
 */
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty("hadoop.log.dir", "logs");
  Configuration conf = new Configuration();
  // Use the supported Builder API rather than the deprecated
  // MiniDFSCluster(conf, numNodes, format, racks) constructor.
  dfsCluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numSlaves)
      .build();
  // jConf is snapshotted before the default FS URI is set below,
  // preserving the original ordering.
  jConf = new JobConf(conf);
  FileSystem.setDefaultUri(conf,
      dfsCluster.getFileSystem().getUri().toString());
  mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1,
      jConf);
  createKeysAsJson("keys.json");
}
private void startCluster(Configuration conf) throws Exception { if (System.getProperty("hadoop.log.dir") == null) { System.setProperty("hadoop.log.dir", "target/test-dir"); } conf.set("dfs.block.access.token.enable", "false"); conf.set("dfs.permissions", "true"); conf.set("hadoop.security.authentication", "simple"); String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH) + File.pathSeparator + classpathDir; conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp); dfsCluster = new MiniDFSCluster(conf, 1, true, null); FileSystem fileSystem = dfsCluster.getFileSystem(); fileSystem.mkdirs(new Path("/tmp")); fileSystem.mkdirs(new Path("/user")); fileSystem.mkdirs(new Path("/hadoop/mapred/system")); fileSystem.setPermission( new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx")); fileSystem.setPermission( new Path("/user"), FsPermission.valueOf("-rwxrwxrwx")); fileSystem.setPermission( new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------")); FileSystem.setDefaultUri(conf, fileSystem.getUri()); mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf); // so the minicluster conf is avail to the containers. Writer writer = new FileWriter(classpathDir + "/core-site.xml"); mrCluster.getConfig().writeXml(writer); writer.close(); }
/**
 * Boots a mini DFS cluster plus a single-node MR client cluster for the
 * credential tests, then writes the test key material to keys.json.
 * NOTE(review): the trailing {@code false} passed to the factory is an
 * overload-specific flag — confirm its meaning against the local
 * MiniMRClientClusterFactory before changing it.
 *
 * @throws Exception if either mini cluster fails to start
 */
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty("hadoop.log.dir", "logs");

  final Configuration clusterConf = new Configuration();
  dfsCluster = new MiniDFSCluster.Builder(clusterConf)
      .numDataNodes(numSlaves)
      .build();

  // jConf is snapshotted before the default FS URI is applied below,
  // matching the established ordering of this fixture.
  jConf = new JobConf(clusterConf);
  FileSystem.setDefaultUri(clusterConf,
      dfsCluster.getFileSystem().getUri().toString());

  mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1,
      jConf, false);
  createKeysAsJson("keys.json");
}
private void startCluster(Configuration conf) throws Exception { if (System.getProperty("hadoop.log.dir") == null) { System.setProperty("hadoop.log.dir", "target/test-dir"); } conf.set("dfs.block.access.token.enable", "false"); conf.set("dfs.permissions", "true"); conf.set("hadoop.security.authentication", "simple"); String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH, StringUtils.join(",", YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) + File.pathSeparator + classpathDir; conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp); dfsCluster = new MiniDFSCluster.Builder(conf).build(); FileSystem fileSystem = dfsCluster.getFileSystem(); fileSystem.mkdirs(new Path("/tmp")); fileSystem.mkdirs(new Path("/user")); fileSystem.mkdirs(new Path("/hadoop/mapred/system")); fileSystem.setPermission( new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx")); fileSystem.setPermission( new Path("/user"), FsPermission.valueOf("-rwxrwxrwx")); fileSystem.setPermission( new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------")); FileSystem.setDefaultUri(conf, fileSystem.getUri()); mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf, false); // so the minicluster conf is avail to the containers. Writer writer = new FileWriter(classpathDir + "/core-site.xml"); mrCluster.getConfig().writeXml(writer); writer.close(); }
public static void initCluster(Class<?> caller) throws IOException { Configuration conf = new Configuration(); // conf.set("mapred.queue.names", "default,q1,q2"); conf.set("mapred.queue.names", "default"); conf.set("yarn.scheduler.capacity.root.queues", "default"); conf.set("yarn.scheduler.capacity.root.default.capacity", "100.0"); conf.setBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false); conf.set(GRIDMIX_DEFAULT_QUEUE, "default"); dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true) .build();// MiniDFSCluster(conf, 3, true, null); dfs = dfsCluster.getFileSystem(); conf.set(JTConfig.JT_RETIREJOBS, "false"); mrvl = MiniMRClientClusterFactory.create(caller, 2, conf,false); conf = mrvl.getConfig(); String[] files = conf.getStrings(MRJobConfig.CACHE_FILES); if (files != null) { String[] timestamps = new String[files.length]; for (int i = 0; i < files.length; i++) { timestamps[i] = Long.toString(System.currentTimeMillis()); } conf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, timestamps); } }
private void startMrCluster() throws IOException { Configuration conf = new JobConf(); FileSystem.setDefaultUri(conf, HadoopTestUtils.getTestDFS().getUri()); conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true); conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true); String addr = MiniYARNCluster.getHostname() + ":0"; conf.set(YarnConfiguration.RM_ADDRESS, addr); conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, addr); m_mrCluster = MiniMRClientClusterFactory.create( HadoopTestUtils.class, "MR4CTests", 1, // num node managers conf ); // make sure startup is finished for ( int i=0; i<60; i++ ) { String newAddr = m_mrCluster.getConfig().get(YarnConfiguration.RM_ADDRESS); if ( newAddr.equals(addr) ) { s_log.warn("MiniYARNCluster startup not complete"); try { Thread.sleep(1000); } catch (InterruptedException ie) { throw new IOException(ie); } } else { s_log.info("MiniYARNCluster now available at {}", newAddr); return; } } throw new IOException("MiniYARNCluster taking too long to startup"); }
@BeforeClass public static void setup() throws IOException { Properties props = new Properties(); InputStream is = ClassLoader.getSystemResourceAsStream("hdfs-conf.properties"); props.load(is); for (Entry<Object, Object> entry : props.entrySet()) { System.setProperty((String) entry.getKey(), (String) entry.getValue()); } Map<String, String> envMap = new HashMap<String, String>(); envMap.put("JAVA_HOME", System.getProperty("java.home")); setEnv(envMap); final Configuration conf = new Configuration(); final Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", "/tmp")); testdir = new Path(TEST_ROOT_DIR, "TestMiniMRClientCluster"); inDir = new Path(testdir, "in"); outDir = new Path(testdir, "out"); FileSystem fs = FileSystem.getLocal(conf); if (fs.exists(testdir) && !fs.delete(testdir, true)) { throw new IOException("Could not delete " + testdir); } if (!fs.mkdirs(inDir)) { throw new IOException("Mkdirs failed to create " + inDir); } for (int i = 0; i < inFiles.length; i++) { inFiles[i] = new Path(inDir, "part_" + i); createFile(inFiles[i], conf); } // create the mini cluster to be used for the tests mrCluster = MiniMRClientClusterFactory.create(WordCountToolTest.class, 1, new Configuration()); }
@BeforeClass public static void setup() throws IOException { Properties props = new Properties(); InputStream is = ClassLoader.getSystemResourceAsStream("hdfs-conf.properties"); props.load(is); for (Entry<Object, Object> entry : props.entrySet()) { System.setProperty((String) entry.getKey(), (String) entry.getValue()); } Map<String, String> envMap = new HashMap<String, String>(); envMap.put("JAVA_HOME", System.getProperty("java.home")); setEnv(envMap); final Configuration conf = new Configuration(); final Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", "/tmp")); testdir = new Path(TEST_ROOT_DIR, "TestMiniMRClientCluster"); inDir = new Path(testdir, "in"); outDir = new Path(testdir, "out"); FileSystem fs = FileSystem.getLocal(conf); if (fs.exists(testdir) && !fs.delete(testdir, true)) { throw new IOException("Could not delete " + testdir); } if (!fs.mkdirs(inDir)) { throw new IOException("Mkdirs failed to create " + inDir); } for (int i = 0; i < inFiles.length; i++) { inFiles[i] = new Path(inDir, "part_" + i); createFile(inFiles[i], conf); } // create the mini cluster to be used for the tests mrCluster = MiniMRClientClusterFactory.create(GrepToolTest.class, 1, new Configuration()); }
/**
 * Spins up a two-node MR client minicluster before each test.
 *
 * @throws IOException if the minicluster cannot be created or started
 */
@Before
public void setup() throws IOException {
  final Configuration yarnConf = new YarnConfiguration();
  cluster = MiniMRClientClusterFactory.create(getClass(), 2, yarnConf);
  cluster.start();
}
/** * Starts DFS and MR clusters, as specified in member-variable options. Also * writes out configuration and details, if requested. * * @throws IOException * @throws FileNotFoundException * @throws URISyntaxException */ public void start() throws IOException, FileNotFoundException, URISyntaxException { if (!noDFS) { dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort) .numDataNodes(numDataNodes).startupOption(dfsOpts).build(); LOG.info("Started MiniDFSCluster -- namenode on port " + dfs.getNameNodePort()); } if (!noMR) { if (fs == null && dfs != null) { fs = dfs.getFileSystem().getUri().toString(); } else if (fs == null) { fs = "file:///tmp/minimr-" + System.nanoTime(); } FileSystem.setDefaultUri(conf, new URI(fs)); // Instruct the minicluster to use fixed ports, so user will know which // ports to use when communicating with the cluster. conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true); conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true); conf.set(YarnConfiguration.RM_ADDRESS, MiniYARNCluster.getHostname() + ":" + this.rmPort); conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, MiniYARNCluster.getHostname() + ":" + this.jhsPort); mr = MiniMRClientClusterFactory.create(this.getClass(), numNodeManagers, conf); LOG.info("Started MiniMRCluster"); } if (writeConfig != null) { FileOutputStream fos = new FileOutputStream(new File(writeConfig)); conf.writeXml(fos); fos.close(); } if (writeDetails != null) { Map<String, Object> map = new TreeMap<String, Object>(); if (dfs != null) { map.put("namenode_port", dfs.getNameNodePort()); } if (mr != null) { map.put("resourcemanager_port", mr.getConfig().get( YarnConfiguration.RM_ADDRESS).split(":")[1]); } FileWriter fw = new FileWriter(new File(writeDetails)); fw.write(new JSON().toJSON(map)); fw.close(); } }
public void testDistCh() throws Exception { final Configuration conf = new Configuration(); conf.set(CapacitySchedulerConfiguration.PREFIX+CapacitySchedulerConfiguration.ROOT+"."+CapacitySchedulerConfiguration.QUEUES, "default"); conf.set(CapacitySchedulerConfiguration.PREFIX+CapacitySchedulerConfiguration.ROOT+".default."+CapacitySchedulerConfiguration.CAPACITY, "100"); final MiniDFSCluster cluster= new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build(); final FileSystem fs = cluster.getFileSystem(); final FsShell shell = new FsShell(conf); try { final FileTree tree = new FileTree(fs, "testDistCh"); final FileStatus rootstatus = fs.getFileStatus(tree.rootdir); runLsr(shell, tree.root, 0); final String[] args = new String[NUN_SUBS]; final ChPermissionStatus[] newstatus = new ChPermissionStatus[NUN_SUBS]; args[0]="/test/testDistCh/sub0:sub1::"; newstatus[0] = new ChPermissionStatus(rootstatus, "sub1", "", ""); args[1]="/test/testDistCh/sub1::sub2:"; newstatus[1] = new ChPermissionStatus(rootstatus, "", "sub2", ""); args[2]="/test/testDistCh/sub2:::437"; newstatus[2] = new ChPermissionStatus(rootstatus, "", "", "437"); args[3]="/test/testDistCh/sub3:sub1:sub2:447"; newstatus[3] = new ChPermissionStatus(rootstatus, "sub1", "sub2", "447"); args[4]="/test/testDistCh/sub4::sub5:437"; newstatus[4] = new ChPermissionStatus(rootstatus, "", "sub5", "437"); args[5]="/test/testDistCh/sub5:sub1:sub5:"; newstatus[5] = new ChPermissionStatus(rootstatus, "sub1", "sub5", ""); args[6]="/test/testDistCh/sub6:sub3::437"; newstatus[6] = new ChPermissionStatus(rootstatus, "sub3", "", "437"); System.out.println("args=" + Arrays.asList(args).toString().replace(",", ",\n ")); System.out.println("newstatus=" + Arrays.asList(newstatus).toString().replace(",", ",\n ")); //run DistCh new DistCh(MiniMRClientClusterFactory.create(this.getClass(), 2, conf).getConfig()).run(args); runLsr(shell, tree.root, 0); //check results for(int i = 0; i < NUN_SUBS; i++) { Path sub = new 
Path(tree.root + "/sub" + i); checkFileStatus(newstatus[i], fs.getFileStatus(sub)); for(FileStatus status : fs.listStatus(sub)) { checkFileStatus(newstatus[i], status); } } } finally { cluster.shutdown(); } }