@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  // Back the encryption key provider with a JavaKeyStore in a temp dir.
  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
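The createAKey helper called above is not shown in this snippet. A plausible implementation, assuming the key is created through the KeyProvider the NameNode picked up from DFS_ENCRYPTION_KEY_PROVIDER_URI (a sketch of the common test pattern, not taken verbatim from the source):

// Sketch: create a key in the cluster's configured KeyProvider.
// Assumes dfsCluster is the MiniDFSCluster built in setUp().
private void createAKey(String keyName, Configuration conf)
    throws NoSuchAlgorithmException, IOException {
  KeyProvider provider =
      dfsCluster.getNameNode().getNamesystem().getProvider();
  KeyProvider.Options options = KeyProvider.options(conf);
  provider.createKey(keyName, options);
  provider.flush();
}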
private void rewriteHadoopPolicyFile(File policyFile) throws IOException {
  // Grant everyone ("*") access to every HDFS service ACL except the
  // refresh-policy protocol, which gets the restrictive dummy ACL.
  // try-with-resources ensures the writer is closed even on failure.
  try (FileWriter fos = new FileWriter(policyFile)) {
    PolicyProvider policyProvider = new HDFSPolicyProvider();
    fos.write("<configuration>\n");
    for (Service service : policyProvider.getServices()) {
      String key = service.getServiceKey();
      String value = "*";
      if (key.equals("security.refresh.policy.protocol.acl")) {
        value = DUMMY_ACL;
      }
      fos.write("<property><name>" + key + "</name><value>" + value
          + "</value></property>\n");
      // Echo each property for easier debugging of the generated file.
      System.err.println("<property><name>" + key + "</name><value>"
          + value + "</value></property>\n");
    }
    fos.write("</configuration>\n");
  }
}
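To exercise the rewritten file, a test can load it into a fresh Configuration and push it to a running RPC server. A minimal sketch, assuming server is an org.apache.hadoop.ipc.Server; the variable names here are illustrative, not from the source:

// Sketch: apply the generated policy file to a running server.
Configuration policyConf = new Configuration(false); // skip default resources
policyConf.addResource(new Path(policyFile.toURI()));
server.refreshServiceAcl(policyConf, new HDFSPolicyProvider());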
private void initIpcServer(Configuration conf) throws IOException {
  InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
      conf.get("dfs.datanode.ipc.address"));
  ipcServer = RPC.getServer(DataNode.class, this, ipcAddr.getHostName(),
      ipcAddr.getPort(),
      conf.getInt("dfs.datanode.handler.count", 3),
      false, conf, blockTokenSecretManager);

  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    ipcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
  }

  dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());
  LOG.info("dnRegistration = " + dnRegistration);
}
@Override // RefreshAuthorizationPolicyProtocol
public void refreshServiceAcl() throws IOException {
  checkNNStartup();
  if (!serviceAuthEnabled) {
    throw new AuthorizationException(
        "Service Level Authorization not enabled!");
  }

  // Re-read the policy from the default configuration resources and
  // apply it to both RPC servers without a NameNode restart.
  this.clientRpcServer.refreshServiceAcl(
      new Configuration(), new HDFSPolicyProvider());
  if (this.serviceRpcServer != null) {
    this.serviceRpcServer.refreshServiceAcl(
        new Configuration(), new HDFSPolicyProvider());
  }
}
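refreshServiceAcl throws unless service-level authorization was enabled when the NameNode started. A hedged sketch of the prerequisite configuration; the surrounding setup is illustrative, not from the source:

// Sketch: serviceAuthEnabled above reflects this flag at startup.
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
// Once the NameNode is up, edits to hadoop-policy.xml can then be
// applied without a restart, e.g. via: hdfs dfsadmin -refreshServiceAcl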
JournalNodeRpcServer(Configuration conf, JournalNode jn) throws IOException {
  this.jn = jn;
  Configuration confCopy = new Configuration(conf);

  // Ensure that nagling doesn't kick in, which could cause latency issues.
  confCopy.setBoolean(
      CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY, true);

  InetSocketAddress addr = getAddress(confCopy);
  RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class,
      ProtobufRpcEngine.class);
  QJournalProtocolServerSideTranslatorPB translator =
      new QJournalProtocolServerSideTranslatorPB(this);
  BlockingService service = QJournalProtocolService
      .newReflectiveBlockingService(translator);

  this.server = new RPC.Builder(confCopy)
      .setProtocol(QJournalProtocolPB.class)
      .setInstance(service)
      .setBindAddress(addr.getHostName())
      .setPort(addr.getPort())
      .setNumHandlers(HANDLER_COUNT)
      .setVerbose(false)
      .build();

  // set service-level authorization security policy
  if (confCopy.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(confCopy, new HDFSPolicyProvider());
  }
}
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  // Build racks and hosts configuration to test dfsAdmin -printTopology
  String[] racks = {"/rack1", "/rack1", "/rack2", "/rack2",
      "/rack2", "/rack3", "/rack4", "/rack4"};
  String[] hosts = {"host1", "host2", "host3", "host4",
      "host5", "host6", "host7", "host8"};

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8)
      .racks(racks)
      .hosts(hosts)
      .build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
JournalNodeRpcServer(Configuration conf, JournalNode jn) throws IOException {
  this.jn = jn;
  Configuration confCopy = new Configuration(conf);

  // Ensure that nagling doesn't kick in, which could cause latency issues.
  confCopy.setBoolean(
      CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY, true);

  InetSocketAddress addr = getAddress(confCopy);
  RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class,
      ProtobufRpcEngine.class);
  QJournalProtocolServerSideTranslatorPB translator =
      new QJournalProtocolServerSideTranslatorPB(this);
  BlockingService service = QJournalProtocolService
      .newReflectiveBlockingService(translator);

  this.server = new RPC.Builder(confCopy)
      .setProtocol(QJournalProtocolPB.class)
      .setInstance(service)
      .setBindAddress(addr.getHostName())
      .setPort(addr.getPort())
      .setNumHandlers(HANDLER_COUNT)
      .setVerbose(false)
      .build();

  // set service-level authorization security policy
  if (confCopy.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(confCopy, new HDFSPolicyProvider());
  }

  // Propagate the JournalNode's tracer so RPC calls are traced.
  this.server.setTracer(jn.tracer);
}
@Override
public Service[] getServices() {
  // Concatenate the HDFS and MapReduce service ACL definitions into a
  // single array covering all Hadoop protocols.
  Service[] hdfsServices = new HDFSPolicyProvider().getServices();
  Service[] mrServices = new MapReducePolicyProvider().getServices();
  Service[] hadoopServices =
      new Service[hdfsServices.length + mrServices.length];
  System.arraycopy(hdfsServices, 0, hadoopServices, 0,
      hdfsServices.length);
  System.arraycopy(mrServices, 0, hadoopServices, hdfsServices.length,
      mrServices.length);
  return hadoopServices;
}
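A hedged usage sketch of this combined provider: register it as the policy provider and list the ACL keys it exposes. The printing loop is illustrative and not part of the original code:

Configuration conf = new Configuration();
conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
    HadoopPolicyProvider.class, PolicyProvider.class);
for (Service service : new HadoopPolicyProvider().getServices()) {
  // Each Service pairs an ACL key with the protocol it guards,
  // e.g. security.client.protocol.acl -> ClientProtocol.
  System.out.println(service.getServiceKey() + " -> "
      + service.getProtocol().getName());
}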