ZKFCRpcServer(Configuration conf,
    InetSocketAddress bindAddr,
    ZKFailoverController zkfc,
    PolicyProvider policy) throws IOException {
  this.zkfc = zkfc;

  RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
      ProtobufRpcEngine.class);
  ZKFCProtocolServerSideTranslatorPB translator =
      new ZKFCProtocolServerSideTranslatorPB(this);
  BlockingService service = ZKFCProtocolService
      .newReflectiveBlockingService(translator);
  this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
      .setInstance(service).setBindAddress(bindAddr.getHostName())
      .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
      .setVerbose(false).build();

  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(conf, policy);
  }
}
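// For context, a minimal sketch of a PolicyProvider that could be passed in
// as the "policy" argument above. PolicyProvider.getServices() maps an ACL
// key from hadoop-policy.xml to the protocol it guards; the ZKFC key shown
// here follows the Hadoop convention, but treat this class as illustrative
// rather than the actual HDFS implementation.
class ZKFCPolicyProviderSketch extends PolicyProvider {
  @Override
  public Service[] getServices() {
    return new Service[] {
        new Service(CommonConfigurationKeys.SECURITY_ZKFC_PROTOCOL_ACL,
            ZKFCProtocol.class)
    };
  }
}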
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
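// createAKey() is not shown in the snippet above; a plausible implementation,
// assuming the standard org.apache.hadoop.crypto.key API, creates the named
// key in the JavaKeyStoreProvider configured above and flushes it to disk.
private void createAKey(String keyName, Configuration conf)
    throws NoSuchAlgorithmException, IOException {
  // The first registered provider is the JKS provider set via
  // DFS_ENCRYPTION_KEY_PROVIDER_URI in setUp().
  final KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
  final KeyProvider.Options options = KeyProvider.options(conf);
  provider.createKey(keyName, options);
  provider.flush();
}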
public void testServiceLevelAuthorization() throws Exception {
  MiniMRCluster mr = null;
  try {
    // Turn on service-level authorization
    final JobConf conf = new JobConf();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
        MapReducePolicyProvider.class, PolicyProvider.class);
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG,
        true);

    // Start the mini mr cluster
    mr = new MiniMRCluster(1, "file:///", 1, null, null, conf);

    // Invoke MRAdmin commands
    MRAdmin mrAdmin = new MRAdmin(mr.createJobConf());
    assertEquals(0, mrAdmin.run(new String[] { "-refreshQueues" }));
    assertEquals(0, mrAdmin.run(new String[] { "-refreshNodes" }));
  } finally {
    if (mr != null) {
      mr.shutdown();
    }
  }
}
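// The ACLs that ServiceAuthorizationManager enforces come from
// hadoop-policy.xml keys, and they can also be set directly on the test
// Configuration. A hedged one-line example: restricting the MapReduce
// job-submission protocol to a non-existent user and group, which should make
// submissions from the test user fail with an authorization error.
conf.set("security.job.submission.protocol.acl", "nouser nogroup");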
@Override
public RefreshServiceAclsResponse refreshServiceAcls(
    RefreshServiceAclsRequest request) throws YarnException {
  Configuration conf = new Configuration();
  if (!conf.getBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
      false)) {
    throw RPCUtil.getRemoteException(
        new IOException("Service Authorization ("
            + CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION
            + ") not enabled."));
  }

  PolicyProvider policyProvider = new RMPolicyProvider();

  refreshServiceAcls(conf, policyProvider);
  clientRMService.refreshServiceAcls(conf, policyProvider);
  applicationMasterService.refreshServiceAcls(conf, policyProvider);
  resourceTrackerService.refreshServiceAcls(conf, policyProvider);

  return recordFactory.newRecordInstance(RefreshServiceAclsResponse.class);
}
@Override
public RefreshServiceAclsResponse refreshServiceAcls(
    RefreshServiceAclsRequest request) throws YarnException, IOException {
  if (!getConfig().getBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
      false)) {
    throw RPCUtil.getRemoteException(
        new IOException("Service Authorization ("
            + CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION
            + ") not enabled."));
  }

  String argName = "refreshServiceAcls";
  UserGroupInformation user = checkAcls(argName);

  checkRMStatus(user.getShortUserName(), argName, "refresh Service ACLs.");

  PolicyProvider policyProvider = RMPolicyProvider.getInstance();
  Configuration conf =
      getConfiguration(new Configuration(false),
          YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);

  refreshServiceAcls(conf, policyProvider);
  rmContext.getClientRMService().refreshServiceAcls(conf, policyProvider);
  rmContext.getApplicationMasterService().refreshServiceAcls(
      conf, policyProvider);
  rmContext.getResourceTrackerService().refreshServiceAcls(
      conf, policyProvider);

  RMAuditLogger.logSuccess(user.getShortUserName(), argName, "AdminService");

  return recordFactory.newRecordInstance(RefreshServiceAclsResponse.class);
}
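// A hedged client-side sketch of what drives this handler: the
// "yarn rmadmin -refreshServiceAcl" CLI builds the request through the YARN
// records factory and invokes the admin protocol, re-reading
// hadoop-policy.xml without an RM restart. "adminProtocol" here is an assumed
// ResourceManagerAdministrationProtocol proxy, not code from the snippet
// above.
RefreshServiceAclsRequest request =
    RecordFactoryProvider.getRecordFactory(null)
        .newRecordInstance(RefreshServiceAclsRequest.class);
adminProtocol.refreshServiceAcls(request);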
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  // Build racks and hosts configuration to test dfsAdmin -printTopology
  String[] racks = {"/rack1", "/rack1", "/rack2", "/rack2",
      "/rack2", "/rack3", "/rack4", "/rack4"};
  String[] hosts = {"host1", "host2", "host3", "host4",
      "host5", "host6", "host7", "host8"};

  dfsCluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(8)
      .racks(racks)
      .hosts(hosts)
      .build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  // register token type for protocol
  SecurityInfo.addInfo(
      AuthenticationProtos.AuthenticationService.getDescriptor().getName(),
      new SecurityInfo("hbase.test.kerberos.principal",
          AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN));
  // security settings only added after startup so that ZK does not require SASL
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set("hadoop.security.authentication", "kerberos");
  conf.set("hbase.security.authentication", "kerberos");
  conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION, true);

  server = new TokenServer(conf);
  serverThread = new Thread(server);
  Threads.setDaemonThreadRunning(serverThread,
      "TokenServer:" + server.getServerName().toString());
  // wait for startup
  while (!server.isStarted() && !server.isStopped()) {
    Thread.sleep(10);
  }

  server.rpcServer.refreshAuthManager(new PolicyProvider() {
    @Override
    public Service[] getServices() {
      return new Service[] {
          new Service("security.client.protocol.acl",
              AuthenticationProtos.AuthenticationService.BlockingInterface.class)
      };
    }
  });
  ZKClusterId.setClusterId(server.getZooKeeper(), clusterId);
  secretManager = (AuthenticationTokenSecretManager) server.getSecretManager();
  while (secretManager.getCurrentKey() == null) {
    Thread.sleep(1);
  }
}