Java class org.apache.hadoop.security.authorize.PolicyProvider: example source code

Project: hadoop-oss    File: ZKFCRpcServer.java
ZKFCRpcServer(Configuration conf,
    InetSocketAddress bindAddr,
    ZKFailoverController zkfc,
    PolicyProvider policy) throws IOException {
  this.zkfc = zkfc;

  RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
      ProtobufRpcEngine.class);
  ZKFCProtocolServerSideTranslatorPB translator =
      new ZKFCProtocolServerSideTranslatorPB(this);
  BlockingService service = ZKFCProtocolService
      .newReflectiveBlockingService(translator);
  this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
      .setInstance(service).setBindAddress(bindAddr.getHostName())
      .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
      .setVerbose(false).build();

  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(conf, policy);
  }

}
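The constructor above shows the recurring pattern on this page: the RPC server is built first, and the injected PolicyProvider is only consulted when hadoop.security.authorization is enabled, at which point refreshServiceAcl loads the provider's protocol-to-ACL mapping. For orientation, here is a minimal provider sketch in the same spirit as the anonymous provider in the ditb snippet further down; MyProtocol and the ACL key security.my.protocol.acl are hypothetical placeholders, not taken from any snippet on this page.

import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;

public class MyPolicyProvider extends PolicyProvider {
  /** Hypothetical RPC protocol; stands in for e.g. ZKFCProtocolPB. */
  public interface MyProtocol {}

  @Override
  public Service[] getServices() {
    // Each Service pairs an ACL configuration key (normally set in
    // hadoop-policy.xml) with the protocol interface it guards.
    return new Service[] {
        new Service("security.my.protocol.acl", MyProtocol.class)
    };
  }
}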
Project: hadoop    File: TestXAttrCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(), 
      fs instanceof DistributedFileSystem);
}
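This setUp registers HDFSPolicyProvider under PolicyProvider.POLICY_PROVIDER_CONFIG so the MiniDFSCluster daemons pick it up. A daemon typically resolves that key reflectively; a hedged sketch of the lookup follows (the fallback class is an assumption for this example, as each daemon supplies its own default provider).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.util.ReflectionUtils;

static PolicyProvider loadPolicyProvider(Configuration conf) {
  // Read the class registered via conf.setClass(...) and instantiate it.
  Class<? extends PolicyProvider> klass = conf.getClass(
      PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  return ReflectionUtils.newInstance(klass, conf);
}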
Project: hadoop    File: TestCacheAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: hadoop    File: TestCryptoAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
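createAKey is a test helper that is not shown on this page; given the jceks key-provider URI configured above, it presumably boils down to the Hadoop KeyProvider API. A hedged guess at its core:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.hdfs.DFSConfigKeys;

static void createAKey(String keyName, Configuration conf) throws Exception {
  // Resolve the provider from the jceks://file/... URI configured above
  // and create a key with default options.
  URI uri = new URI(conf.get(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI));
  KeyProvider provider = KeyProviderFactory.get(uri, conf);
  provider.createKey(keyName, KeyProvider.options(conf));
  provider.flush();
}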
Project: hadoop    File: ZKFCRpcServer.java
ZKFCRpcServer(Configuration conf,
    InetSocketAddress bindAddr,
    ZKFailoverController zkfc,
    PolicyProvider policy) throws IOException {
  this.zkfc = zkfc;

  RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
      ProtobufRpcEngine.class);
  ZKFCProtocolServerSideTranslatorPB translator =
      new ZKFCProtocolServerSideTranslatorPB(this);
  BlockingService service = ZKFCProtocolService
      .newReflectiveBlockingService(translator);
  this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
      .setInstance(service).setBindAddress(bindAddr.getHostName())
      .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
      .setVerbose(false).build();

  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(conf, policy);
  }

}
Project: aliyun-oss-hadoop-fs    File: TestXAttrCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(), 
      fs instanceof DistributedFileSystem);
}
Project: aliyun-oss-hadoop-fs    File: TestCacheAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: aliyun-oss-hadoop-fs    File: TestCryptoAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
Project: aliyun-oss-hadoop-fs    File: ZKFCRpcServer.java
ZKFCRpcServer(Configuration conf,
    InetSocketAddress bindAddr,
    ZKFailoverController zkfc,
    PolicyProvider policy) throws IOException {
  this.zkfc = zkfc;

  RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
      ProtobufRpcEngine.class);
  ZKFCProtocolServerSideTranslatorPB translator =
      new ZKFCProtocolServerSideTranslatorPB(this);
  BlockingService service = ZKFCProtocolService
      .newReflectiveBlockingService(translator);
  this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
      .setInstance(service).setBindAddress(bindAddr.getHostName())
      .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
      .setVerbose(false).build();

  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(conf, policy);
  }

}
Project: big-c    File: TestXAttrCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(), 
      fs instanceof DistributedFileSystem);
}
Project: big-c    File: TestCacheAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: big-c    File: TestCryptoAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
Project: big-c    File: ZKFCRpcServer.java
ZKFCRpcServer(Configuration conf,
    InetSocketAddress bindAddr,
    ZKFailoverController zkfc,
    PolicyProvider policy) throws IOException {
  this.zkfc = zkfc;

  RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
      ProtobufRpcEngine.class);
  ZKFCProtocolServerSideTranslatorPB translator =
      new ZKFCProtocolServerSideTranslatorPB(this);
  BlockingService service = ZKFCProtocolService
      .newReflectiveBlockingService(translator);
  this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
      .setInstance(service).setBindAddress(bindAddr.getHostName())
      .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
      .setVerbose(false).build();

  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(conf, policy);
  }

}
Project: hadoop-2.6.0-cdh5.4.3    File: TestXAttrCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(), 
      fs instanceof DistributedFileSystem);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestCacheAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestCryptoAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
Project: hadoop-2.6.0-cdh5.4.3    File: ZKFCRpcServer.java
ZKFCRpcServer(Configuration conf,
    InetSocketAddress bindAddr,
    ZKFailoverController zkfc,
    PolicyProvider policy) throws IOException {
  this.zkfc = zkfc;

  RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
      ProtobufRpcEngine.class);
  ZKFCProtocolServerSideTranslatorPB translator =
      new ZKFCProtocolServerSideTranslatorPB(this);
  BlockingService service = ZKFCProtocolService
      .newReflectiveBlockingService(translator);
  this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
      .setInstance(service).setBindAddress(bindAddr.getHostName())
      .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
      .setVerbose(false).build();

  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(conf, policy);
  }

}
Project: hadoop-2.6.0-cdh5.4.3    File: TestAdminOperationsProtocolWithServiceAuthorization.java
public void testServiceLevelAuthorization() throws Exception {
  MiniMRCluster mr = null;
  try {
    // Turn on service-level authorization
    final JobConf conf = new JobConf();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                  MapReducePolicyProvider.class, PolicyProvider.class);
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                    true);

    // Start the mini mr cluster
    mr = new MiniMRCluster(1, "file:///", 1, null, null, conf);

    // Invoke MRAdmin commands
    MRAdmin mrAdmin = new MRAdmin(mr.createJobConf());
    assertEquals(0, mrAdmin.run(new String[] { "-refreshQueues" }));
    assertEquals(0, mrAdmin.run(new String[] { "-refreshNodes" }));
  } finally {
    if (mr != null) { 
      mr.shutdown();
    }
  }
}
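This MR1-era test flips the same two switches the HDFS tests above do: a PolicyProvider class, plus the global authorization flag (ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG and CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION both name the hadoop.security.authorization property). For completeness, a hedged sketch of wiring this up programmatically, reusing the hypothetical MyPolicyProvider from earlier; the ACL value "alice ops" (users, then groups) is a made-up example of what normally lives in hadoop-policy.xml:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.authorize.PolicyProvider;

static Configuration buildAuthzConf() {
  Configuration conf = new Configuration();
  // Enable service-level authorization globally.
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
  // Tell the daemon which protocols are guarded and by which ACL keys.
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      MyPolicyProvider.class, PolicyProvider.class);
  // Grant user "alice" and group "ops" access to the hypothetical protocol.
  conf.set("security.my.protocol.acl", "alice ops");
  return conf;
}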
Project: hadoop-plus    File: AdminService.java
@Override
public RefreshServiceAclsResponse refreshServiceAcls(
    RefreshServiceAclsRequest request) throws YarnException {
  Configuration conf = new Configuration();
  if (!conf.getBoolean(
           CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
           false)) {
    throw RPCUtil.getRemoteException(
        new IOException("Service Authorization (" + 
            CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION + 
            ") not enabled."));
  }

  PolicyProvider policyProvider = new RMPolicyProvider(); 

  refreshServiceAcls(conf, policyProvider);
  clientRMService.refreshServiceAcls(conf, policyProvider);
  applicationMasterService.refreshServiceAcls(conf, policyProvider);
  resourceTrackerService.refreshServiceAcls(conf, policyProvider);

  return recordFactory.newRecordInstance(RefreshServiceAclsResponse.class);
}
Project: hadoop-plus    File: ZKFCRpcServer.java
ZKFCRpcServer(Configuration conf,
    InetSocketAddress bindAddr,
    ZKFailoverController zkfc,
    PolicyProvider policy) throws IOException {
  this.zkfc = zkfc;

  RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
      ProtobufRpcEngine.class);
  ZKFCProtocolServerSideTranslatorPB translator =
      new ZKFCProtocolServerSideTranslatorPB(this);
  BlockingService service = ZKFCProtocolService
      .newReflectiveBlockingService(translator);
  this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
      .setInstance(service).setBindAddress(bindAddr.getHostName())
      .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
      .setVerbose(false).build();

  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(conf, policy);
  }

}
Project: FlexMap    File: TestXAttrCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(), 
      fs instanceof DistributedFileSystem);
}
Project: FlexMap    File: TestCacheAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: FlexMap    File: TestCryptoAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
Project: hops    File: ZKFCRpcServer.java
ZKFCRpcServer(Configuration conf,
    InetSocketAddress bindAddr,
    ZKFailoverController zkfc,
    PolicyProvider policy) throws IOException {
  this.zkfc = zkfc;

  RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
      ProtobufRpcEngine.class);
  ZKFCProtocolServerSideTranslatorPB translator =
      new ZKFCProtocolServerSideTranslatorPB(this);
  BlockingService service = ZKFCProtocolService
      .newReflectiveBlockingService(translator);
  this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
      .setInstance(service).setBindAddress(bindAddr.getHostName())
      .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
      .setVerbose(false).build();

  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(conf, policy);
  }

}
Project: hadoop-TCP    File: AdminService.java
@Override
public RefreshServiceAclsResponse refreshServiceAcls(
    RefreshServiceAclsRequest request) throws YarnException {
  Configuration conf = new Configuration();
  if (!conf.getBoolean(
           CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
           false)) {
    throw RPCUtil.getRemoteException(
        new IOException("Service Authorization (" + 
            CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION + 
            ") not enabled."));
  }

  PolicyProvider policyProvider = new RMPolicyProvider(); 

  refreshServiceAcls(conf, policyProvider);
  clientRMService.refreshServiceAcls(conf, policyProvider);
  applicationMasterService.refreshServiceAcls(conf, policyProvider);
  resourceTrackerService.refreshServiceAcls(conf, policyProvider);

  return recordFactory.newRecordInstance(RefreshServiceAclsResponse.class);
}
Project: hadoop-TCP    File: ZKFCRpcServer.java
ZKFCRpcServer(Configuration conf,
    InetSocketAddress bindAddr,
    ZKFailoverController zkfc,
    PolicyProvider policy) throws IOException {
  this.zkfc = zkfc;

  RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
      ProtobufRpcEngine.class);
  ZKFCProtocolServerSideTranslatorPB translator =
      new ZKFCProtocolServerSideTranslatorPB(this);
  BlockingService service = ZKFCProtocolService
      .newReflectiveBlockingService(translator);
  this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
      .setInstance(service).setBindAddress(bindAddr.getHostName())
      .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
      .setVerbose(false).build();

  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(conf, policy);
  }

}
Project: hadoop-on-lustre    File: TestAdminOperationsProtocolWithServiceAuthorization.java
public void testServiceLevelAuthorization() throws Exception {
  MiniMRCluster mr = null;
  try {
    // Turn on service-level authorization
    final JobConf conf = new JobConf();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                  MapReducePolicyProvider.class, PolicyProvider.class);
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                    true);

    // Start the mini mr cluster
    mr = new MiniMRCluster(1, "file:///", 1, null, null, conf);

    // Invoke MRAdmin commands
    MRAdmin mrAdmin = new MRAdmin(mr.createJobConf());
    assertEquals(0, mrAdmin.run(new String[] { "-refreshQueues" }));
    assertEquals(0, mrAdmin.run(new String[] { "-refreshNodes" }));
  } finally {
    if (mr != null) { 
      mr.shutdown();
    }
  }
}
Project: hardfs    File: AdminService.java
@Override
public RefreshServiceAclsResponse refreshServiceAcls(
    RefreshServiceAclsRequest request) throws YarnException {
  Configuration conf = new Configuration();
  if (!conf.getBoolean(
           CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
           false)) {
    throw RPCUtil.getRemoteException(
        new IOException("Service Authorization (" + 
            CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION + 
            ") not enabled."));
  }

  PolicyProvider policyProvider = new RMPolicyProvider(); 

  refreshServiceAcls(conf, policyProvider);
  clientRMService.refreshServiceAcls(conf, policyProvider);
  applicationMasterService.refreshServiceAcls(conf, policyProvider);
  resourceTrackerService.refreshServiceAcls(conf, policyProvider);

  return recordFactory.newRecordInstance(RefreshServiceAclsResponse.class);
}
Project: hardfs    File: ZKFCRpcServer.java
ZKFCRpcServer(Configuration conf,
    InetSocketAddress bindAddr,
    ZKFailoverController zkfc,
    PolicyProvider policy) throws IOException {
  this.zkfc = zkfc;

  RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
      ProtobufRpcEngine.class);
  ZKFCProtocolServerSideTranslatorPB translator =
      new ZKFCProtocolServerSideTranslatorPB(this);
  BlockingService service = ZKFCProtocolService
      .newReflectiveBlockingService(translator);
  this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
      .setInstance(service).setBindAddress(bindAddr.getHostName())
      .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
      .setVerbose(false).build();

  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(conf, policy);
  }

}
Project: hadoop-on-lustre2    File: TestCacheAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: hadoop-on-lustre2    File: ZKFCRpcServer.java
ZKFCRpcServer(Configuration conf,
    InetSocketAddress bindAddr,
    ZKFailoverController zkfc,
    PolicyProvider policy) throws IOException {
  this.zkfc = zkfc;

  RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
      ProtobufRpcEngine.class);
  ZKFCProtocolServerSideTranslatorPB translator =
      new ZKFCProtocolServerSideTranslatorPB(this);
  BlockingService service = ZKFCProtocolService
      .newReflectiveBlockingService(translator);
  this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
      .setInstance(service).setBindAddress(bindAddr.getHostName())
      .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
      .setVerbose(false).build();

  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(conf, policy);
  }

}
Project: hanoi-hadoop-2.0.0-cdh    File: TestAdminOperationsProtocolWithServiceAuthorization.java
public void testServiceLevelAuthorization() throws Exception {
  MiniMRCluster mr = null;
  try {
    // Turn on service-level authorization
    final JobConf conf = new JobConf();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                  MapReducePolicyProvider.class, PolicyProvider.class);
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                    true);

    // Start the mini mr cluster
    mr = new MiniMRCluster(1, "file:///", 1, null, null, conf);

    // Invoke MRAdmin commands
    MRAdmin mrAdmin = new MRAdmin(mr.createJobConf());
    assertEquals(0, mrAdmin.run(new String[] { "-refreshQueues" }));
    assertEquals(0, mrAdmin.run(new String[] { "-refreshNodes" }));
  } finally {
    if (mr != null) { 
      mr.shutdown();
    }
  }
}
Project: mapreduce-fork    File: TestAdminOperationsProtocolWithServiceAuthorization.java
public void testServiceLevelAuthorization() throws Exception {
  MiniMRCluster mr = null;
  try {
    // Turn on service-level authorization
    final JobConf conf = new JobConf();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                  MapReducePolicyProvider.class, PolicyProvider.class);
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                    true);

    // Start the mini mr cluster
    mr = new MiniMRCluster(1, "file:///", 1, null, null, conf);

    // Invoke MRAdmin commands
    MRAdmin mrAdmin = new MRAdmin(mr.createJobConf());
    assertEquals(0, mrAdmin.run(new String[] { "-refreshQueues" }));
    assertEquals(0, mrAdmin.run(new String[] { "-refreshNodes" }));
  } finally {
    if (mr != null) { 
      mr.shutdown();
    }
  }
}
Project: hortonworks-extension    File: TestAdminOperationsProtocolWithServiceAuthorization.java
public void testServiceLevelAuthorization() throws Exception {
  MiniMRCluster mr = null;
  try {
    // Turn on service-level authorization
    final JobConf conf = new JobConf();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                  MapReducePolicyProvider.class, PolicyProvider.class);
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                    true);

    // Start the mini mr cluster
    mr = new MiniMRCluster(1, "file:///", 1, null, null, conf);

    // Invoke MRAdmin commands
    MRAdmin mrAdmin = new MRAdmin(mr.createJobConf());
    assertEquals(0, mrAdmin.run(new String[] { "-refreshQueues" }));
    assertEquals(0, mrAdmin.run(new String[] { "-refreshNodes" }));
  } finally {
    if (mr != null) { 
      mr.shutdown();
    }
  }
}
Project: hadoop    File: AdminService.java
@Override
public RefreshServiceAclsResponse refreshServiceAcls(
    RefreshServiceAclsRequest request) throws YarnException, IOException {
  if (!getConfig().getBoolean(
           CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
           false)) {
    throw RPCUtil.getRemoteException(
        new IOException("Service Authorization (" + 
            CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION + 
            ") not enabled."));
  }

  String argName = "refreshServiceAcls";
  UserGroupInformation user = checkAcls(argName);

  checkRMStatus(user.getShortUserName(), argName, "refresh Service ACLs.");

  PolicyProvider policyProvider = RMPolicyProvider.getInstance();
  Configuration conf =
      getConfiguration(new Configuration(false),
          YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);

  refreshServiceAcls(conf, policyProvider);
  rmContext.getClientRMService().refreshServiceAcls(conf, policyProvider);
  rmContext.getApplicationMasterService().refreshServiceAcls(
      conf, policyProvider);
  rmContext.getResourceTrackerService().refreshServiceAcls(
      conf, policyProvider);

  RMAuditLogger.logSuccess(user.getShortUserName(), argName, "AdminService");

  return recordFactory.newRecordInstance(RefreshServiceAclsResponse.class);
}
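Note the difference from the older hadoop-plus/hadoop-TCP/hardfs variant earlier on this page: instead of new Configuration() (default resources only), this version rebuilds the ACL configuration from hadoop-policy.xml, so edits to the policy file actually take effect on refresh, and the refresh itself is ACL-checked and audit-logged. A hedged sketch of that reload step in isolation (the RPC.Server parameter is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;

static void reloadServiceAcls(RPC.Server server) {
  // Configuration(false) skips core-default/core-site; only the policy
  // file is loaded, so this picks up on-disk changes to hadoop-policy.xml.
  Configuration policyConf = new Configuration(false);
  policyConf.addResource(YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);
  server.refreshServiceAcl(policyConf, RMPolicyProvider.getInstance());
}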
Project: hadoop    File: TestHDFSCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  // Build racks and hosts configuration to test dfsAdmin -printTopology
  String [] racks =  {"/rack1", "/rack1", "/rack2", "/rack2",
                      "/rack2", "/rack3", "/rack4", "/rack4" };
  String [] hosts = {"host1", "host2", "host3", "host4",
                     "host5", "host6", "host7", "host8" };
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8)
                                               .racks(racks)
                                               .hosts(hosts)
                                               .build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: ditb    File: TestTokenAuthentication.java
@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  // register token type for protocol
  SecurityInfo.addInfo(AuthenticationProtos.AuthenticationService.getDescriptor().getName(),
    new SecurityInfo("hbase.test.kerberos.principal",
      AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN));
  // security settings only added after startup so that ZK does not require SASL
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set("hadoop.security.authentication", "kerberos");
  conf.set("hbase.security.authentication", "kerberos");
  conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION, true);
  server = new TokenServer(conf);
  serverThread = new Thread(server);
  Threads.setDaemonThreadRunning(serverThread, "TokenServer:"+server.getServerName().toString());
  // wait for startup
  while (!server.isStarted() && !server.isStopped()) {
    Thread.sleep(10);
  }
  server.rpcServer.refreshAuthManager(new PolicyProvider() {
    @Override
    public Service[] getServices() {
      return new Service [] {
        new Service("security.client.protocol.acl",
          AuthenticationProtos.AuthenticationService.BlockingInterface.class)};
    }
  });
  ZKClusterId.setClusterId(server.getZooKeeper(), clusterId);
  secretManager = (AuthenticationTokenSecretManager)server.getSecretManager();
  while(secretManager.getCurrentKey() == null) {
    Thread.sleep(1);
  }
}
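The anonymous PolicyProvider above is about as small as a working provider gets: a single Service tying the ACL key security.client.protocol.acl to the AuthenticationService interface, injected at runtime via refreshAuthManager. For context, a hedged sketch of the check the RPC layer then performs per connection (API assumed from Hadoop 2.x's ServiceAuthorizationManager; MyPolicyProvider is the hypothetical provider from earlier on this page):

import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;

static void checkAccess(Configuration conf) throws Exception {
  ServiceAuthorizationManager authManager = new ServiceAuthorizationManager();
  authManager.refresh(conf, new MyPolicyProvider());  // load ACLs
  // Throws AuthorizationException if the caller is not in the ACL.
  authManager.authorize(UserGroupInformation.getCurrentUser(),
      MyPolicyProvider.MyProtocol.class, conf,
      InetAddress.getLoopbackAddress());
}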
Project: aliyun-oss-hadoop-fs    File: AdminService.java
@Override
public RefreshServiceAclsResponse refreshServiceAcls(
    RefreshServiceAclsRequest request) throws YarnException, IOException {
  if (!getConfig().getBoolean(
           CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
           false)) {
    throw RPCUtil.getRemoteException(
        new IOException("Service Authorization (" + 
            CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION + 
            ") not enabled."));
  }

  String argName = "refreshServiceAcls";
  UserGroupInformation user = checkAcls(argName);

  checkRMStatus(user.getShortUserName(), argName, "refresh Service ACLs.");

  PolicyProvider policyProvider = RMPolicyProvider.getInstance();
  Configuration conf =
      getConfiguration(new Configuration(false),
          YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);

  refreshServiceAcls(conf, policyProvider);
  rmContext.getClientRMService().refreshServiceAcls(conf, policyProvider);
  rmContext.getApplicationMasterService().refreshServiceAcls(
      conf, policyProvider);
  rmContext.getResourceTrackerService().refreshServiceAcls(
      conf, policyProvider);

  RMAuditLogger.logSuccess(user.getShortUserName(), argName, "AdminService");

  return recordFactory.newRecordInstance(RefreshServiceAclsResponse.class);
}
Project: aliyun-oss-hadoop-fs    File: TestHDFSCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  // Build racks and hosts configuration to test dfsAdmin -printTopology
  String [] racks =  {"/rack1", "/rack1", "/rack2", "/rack2",
                      "/rack2", "/rack3", "/rack4", "/rack4" };
  String [] hosts = {"host1", "host2", "host3", "host4",
                     "host5", "host6", "host7", "host8" };
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8)
                                               .racks(racks)
                                               .hosts(hosts)
                                               .build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}