Java 类 org.apache.hadoop.security.authorize.ServiceAuthorizationManager 实例源码

项目:ditb    文件:RpcServer.java   
/** Starts the service.  Must be called before any calls will be handled. */
@Override
public synchronized void start() {
  // Idempotent: repeated calls after startup are no-ops.
  if (started) return;
  // May be null when token authentication is not configured.
  authTokenSecretMgr = createSecretManager();
  if (authTokenSecretMgr != null) {
    setSecretManager(authTokenSecretMgr);
    authTokenSecretMgr.start();
  }
  // Install the service-level authorization manager and load the HBase policies.
  this.authManager = new ServiceAuthorizationManager();
  HBasePolicyProvider.init(conf, authManager);
  // Bring up the I/O threads and call scheduler, then mark the server started.
  responder.start();
  listener.start();
  scheduler.start();
  started = true;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestAdminOperationsProtocolWithServiceAuthorization.java   
/**
 * Exercises the MRAdmin refresh commands against a mini MR cluster that has
 * service-level authorization switched on, expecting both to succeed (exit 0).
 */
public void testServiceLevelAuthorization() throws Exception {
  MiniMRCluster cluster = null;
  try {
    // Enable service-level authorization backed by the MapReduce policies.
    final JobConf conf = new JobConf();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                  MapReducePolicyProvider.class, PolicyProvider.class);
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                    true);

    // Bring up a single-tracker mini MR cluster on the local file system.
    cluster = new MiniMRCluster(1, "file:///", 1, null, null, conf);

    // Both admin refresh operations should be authorized and return 0.
    MRAdmin admin = new MRAdmin(cluster.createJobConf());
    assertEquals(0, admin.run(new String[] { "-refreshQueues" }));
    assertEquals(0, admin.run(new String[] { "-refreshNodes" }));
  } finally {
    // Always tear the cluster down, even if an assertion failed.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:pbase    文件:RpcServer.java   
/** Starts the service.  Must be called before any calls will be handled. */
@Override
public synchronized void start() {
  // Idempotent: repeated calls after startup are no-ops.
  if (started) return;
  // May be null when token authentication is not configured.
  AuthenticationTokenSecretManager mgr = createSecretManager();
  if (mgr != null) {
    setSecretManager(mgr);
    mgr.start();
  }
  // Install the service-level authorization manager and load the HBase policies.
  this.authManager = new ServiceAuthorizationManager();
  HBasePolicyProvider.init(conf, authManager);
  // Bring up the I/O threads and call scheduler, then mark the server started.
  responder.start();
  listener.start();
  scheduler.start();
  started = true;
}
项目:hbase    文件:SimpleRpcServer.java   
/** Starts the service.  Must be called before any calls will be handled. */
@Override
public synchronized void start() {
  // Idempotent: repeated calls after startup are no-ops.
  if (started) return;
  // May be null when token authentication is not configured.
  authTokenSecretMgr = createSecretManager();
  if (authTokenSecretMgr != null) {
    setSecretManager(authTokenSecretMgr);
    authTokenSecretMgr.start();
  }
  // Install the service-level authorization manager and load the HBase policies.
  this.authManager = new ServiceAuthorizationManager();
  HBasePolicyProvider.init(conf, authManager);
  // Bring up the I/O threads and call scheduler, then mark the server started.
  responder.start();
  listener.start();
  scheduler.start();
  started = true;
}
项目:PyroDB    文件:RpcServer.java   
/** Starts the service.  Must be called before any calls will be handled. */
@Override
public synchronized void start() {
  // Idempotent: repeated calls after startup are no-ops.
  if (started) return;
  // May be null when token authentication is not configured.
  AuthenticationTokenSecretManager mgr = createSecretManager();
  if (mgr != null) {
    setSecretManager(mgr);
    mgr.start();
  }
  // Install the service-level authorization manager and load the HBase policies.
  this.authManager = new ServiceAuthorizationManager();
  HBasePolicyProvider.init(conf, authManager);
  // Bring up the I/O threads and call scheduler, then mark the server started.
  responder.start();
  listener.start();
  scheduler.start();
  started = true;
}
项目:c5    文件:RpcServer.java   
/**
 * Starts the service threads but does not allow requests to be responded yet.
 * Client will get {@link ServerNotRunningYetException} instead.
 */
@Override
public synchronized void startThreads() {
  // May be null when token authentication is not configured.
  AuthenticationTokenSecretManager mgr = createSecretManager();
  if (mgr != null) {
    setSecretManager(mgr);
    mgr.start();
  }
  // Install the service-level authorization manager and load the HBase policies.
  this.authManager = new ServiceAuthorizationManager();
  HBasePolicyProvider.init(conf, authManager);
  // Bring up responder/listener threads and the handler pools for the
  // normal, priority, and replication call queues.
  responder.start();
  listener.start();
  handlers = startHandlers(callQueue, handlerCount);
  priorityHandlers = startHandlers(priorityCallQueue, priorityHandlerCount);
  replicationHandlers = startHandlers(replicationQueue, numOfReplicationHandlers);
}
项目:hadoop-on-lustre    文件:TestAdminOperationsProtocolWithServiceAuthorization.java   
/**
 * Exercises the MRAdmin refresh commands against a mini MR cluster that has
 * service-level authorization switched on, expecting both to succeed (exit 0).
 */
public void testServiceLevelAuthorization() throws Exception {
  MiniMRCluster cluster = null;
  try {
    // Enable service-level authorization backed by the MapReduce policies.
    final JobConf conf = new JobConf();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                  MapReducePolicyProvider.class, PolicyProvider.class);
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                    true);

    // Bring up a single-tracker mini MR cluster on the local file system.
    cluster = new MiniMRCluster(1, "file:///", 1, null, null, conf);

    // Both admin refresh operations should be authorized and return 0.
    MRAdmin admin = new MRAdmin(cluster.createJobConf());
    assertEquals(0, admin.run(new String[] { "-refreshQueues" }));
    assertEquals(0, admin.run(new String[] { "-refreshNodes" }));
  } finally {
    // Always tear the cluster down, even if an assertion failed.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hadoop-on-lustre    文件:Server.java   
/**
 * Authorize the incoming client connection.
 *
 * @param user client user
 * @param connection incoming connection
 * @param addr InetAddress of incoming connection
 * @throws AuthorizationException when the client isn't authorized to talk the protocol
 */
public void authorize(UserGroupInformation user, 
                      ConnectionHeader connection,
                      InetAddress addr
                      ) throws AuthorizationException {
  if (!authorize) {
    // Service-level authorization is disabled; every connection is allowed.
    return;
  }
  final String protocolName = connection.getProtocol();
  final Class<?> protocol;
  try {
    protocol = getProtocolClass(protocolName, getConf());
  } catch (ClassNotFoundException e) {
    // A protocol we cannot even resolve is treated as an authorization failure.
    throw new AuthorizationException("Unknown protocol: " + protocolName);
  }
  ServiceAuthorizationManager.authorize(user, protocol, getConf(), addr);
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestAdminOperationsProtocolWithServiceAuthorization.java   
/**
 * Exercises the MRAdmin refresh commands against a mini MR cluster that has
 * service-level authorization switched on, expecting both to succeed (exit 0).
 */
public void testServiceLevelAuthorization() throws Exception {
  MiniMRCluster cluster = null;
  try {
    // Enable service-level authorization backed by the MapReduce policies.
    final JobConf conf = new JobConf();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                  MapReducePolicyProvider.class, PolicyProvider.class);
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                    true);

    // Bring up a single-tracker mini MR cluster on the local file system.
    cluster = new MiniMRCluster(1, "file:///", 1, null, null, conf);

    // Both admin refresh operations should be authorized and return 0.
    MRAdmin admin = new MRAdmin(cluster.createJobConf());
    assertEquals(0, admin.run(new String[] { "-refreshQueues" }));
    assertEquals(0, admin.run(new String[] { "-refreshNodes" }));
  } finally {
    // Always tear the cluster down, even if an assertion failed.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:mapreduce-fork    文件:TestAdminOperationsProtocolWithServiceAuthorization.java   
/**
 * Exercises the MRAdmin refresh commands against a mini MR cluster that has
 * service-level authorization switched on, expecting both to succeed (exit 0).
 */
public void testServiceLevelAuthorization() throws Exception {
  MiniMRCluster cluster = null;
  try {
    // Enable service-level authorization backed by the MapReduce policies.
    final JobConf conf = new JobConf();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                  MapReducePolicyProvider.class, PolicyProvider.class);
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                    true);

    // Bring up a single-tracker mini MR cluster on the local file system.
    cluster = new MiniMRCluster(1, "file:///", 1, null, null, conf);

    // Both admin refresh operations should be authorized and return 0.
    MRAdmin admin = new MRAdmin(cluster.createJobConf());
    assertEquals(0, admin.run(new String[] { "-refreshQueues" }));
    assertEquals(0, admin.run(new String[] { "-refreshNodes" }));
  } finally {
    // Always tear the cluster down, even if an assertion failed.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hortonworks-extension    文件:TestAdminOperationsProtocolWithServiceAuthorization.java   
/**
 * Exercises the MRAdmin refresh commands against a mini MR cluster that has
 * service-level authorization switched on, expecting both to succeed (exit 0).
 */
public void testServiceLevelAuthorization() throws Exception {
  MiniMRCluster cluster = null;
  try {
    // Enable service-level authorization backed by the MapReduce policies.
    final JobConf conf = new JobConf();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                  MapReducePolicyProvider.class, PolicyProvider.class);
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                    true);

    // Bring up a single-tracker mini MR cluster on the local file system.
    cluster = new MiniMRCluster(1, "file:///", 1, null, null, conf);

    // Both admin refresh operations should be authorized and return 0.
    MRAdmin admin = new MRAdmin(cluster.createJobConf());
    assertEquals(0, admin.run(new String[] { "-refreshQueues" }));
    assertEquals(0, admin.run(new String[] { "-refreshNodes" }));
  } finally {
    // Always tear the cluster down, even if an assertion failed.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hortonworks-extension    文件:Server.java   
/**
 * Authorize the incoming client connection.
 *
 * @param user client user
 * @param connection incoming connection
 * @param addr InetAddress of incoming connection
 * @throws AuthorizationException when the client isn't authorized to talk the protocol
 */
public void authorize(UserGroupInformation user, 
                      ConnectionHeader connection,
                      InetAddress addr
                      ) throws AuthorizationException {
  if (!authorize) {
    // Service-level authorization is disabled; every connection is allowed.
    return;
  }
  final String protocolName = connection.getProtocol();
  final Class<?> protocol;
  try {
    protocol = getProtocolClass(protocolName, getConf());
  } catch (ClassNotFoundException e) {
    // A protocol we cannot even resolve is treated as an authorization failure.
    throw new AuthorizationException("Unknown protocol: " + protocolName);
  }
  ServiceAuthorizationManager.authorize(user, protocol, getConf(), addr);
}
项目:hortonworks-extension    文件:TestAdminOperationsProtocolWithServiceAuthorization.java   
/**
 * Exercises the MRAdmin refresh commands against a mini MR cluster that has
 * service-level authorization switched on, expecting both to succeed (exit 0).
 */
public void testServiceLevelAuthorization() throws Exception {
  MiniMRCluster cluster = null;
  try {
    // Enable service-level authorization backed by the MapReduce policies.
    final JobConf conf = new JobConf();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                  MapReducePolicyProvider.class, PolicyProvider.class);
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                    true);

    // Bring up a single-tracker mini MR cluster on the local file system.
    cluster = new MiniMRCluster(1, "file:///", 1, null, null, conf);

    // Both admin refresh operations should be authorized and return 0.
    MRAdmin admin = new MRAdmin(cluster.createJobConf());
    assertEquals(0, admin.run(new String[] { "-refreshQueues" }));
    assertEquals(0, admin.run(new String[] { "-refreshNodes" }));
  } finally {
    // Always tear the cluster down, even if an assertion failed.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hortonworks-extension    文件:Server.java   
/**
 * Authorize the incoming client connection.
 *
 * @param user client user
 * @param connection incoming connection
 * @param addr InetAddress of incoming connection
 * @throws AuthorizationException when the client isn't authorized to talk the protocol
 */
public void authorize(UserGroupInformation user, 
                      ConnectionHeader connection,
                      InetAddress addr
                      ) throws AuthorizationException {
  if (!authorize) {
    // Service-level authorization is disabled; every connection is allowed.
    return;
  }
  final String protocolName = connection.getProtocol();
  final Class<?> protocol;
  try {
    protocol = getProtocolClass(protocolName, getConf());
  } catch (ClassNotFoundException e) {
    // A protocol we cannot even resolve is treated as an authorization failure.
    throw new AuthorizationException("Unknown protocol: " + protocolName);
  }
  ServiceAuthorizationManager.authorize(user, protocol, getConf(), addr);
}
项目:hadoop    文件:TestRMAdminService.java   
/**
 * Asserts that after a service-ACL refresh the given protocol carries the
 * expected ACL string while every other protocol registered with the
 * manager still has the default wide-open ("*") ACL.
 *
 * @param manager   authorization manager whose per-protocol ACLs are inspected
 * @param protocol  the protocol expected to carry {@code aclString}
 * @param aclString the ACL string expected on {@code protocol}
 */
private void verifyServiceACLsRefresh(ServiceAuthorizationManager manager,
    Class<?> protocol, String aclString) {
  for (Class<?> protocolClass : manager.getProtocolsWithAcls()) {
    AccessControlList accessList =
        manager.getProtocolsAcls(protocolClass);
    // JUnit's assertEquals takes (expected, actual); expected must come
    // first so failure messages report the values correctly.
    if (protocolClass == protocol) {
      Assert.assertEquals(aclString, accessList.getAclString());
    } else {
      Assert.assertEquals("*", accessList.getAclString());
    }
  }
}
项目:ditb    文件:HBasePolicyProvider.java   
/**
 * Points Hadoop's policy machinery at hbase-policy.xml and, when
 * service-level authorization is enabled in the configuration, loads the
 * HBase policies into the supplied manager and refreshes the proxy-user
 * (superuser impersonation) settings.
 */
public static void init(Configuration conf, ServiceAuthorizationManager authManager) {
  // Hadoop reads the policy file name from this system property.
  System.setProperty("hadoop.policy.file", "hbase-policy.xml");
  final boolean authorizationEnabled =
      conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);
  if (!authorizationEnabled) {
    return;
  }
  authManager.refresh(conf, new HBasePolicyProvider());
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
项目:aliyun-oss-hadoop-fs    文件:TestRMAdminService.java   
/**
 * Asserts that after a service-ACL refresh the given protocol carries the
 * expected ACL string while every other protocol registered with the
 * manager still has the default wide-open ("*") ACL.
 *
 * @param manager   authorization manager whose per-protocol ACLs are inspected
 * @param protocol  the protocol expected to carry {@code aclString}
 * @param aclString the ACL string expected on {@code protocol}
 */
private void verifyServiceACLsRefresh(ServiceAuthorizationManager manager,
    Class<?> protocol, String aclString) {
  for (Class<?> protocolClass : manager.getProtocolsWithAcls()) {
    AccessControlList accessList =
        manager.getProtocolsAcls(protocolClass);
    // JUnit's assertEquals takes (expected, actual); expected must come
    // first so failure messages report the values correctly.
    if (protocolClass == protocol) {
      Assert.assertEquals(aclString, accessList.getAclString());
    } else {
      Assert.assertEquals("*", accessList.getAclString());
    }
  }
}
项目:big-c    文件:TestRMAdminService.java   
/**
 * Asserts that after a service-ACL refresh the given protocol carries the
 * expected ACL string while every other protocol registered with the
 * manager still has the default wide-open ("*") ACL.
 *
 * @param manager   authorization manager whose per-protocol ACLs are inspected
 * @param protocol  the protocol expected to carry {@code aclString}
 * @param aclString the ACL string expected on {@code protocol}
 */
private void verifyServiceACLsRefresh(ServiceAuthorizationManager manager,
    Class<?> protocol, String aclString) {
  for (Class<?> protocolClass : manager.getProtocolsWithAcls()) {
    AccessControlList accessList =
        manager.getProtocolsAcls(protocolClass);
    // JUnit's assertEquals takes (expected, actual); expected must come
    // first so failure messages report the values correctly.
    if (protocolClass == protocol) {
      Assert.assertEquals(aclString, accessList.getAclString());
    } else {
      Assert.assertEquals("*", accessList.getAclString());
    }
  }
}
项目:LCIndex-HBase-0.94.16    文件:WritableRpcEngine.java   
/** Construct an RPC server.
 * @param instance the instance whose methods will be called
 * @param conf the configuration to use
 * @param bindAddress the address to bind on to listen for connection
 * @param port the port to listen for connections on
 * @param numHandlers the number of method handler threads to run
 * @param verbose whether each call should be logged
 * @throws IOException e
 */
public Server(Object instance, final Class<?>[] ifaces,
              Configuration conf, String bindAddress,  int port,
              int numHandlers, int metaHandlerCount, boolean verbose,
              int highPriorityLevel) throws IOException {
  super(bindAddress, port, Invocation.class, numHandlers, metaHandlerCount,
      conf, classNameBase(instance.getClass().getName()),
      highPriorityLevel);
  this.instance = instance;
  this.implementation = instance.getClass();
  this.verbose = verbose;

  this.ifaces = ifaces;

  // create metrics for the advertised interfaces this server implements.
  String [] metricSuffixes = new String [] {ABOVE_ONE_SEC_METRIC};
  this.rpcMetrics.createMetrics(this.ifaces, false, metricSuffixes);

  this.authorize =
    conf.getBoolean(
        ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);

  this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME,
      DEFAULT_WARN_RESPONSE_TIME);
  this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE,
      DEFAULT_WARN_RESPONSE_SIZE);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestRMAdminService.java   
/**
 * Asserts that after a service-ACL refresh the given protocol carries the
 * expected ACL string while every other protocol registered with the
 * manager still has the default wide-open ("*") ACL.
 *
 * @param manager   authorization manager whose per-protocol ACLs are inspected
 * @param protocol  the protocol expected to carry {@code aclString}
 * @param aclString the ACL string expected on {@code protocol}
 */
private void verifyServiceACLsRefresh(ServiceAuthorizationManager manager,
    Class<?> protocol, String aclString) {
  for (Class<?> protocolClass : manager.getProtocolsWithAcls()) {
    AccessControlList accessList =
        manager.getProtocolsAcls(protocolClass);
    // JUnit's assertEquals takes (expected, actual); expected must come
    // first so failure messages report the values correctly.
    if (protocolClass == protocol) {
      Assert.assertEquals(aclString, accessList.getAclString());
    } else {
      Assert.assertEquals("*", accessList.getAclString());
    }
  }
}
项目:hadoop-EAR    文件:JobTracker.java   
/**
 * Re-reads the service-level authorization policy.
 *
 * @throws AuthorizationException if service-level authorization is not enabled
 * @throws IOException if the policy cannot be refreshed
 */
@Override
public void refreshServiceAcl() throws IOException {
  final boolean authorizationEnabled = conf.getBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);
  if (!authorizationEnabled) {
    throw new AuthorizationException("Service Level Authorization not enabled!");
  }
  SecurityUtil.getPolicy().refresh();
}
项目:hadoop-EAR    文件:TestCLI.java   
/**
 * Builds the test fixture: reads testConfig.xml, starts a one-node mini
 * DFS cluster with service-level authorization enabled, and then a
 * one-node mini MR cluster on top of it.
 *
 * @throws Exception if any cluster fails to start
 */
public void setUp() throws Exception {
  // Read the testConfig.xml file
  readTestConfigFile();

  // Start up the mini dfs cluster
  boolean success = false;
  conf = new Configuration();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
                HadoopPolicyProvider.class, PolicyProvider.class);
  conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                  true);

  dfsCluster = new MiniDFSCluster(conf, 1, true, null);
  namenode = conf.get("fs.default.name", "file:///");
  // Replace spaces with '+' in the URI form of the test data path.
  clitestDataDir = new File(TEST_CACHE_DATA_DIR).
    toURI().toString().replace(' ', '+');
  username = System.getProperty("user.name");

  FileSystem fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
  dfs = (DistributedFileSystem) fs;

  // Start up mini mr cluster
  JobConf mrConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(), 1, 
                         null, null, mrConf);
  jobtracker = mrCluster.createJobConf().get("mapred.job.tracker", "local");

  // NOTE(review): 'success' is set unconditionally just before the assert,
  // so the assertion below can never fail; setup failures surface as
  // exceptions thrown earlier in this method.
  success = true;

  assertTrue("Error setting up Mini DFS & MR clusters", success);
}
项目:hadoop-EAR    文件:TestRPC.java   
/**
 * Round-trips RPCs with service-level authorization switched on: a
 * wildcard ACL must allow the calls, a non-matching ACL must reject them.
 */
@Test
public void testAuthorization() throws Exception {
  final Configuration configuration = new Configuration();
  configuration.setBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, true);

  // A wildcard ACL admits every caller, so these RPCs should succeed.
  configuration.set(ACL_CONFIG, "*");
  doRPCs(configuration, false);

  // Now install an ACL that matches nobody and expect the RPCs to fail.
  configuration.set(ACL_CONFIG, "invalid invalid");
  doRPCs(configuration, true);
}
项目:hadoop-EAR    文件:RPC.java   
/** Construct an RPC server.
 * @param instance the instance whose methods will be called
 * @param conf the configuration to use
 * @param bindAddress the address to bind on to listen for connection
 * @param port the port to listen for connections on
 * @param numHandlers the number of method handler threads to run
 * @param verbose whether each call should be logged
 * @param supportOldJobConf supports server to deserialize old job conf
 * @throws IOException if the underlying server cannot be constructed
 */
public Server(Object instance, Configuration conf, String bindAddress,
    int port, int numHandlers, boolean verbose, boolean supportOldJobConf)
    throws IOException {
  super(bindAddress, port, Invocation.class, numHandlers, conf,
      classNameBase(instance.getClass().getName()), supportOldJobConf);
  this.instance = instance;
  // Remember whether the served instance implements FastProtocol.
  this.fastProtocol = instance instanceof FastProtocol;
  this.verbose = verbose;
  // Service-level authorization is off unless explicitly enabled in conf.
  this.authorize = 
    conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                    false);
}
项目:hadoop-EAR    文件:RPC.java   
/**
 * Checks that the connecting user may speak the protocol named in the
 * connection header; a no-op when service-level authorization is off.
 *
 * @param user authenticated client subject
 * @param connection header carrying the requested protocol name
 * @throws AuthorizationException if the protocol is unknown or the user
 *         is not permitted to use it
 */
@Override
public void authorize(Subject user, ConnectionHeader connection) 
throws AuthorizationException {
  if (!authorize) {
    return;
  }
  final String protocolName = connection.getProtocol();
  final Class<?> protocol;
  try {
    protocol = getProtocolClass(protocolName, getConf());
  } catch (ClassNotFoundException e) {
    // A protocol we cannot even resolve is treated as an authorization failure.
    throw new AuthorizationException("Unknown protocol: " + protocolName);
  }
  ServiceAuthorizationManager.authorize(user, protocol);
}
项目:hadoop-EAR    文件:NameNode.java   
/**
 * Initialize name-node.
 *
 * Installs the service-level authorization policy (when enabled), fails
 * fast if any server port is not bindable, then builds metrics, the
 * namesystem, and starts the datanode-facing and HTTP servers.
 *
 * @throws IOException if a port is unavailable or initialization fails
 */
protected void initialize() throws IOException {
  // set service-level authorization security policy
  if (serviceAuthEnabled =
      getConf().getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    // Instantiate the configured PolicyProvider (HDFSPolicyProvider by
    // default) reflectively and install it as the active policy.
    PolicyProvider policyProvider =
      (PolicyProvider)(ReflectionUtils.newInstance(
          getConf().getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
              HDFSPolicyProvider.class, PolicyProvider.class),
          getConf()));
    SecurityUtil.setPolicy(new ConfiguredPolicy(getConf(), policyProvider));
  }

  // This is a check that the port is free
  // create a socket and bind to it, throw exception if port is busy
  // This has to be done before we are reading Namesystem not to waste time and fail fast
  NetUtils.isSocketBindable(getClientProtocolAddress(getConf()));
  NetUtils.isSocketBindable(getDNProtocolAddress(getConf()));
  NetUtils.isSocketBindable(getHttpServerAddress(getConf()));

  // Fingerprint of ClientProtocol's method signatures at the current
  // version (consumers of the fingerprint are not visible here).
  long serverVersion = ClientProtocol.versionID;
  this.clientProtocolMethodsFingerprint = ProtocolSignature
      .getMethodsSigFingerPrint(ClientProtocol.class, serverVersion);

  myMetrics = new NameNodeMetrics(getConf(), this);

  this.clusterName = getConf().get(FSConstants.DFS_CLUSTER_NAME);
  this.namesystem = new FSNamesystem(this, getConf());
  // HACK: from removal of FSNamesystem.getFSNamesystem().
  JspHelper.fsn = this.namesystem;

  // Start serving: datanode protocol server first, then the HTTP server.
  this.startDNServer();
  startHttpServer(getConf());
}
项目:pbase    文件:HBasePolicyProvider.java   
/**
 * Points Hadoop's policy machinery at hbase-policy.xml and, when
 * service-level authorization is enabled in the configuration, loads the
 * HBase policies into the supplied manager and refreshes the proxy-user
 * (superuser impersonation) settings.
 */
public static void init(Configuration conf, ServiceAuthorizationManager authManager) {
  // Hadoop reads the policy file name from this system property.
  System.setProperty("hadoop.policy.file", "hbase-policy.xml");
  final boolean authorizationEnabled =
      conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);
  if (!authorizationEnabled) {
    return;
  }
  authManager.refresh(conf, new HBasePolicyProvider());
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
项目:HIndex    文件:HBasePolicyProvider.java   
/**
 * Points Hadoop's policy machinery at hbase-policy.xml and, when
 * service-level authorization is enabled in the configuration, loads the
 * HBase policies into the supplied manager and refreshes the proxy-user
 * (superuser impersonation) settings.
 */
public static void init(Configuration conf, ServiceAuthorizationManager authManager) {
  // Hadoop reads the policy file name from this system property.
  System.setProperty("hadoop.policy.file", "hbase-policy.xml");
  final boolean authorizationEnabled =
      conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);
  if (!authorizationEnabled) {
    return;
  }
  authManager.refresh(conf, new HBasePolicyProvider());
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
项目:HIndex    文件:RpcServer.java   
/**
 * Starts the service threads but does not allow requests to be responded yet.
 * Client will get {@link ServerNotRunningYetException} instead.
 */
@Override
public synchronized void startThreads() {
  // May be null when token authentication is not configured.
  AuthenticationTokenSecretManager mgr = createSecretManager();
  if (mgr != null) {
    setSecretManager(mgr);
    mgr.start();
  }
  // Install the service-level authorization manager and load the HBase policies.
  this.authManager = new ServiceAuthorizationManager();
  HBasePolicyProvider.init(conf, authManager);
  // Bring up the I/O threads and the call scheduler.
  responder.start();
  listener.start();
  scheduler.start();
}
项目:hops    文件:TestRMAdminService.java   
/**
 * Asserts that after a service-ACL refresh the given protocol carries the
 * expected ACL string while every other protocol registered with the
 * manager still has the default wide-open ("*") ACL.
 *
 * @param manager   authorization manager whose per-protocol ACLs are inspected
 * @param protocol  the protocol expected to carry {@code aclString}
 * @param aclString the ACL string expected on {@code protocol}
 */
private void verifyServiceACLsRefresh(ServiceAuthorizationManager manager,
    Class<?> protocol, String aclString) {
  for (Class<?> protocolClass : manager.getProtocolsWithAcls()) {
    AccessControlList accessList =
        manager.getProtocolsAcls(protocolClass);
    // JUnit's assertEquals takes (expected, actual); expected must come
    // first so failure messages report the values correctly.
    if (protocolClass == protocol) {
      Assert.assertEquals(aclString, accessList.getAclString());
    } else {
      Assert.assertEquals("*", accessList.getAclString());
    }
  }
}
项目:IRIndex    文件:HBasePolicyProvider.java   
/**
 * Points Hadoop's policy machinery at hbase-policy.xml and, when
 * service-level authorization is enabled in the configuration, loads the
 * HBase policies into the supplied manager.
 */
public static void init(Configuration conf,
    ServiceAuthorizationManager authManager) {
  // Hadoop reads the policy file name from this system property.
  System.setProperty("hadoop.policy.file", "hbase-policy.xml");
  final boolean authorizationEnabled = conf.getBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);
  if (authorizationEnabled) {
    authManager.refresh(conf, new HBasePolicyProvider());
  }
}
项目:IRIndex    文件:SecureRpcEngine.java   
/**
 * Sets up the secret manager and service-level authorization, then
 * delegates to the base class to start the actual server threads.
 */
@Override
public void startThreads() {
  // May be null when token authentication is not configured.
  AuthenticationTokenSecretManager mgr = createSecretManager();
  if (mgr != null) {
    setSecretManager(mgr);
    mgr.start();
  }
  // Install the service-level authorization manager and load the HBase policies.
  this.authManager = new ServiceAuthorizationManager();
  HBasePolicyProvider.init(conf, authManager);

  // continue with base startup
  super.startThreads();
}
项目:IRIndex    文件:WritableRpcEngine.java   
/** Construct an RPC server.
 * @param instance the instance whose methods will be called
 * @param ifaces the advertised interfaces; per-interface metrics are created for them
 * @param conf the configuration to use
 * @param bindAddress the address to bind on to listen for connection
 * @param port the port to listen for connections on
 * @param numHandlers the number of method handler threads to run
 * @param metaHandlerCount count of meta-call handler threads, forwarded to the base server
 * @param verbose whether each call should be logged
 * @param highPriorityLevel priority threshold forwarded to the base server
 * @throws IOException e
 */
public Server(Object instance, final Class<?>[] ifaces,
              Configuration conf, String bindAddress,  int port,
              int numHandlers, int metaHandlerCount, boolean verbose,
              int highPriorityLevel) throws IOException {
  super(bindAddress, port, Invocation.class, numHandlers, metaHandlerCount,
      conf, classNameBase(instance.getClass().getName()),
      highPriorityLevel);
  this.instance = instance;
  this.implementation = instance.getClass();
  this.verbose = verbose;

  this.ifaces = ifaces;

  // create metrics for the advertised interfaces this server implements.
  String [] metricSuffixes = new String [] {ABOVE_ONE_SEC_METRIC};
  this.rpcMetrics.createMetrics(this.ifaces, false, metricSuffixes);

  // Service-level authorization is off unless explicitly enabled in conf.
  this.authorize =
    conf.getBoolean(
        ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);

  // Response time/size thresholds read from the WARN_RESPONSE_* config keys.
  this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME,
      DEFAULT_WARN_RESPONSE_TIME);
  this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE,
      DEFAULT_WARN_RESPONSE_SIZE);
}
项目:hbase    文件:HBasePolicyProvider.java   
/**
 * Points Hadoop's policy machinery at hbase-policy.xml and, when
 * service-level authorization is enabled in the configuration, loads the
 * HBase policies into the supplied manager and refreshes the proxy-user
 * (superuser impersonation) settings.
 */
public static void init(Configuration conf, ServiceAuthorizationManager authManager) {
  // Hadoop reads the policy file name from this system property.
  System.setProperty("hadoop.policy.file", "hbase-policy.xml");
  final boolean authorizationEnabled =
      conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);
  if (!authorizationEnabled) {
    return;
  }
  authManager.refresh(conf, new HBasePolicyProvider());
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
项目:hbase    文件:NettyRpcServer.java   
/** Starts the server; repeated calls after startup are no-ops. */
@Override
public synchronized void start() {
  if (started) {
    return;
  }
  // May be null when token authentication is not configured.
  authTokenSecretMgr = createSecretManager();
  if (authTokenSecretMgr != null) {
    setSecretManager(authTokenSecretMgr);
    authTokenSecretMgr.start();
  }
  // Install the service-level authorization manager and load the HBase policies.
  this.authManager = new ServiceAuthorizationManager();
  HBasePolicyProvider.init(conf, authManager);
  // Start the call scheduler, then mark the server started.
  scheduler.start();
  started = true;
}
项目:RStore    文件:WritableRpcEngine.java   
/** Construct an RPC server.
 * @param instance the instance whose methods will be called
 * @param ifaces the advertised interfaces; per-interface metrics are created for them
 * @param conf the configuration to use
 * @param bindAddress the address to bind on to listen for connection
 * @param port the port to listen for connections on
 * @param numHandlers the number of method handler threads to run
 * @param metaHandlerCount count of meta-call handler threads, forwarded to the base server
 * @param verbose whether each call should be logged
 * @param highPriorityLevel priority threshold forwarded to the base server
 * @throws IOException e
 */
public Server(Object instance, final Class<?>[] ifaces,
              Configuration conf, String bindAddress,  int port,
              int numHandlers, int metaHandlerCount, boolean verbose,
              int highPriorityLevel) throws IOException {
  super(bindAddress, port, Invocation.class, numHandlers, metaHandlerCount,
      conf, classNameBase(instance.getClass().getName()),
      highPriorityLevel);
  this.instance = instance;
  this.implementation = instance.getClass();
  this.verbose = verbose;

  this.ifaces = ifaces;

  // create metrics for the advertised interfaces this server implements.
  String [] metricSuffixes = new String [] {ABOVE_ONE_SEC_METRIC};
  this.rpcMetrics.createMetrics(this.ifaces, false, metricSuffixes);

  // Service-level authorization is off unless explicitly enabled in conf.
  this.authorize =
    conf.getBoolean(
        ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);

  // Response time/size thresholds read from the WARN_RESPONSE_* config keys.
  this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME,
      DEFAULT_WARN_RESPONSE_TIME);
  this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE,
      DEFAULT_WARN_RESPONSE_SIZE);
}
项目:PyroDB    文件:HBasePolicyProvider.java   
/**
 * Points Hadoop's policy machinery at hbase-policy.xml and, when
 * service-level authorization is enabled in the configuration, loads the
 * HBase policies into the supplied manager and refreshes the proxy-user
 * (superuser impersonation) settings.
 */
public static void init(Configuration conf, ServiceAuthorizationManager authManager) {
  // Hadoop reads the policy file name from this system property.
  System.setProperty("hadoop.policy.file", "hbase-policy.xml");
  final boolean authorizationEnabled =
      conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);
  if (!authorizationEnabled) {
    return;
  }
  authManager.refresh(conf, new HBasePolicyProvider());
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
项目:c5    文件:HBasePolicyProvider.java   
/**
 * Points Hadoop's policy machinery at hbase-policy.xml and, when
 * service-level authorization is enabled in the configuration, loads the
 * HBase policies into the supplied manager and refreshes the proxy-user
 * (superuser impersonation) settings.
 */
public static void init(Configuration conf, ServiceAuthorizationManager authManager) {
  // Hadoop reads the policy file name from this system property.
  System.setProperty("hadoop.policy.file", "hbase-policy.xml");
  final boolean authorizationEnabled =
      conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);
  if (!authorizationEnabled) {
    return;
  }
  authManager.refresh(conf, new HBasePolicyProvider());
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
项目:hadoop-on-lustre    文件:JobTracker.java   
/**
 * Reloads the MapReduce service-level authorization policy.
 *
 * @throws AuthorizationException if service-level authorization is not enabled
 * @throws IOException if the policy cannot be reloaded
 */
@Override
public void refreshServiceAcl() throws IOException {
  final boolean authorizationEnabled = conf.getBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);
  if (!authorizationEnabled) {
    throw new AuthorizationException("Service Level Authorization not enabled!");
  }
  ServiceAuthorizationManager.refresh(conf, new MapReducePolicyProvider());
}
项目:hadoop-on-lustre    文件:TestCLI.java   
/**
 * Builds the test fixture: reads testConfig.xml, starts a one-node mini
 * DFS cluster with service-level authorization enabled, and then a
 * one-node mini MR cluster on top of it.
 *
 * @throws Exception if any cluster fails to start
 */
public void setUp() throws Exception {
  // Read the testConfig.xml file
  readTestConfigFile();

  // Start up the mini dfs cluster
  boolean success = false;
  conf = new Configuration();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
                HadoopPolicyProvider.class, PolicyProvider.class);
  conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
                  true);

  dfsCluster = new MiniDFSCluster(conf, 1, true, null);
  namenode = conf.get("fs.default.name", "file:///");
  // Replace spaces with '+' in the URI form of the test data path.
  clitestDataDir = new File(TEST_CACHE_DATA_DIR).
    toURI().toString().replace(' ', '+');
  username = System.getProperty("user.name");

  FileSystem fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
  dfs = (DistributedFileSystem) fs;

  // Start up mini mr cluster
  JobConf mrConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(), 1, 
                         null, null, mrConf);
  jobtracker = mrCluster.createJobConf().get("mapred.job.tracker", "local");

  // NOTE(review): 'success' is set unconditionally just before the assert,
  // so the assertion below can never fail; setup failures surface as
  // exceptions thrown earlier in this method.
  success = true;

  assertTrue("Error setting up Mini DFS & MR clusters", success);
}