Java class org.apache.hadoop.security.authorize.ConfiguredPolicy, example source code

Project: hadoop-EAR    File: TestRPC.java
private void doRPCs(Configuration conf, boolean expectFailure) throws Exception {
  SecurityUtil.setPolicy(new ConfiguredPolicy(conf, new TestPolicyProvider()));

  Server server = RPC.getServer(new TestImpl(), ADDRESS, 0, 5, true, conf);

  TestProtocol proxy = null;

  server.start();

  InetSocketAddress addr = NetUtils.getConnectAddress(server);

  try {
    proxy = (TestProtocol)RPC.getProxy(
        TestProtocol.class, TestProtocol.versionID, addr, conf);
    proxy.ping();

    if (expectFailure) {
      fail("Expect RPC.getProxy to fail with AuthorizationException!");
    }
  } catch (RemoteException e) {
    if (expectFailure) {
      assertTrue(e.unwrapRemoteException() instanceof AuthorizationException);
    } else {
      throw e;
    }
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
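
ConfiguredPolicy pulls its ACLs from the PolicyProvider it is constructed with. The TestPolicyProvider used above is not shown on this page; the following is a minimal sketch of what such a provider looks like, where the ACL key name "test.protocol.acl" is an assumption for illustration:

import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;

public class TestPolicyProvider extends PolicyProvider {
  // Maps each protected protocol to the configuration key holding its ACL.
  @Override
  public Service[] getServices() {
    return new Service[] {
        new Service("test.protocol.acl", TestProtocol.class)  // key name assumed
    };
  }
}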
Project: hadoop-EAR    File: NameNode.java
/**
 * Initialize name-node.
 *
 */
protected void initialize() throws IOException {
  // set service-level authorization security policy
  if (serviceAuthEnabled =
      getConf().getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider =
      (PolicyProvider)(ReflectionUtils.newInstance(
          getConf().getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
              HDFSPolicyProvider.class, PolicyProvider.class),
          getConf()));
    SecurityUtil.setPolicy(new ConfiguredPolicy(getConf(), policyProvider));
  }

  // Check that the ports below are free: create a socket, bind it, and let the
  // bind throw if the port is busy. This must happen before the Namesystem is
  // loaded, so we fail fast instead of wasting time.
  NetUtils.isSocketBindable(getClientProtocolAddress(getConf()));
  NetUtils.isSocketBindable(getDNProtocolAddress(getConf()));
  NetUtils.isSocketBindable(getHttpServerAddress(getConf()));

  long serverVersion = ClientProtocol.versionID;
  this.clientProtocolMethodsFingerprint = ProtocolSignature
      .getMethodsSigFingerPrint(ClientProtocol.class, serverVersion);

  myMetrics = new NameNodeMetrics(getConf(), this);

  this.clusterName = getConf().get(FSConstants.DFS_CLUSTER_NAME);
  this.namesystem = new FSNamesystem(this, getConf());
  // HACK: from removal of FSNamesystem.getFSNamesystem().
  JspHelper.fsn = this.namesystem;

  this.startDNServer();
  startHttpServer(getConf());
}
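
initialize() only installs a ConfiguredPolicy when service-level authorization is switched on. A minimal sketch of a caller enabling the feature before starting a NameNode; the provider argument is a placeholder for any PolicyProvider subclass:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;

public class EnableServiceAuth {
  // Returns a Configuration that makes initialize() take the ConfiguredPolicy branch.
  static Configuration authorizedConf(Class<? extends PolicyProvider> provider) {
    Configuration conf = new Configuration();
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, true);
    // Optional: without this, HDFSPolicyProvider is used as the default above.
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, provider, PolicyProvider.class);
    return conf;
  }
}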
Project: RDFS    File: TestRPC.java
doRPCs here is identical, line for line, to the hadoop-EAR version at the top of this page, so it is not repeated.
Project: hadoop-0.20    File: TestRPC.java
doRPCs here is likewise identical to the hadoop-EAR version at the top of this page.
Project: hadoop-0.20    File: NameNode.java
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
private void initialize(Configuration conf) throws IOException {
  InetSocketAddress socAddr = NameNode.getAddress(conf);
  int handlerCount = conf.getInt("dfs.namenode.handler.count", 10);

  // set service-level authorization security policy
  if (serviceAuthEnabled = 
        conf.getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = 
      (PolicyProvider)(ReflectionUtils.newInstance(
          conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
              HDFSPolicyProvider.class, PolicyProvider.class), 
          conf));
    SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
  }

  // create rpc server 
  this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                              handlerCount, false, conf);

  // The rpc-server port can be ephemeral... ensure we have the correct info
  this.serverAddress = this.server.getListenerAddress(); 
  FileSystem.setDefaultUri(conf, getUri(serverAddress));
  LOG.info("Namenode up at: " + this.serverAddress);

  myMetrics = new NameNodeMetrics(conf, this);

  this.namesystem = new FSNamesystem(this, conf);
  startHttpServer(conf);
  this.server.start();  //start RPC server   
  startTrashEmptier(conf);
}
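
The ACLs that ConfiguredPolicy enforces are ordinary configuration entries, conventionally kept in hadoop-policy.xml. A sketch of setting one HDFS ACL programmatically; "security.client.protocol.acl" is the standard key for ClientProtocol, while the user and group names are placeholders:

import org.apache.hadoop.conf.Configuration;

public class ClientAclExample {
  static Configuration withClientAcl() {
    Configuration conf = new Configuration();
    // Value format: comma-separated users, a space, comma-separated groups;
    // "*" grants access to everyone. "alice,bob" and "hdfsadmins" are placeholders.
    conf.set("security.client.protocol.acl", "alice,bob hdfsadmins");
    return conf;
  }
}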
Project: hadoop-gpu    File: TestRPC.java
doRPCs here is likewise identical to the hadoop-EAR version at the top of this page.
Project: hadoop-gpu    File: NameNode.java
initialize(Configuration) here is identical, line for line, to the hadoop-0.20 version directly above.
Project: hadoop-EAR    File: TestRPC.java
@Test
public void testIOSetupFailure() throws Exception {
  SecurityUtil.setPolicy(new ConfiguredPolicy(conf, new TestPolicyProvider()));

  Server server = RPC.getServer(new TestImpl(), ADDRESS, 0, 5, true, conf);

  TestProtocol proxy = null;

  server.start();

  InetSocketAddress addr = NetUtils.getConnectAddress(server);

  final AtomicReference<Hashtable> hashtable = new AtomicReference<Hashtable>(
      null);
  try {
    InjectionHandler.set(new InjectionHandler() {
      @Override
      protected void _processEvent(InjectionEventI event, Object... args) {
        if (event == InjectionEventCore.RPC_CLIENT_SETUP_IO_STREAM_FAILURE) {
          hashtable.set((Hashtable) args[0]);
          throw new RuntimeException("testIOSetupFailure");
        }
      }
    });

    try {
      proxy = (TestProtocol)RPC.getProxy(
          TestProtocol.class, TestProtocol.versionID, addr, conf);
      proxy.ping();
      TestCase.fail();
    } catch (RuntimeException e) {
      if (!e.getMessage().equals("testIOSetupFailure")) {
        throw e;
      }
    }
    InjectionHandler.clear();
    TestCase.assertNotNull("injection handler was not triggered",
        hashtable.get());
    TestCase.assertTrue("Connection is not cleared.",
        hashtable.get().isEmpty());
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
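
The InjectionHandler hook this test relies on (a facebook-hadoop test utility) can be reused for other fault-injection checks. A minimal sketch that assumes only the API visible above (set, clear, _processEvent) and counts event firings instead of throwing:

import java.util.concurrent.atomic.AtomicInteger;

public class InjectionCounter {
  // Runs rpcWork with a handler that counts RPC_CLIENT_SETUP_IO_STREAM_FAILURE
  // events, then restores the default handler.
  static int countStreamSetupEvents(Runnable rpcWork) {
    final AtomicInteger hits = new AtomicInteger();
    InjectionHandler.set(new InjectionHandler() {
      @Override
      protected void _processEvent(InjectionEventI event, Object... args) {
        if (event == InjectionEventCore.RPC_CLIENT_SETUP_IO_STREAM_FAILURE) {
          hits.incrementAndGet();  // observe only; do not fail the call
        }
      }
    });
    try {
      rpcWork.run();
    } finally {
      InjectionHandler.clear();  // always restore the default handler
    }
    return hits.get();
  }
}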
Project: hadoop-EAR    File: DataNode.java
private void initConfig(Configuration conf) throws IOException {
  if (conf.get(FSConstants.SLAVE_HOST_NAME) != null) {
    machineName = conf.get(FSConstants.SLAVE_HOST_NAME);
  }
  if (machineName == null) {
    machineName = DNS.getDefaultIP(conf.get(FSConstants.DFS_DATANODE_DNS_INTERFACE, "default"));
  } else {
    // Ensuring it's an IP address
    machineName = NetUtils.normalizeHostName(machineName);
  }
  // Allow configuration to delay block reports to find bugs
  artificialBlockReceivedDelay = conf.getInt(
    "dfs.datanode.artificialBlockReceivedDelay", 0);
  if (conf.getBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = (PolicyProvider) (ReflectionUtils
        .newInstance(conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
            HDFSPolicyProvider.class, PolicyProvider.class), conf));
    SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
  }
  this.socketTimeout = conf.getInt("dfs.socket.timeout",
      HdfsConstants.READ_TIMEOUT);
  this.socketReadExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_READ_EXTENSION,
      HdfsConstants.READ_TIMEOUT_EXTENSION);
  this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
      HdfsConstants.WRITE_TIMEOUT);
  this.socketWriteExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_WRITE_EXTENTSION,
      HdfsConstants.WRITE_TIMEOUT_EXTENSION);

  /* Based on results on different platforms, we might need to set the default
   * to false on some of them. */
  this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed",
                                           true);

  // TODO: remove the global setting and change data protocol to support
  // per session setting for this value.
  this.ignoreChecksumWhenRead = conf.getBoolean("dfs.datanode.read.ignore.checksum",
      false);

  this.writePacketSize = conf.getInt("dfs.write.packet.size",
      HdfsConstants.DEFAULT_PACKETSIZE);

  this.deletedReportInterval =
    conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
  // Calculate the full block report interval
  int fullReportMagnifier = conf.getInt("dfs.fullblockreport.magnifier", 2);
  this.blockReportInterval = fullReportMagnifier * deletedReportInterval;
  this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;
  long heartbeatRecheckInterval = conf.getInt(
      "heartbeat.recheck.interval", 5 * 60 * 1000); // 5 minutes
  this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval +
      10 * heartBeatInterval;
  // NOTE: the key read here is a checkpoint-edits directory key, while the
  // default is a block-recovery timeout; the mismatch appears to be a
  // copy-paste slip in the original source.
  this.blkRecoveryTimeout = conf.getLong(
      DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
      DFS_BLK_RECOVERY_BY_NN_TIMEOUT_DEFAULT);

  this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay",
      BLOCKREPORT_INITIAL_DELAY) * 1000L;
  if (this.initialBlockReportDelay >= blockReportInterval) {
    this.initialBlockReportDelay = 0;
    LOG.info("dfs.blockreport.initialDelay is greater than "
        + "dfs.blockreport.intervalMsec."
        + " Setting initial delay to 0 msec:");
  }

  // do we need to sync block file contents to disk when blockfile is closed?
  this.syncOnClose = conf.getBoolean("dfs.datanode.synconclose", false);

  this.minDiskCheckIntervalMsec = conf.getLong(
      "dfs.datnode.checkdisk.mininterval",
      FSConstants.MIN_INTERVAL_CHECK_DIR_MSEC);
}
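
The block-report timing in initConfig() is derived rather than configured directly: full reports fire every dfs.fullblockreport.magnifier deleted-report intervals. A small sketch of that arithmetic with illustrative values:

import org.apache.hadoop.conf.Configuration;

public class BlockReportTiming {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setLong("dfs.blockreport.intervalMsec", 60000L);  // illustrative: 60s
    conf.setInt("dfs.fullblockreport.magnifier", 2);

    long deletedReportInterval = conf.getLong("dfs.blockreport.intervalMsec", 0L);
    int magnifier = conf.getInt("dfs.fullblockreport.magnifier", 2);
    // Mirrors initConfig() above: a full report every magnifier * interval ms.
    long blockReportInterval = magnifier * deletedReportInterval;
    System.out.println("full block report every " + blockReportInterval + " ms");
  }
}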
Project: RDFS    File: NameNode.java
/**
 * Initialize name-node.
 * 
 */
private void initialize() throws IOException {    
  // set service-level authorization security policy
  if (serviceAuthEnabled =
      getConf().getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = 
      (PolicyProvider)(ReflectionUtils.newInstance(
          getConf().getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
              HDFSPolicyProvider.class, PolicyProvider.class), 
          getConf()));
    SecurityUtil.setPolicy(new ConfiguredPolicy(getConf(), policyProvider));
  }

  // Check that the ports below are free: create a socket, bind it, and let the
  // bind throw if the port is busy. This must happen before the Namesystem is
  // loaded, so we fail fast instead of wasting time.
  InetSocketAddress clientSocket = NameNode.getAddress(getConf());
  ServerSocket socket = new ServerSocket();
  socket.bind(clientSocket);
  socket.close();
  InetSocketAddress dnSocket = NameNode.getDNProtocolAddress(getConf());
  if (dnSocket != null) {
    socket = new ServerSocket();
    socket.bind(dnSocket);
    socket.close();
  }

  long serverVersion = ClientProtocol.versionID;
  this.clientProtocolMethodsFingerprint = ProtocolSignature
      .getMethodsSigFingerPrint(ClientProtocol.class, serverVersion);

  myMetrics = new NameNodeMetrics(getConf(), this);

  this.clusterName = getConf().get("dfs.cluster.name");
  this.namesystem = new FSNamesystem(this, getConf());
  // HACK: from removal of FSNamesystem.getFSNamesystem().
  JspHelper.fsn = this.namesystem;

  this.startDNServer();
  startHttpServer(getConf());
}
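
hadoop-EAR folds this inline port probe into NetUtils.isSocketBindable (see its initialize() earlier on this page); the RDFS version spells it out. A sketch of an equivalent helper, assuming the check is simply bind-then-close; the real hadoop-EAR implementation may differ:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class BindCheck {
  // Throws (BindException is an IOException) if addr cannot be bound.
  static void checkBindable(InetSocketAddress addr) throws IOException {
    ServerSocket socket = new ServerSocket();
    try {
      socket.bind(addr);
    } finally {
      socket.close();
    }
  }
}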
Project: RDFS    File: DataNode.java
private void initConfig(Configuration conf) throws IOException {
  if (conf.get("slave.host.name") != null) {
    machineName = conf.get("slave.host.name");   
  }
  if (machineName == null) {
    machineName = DNS.getDefaultHost(
                                   conf.get("dfs.datanode.dns.interface","default"),
                                   conf.get("dfs.datanode.dns.nameserver","default"));
  }
  // Allow configuration to delay block reports to find bugs
  artificialBlockReceivedDelay = conf.getInt(
    "dfs.datanode.artificialBlockReceivedDelay", 0);
  if (conf.getBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = (PolicyProvider) (ReflectionUtils
        .newInstance(conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
            HDFSPolicyProvider.class, PolicyProvider.class), conf));
    SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
  }
  this.socketTimeout = conf.getInt("dfs.socket.timeout",
      HdfsConstants.READ_TIMEOUT);
  this.socketReadExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_READ_EXTENSION,
      HdfsConstants.READ_TIMEOUT_EXTENSION);
  this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
      HdfsConstants.WRITE_TIMEOUT);
  this.socketWriteExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_WRITE_EXTENTSION,
      HdfsConstants.WRITE_TIMEOUT_EXTENSION);

  /* Based on results on different platforms, we might need to set the default
   * to false on some of them. */
  this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed",
                                           true);

  // TODO: remove the global setting and change data protocol to support
  // per session setting for this value.
  this.ignoreChecksumWhenRead = conf.getBoolean("dfs.datanode.read.ignore.checksum",
      false);

  this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024);

  this.deletedReportInterval =
    conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
  // Calculate the full block report interval
  int fullReportMagnifier = conf.getInt("dfs.fullblockreport.magnifier", 2);
  this.blockReportInterval = fullReportMagnifier * deletedReportInterval;
  this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;
  long heartbeatRecheckInterval = conf.getInt(
      "heartbeat.recheck.interval", 5 * 60 * 1000); // 5 minutes
  this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval +
      10 * heartBeatInterval;

  this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay",
      BLOCKREPORT_INITIAL_DELAY) * 1000L;
  if (this.initialBlockReportDelay >= blockReportInterval) {
    this.initialBlockReportDelay = 0;
    LOG.info("dfs.blockreport.initialDelay is greater than "
        + "dfs.blockreport.intervalMsec."
        + " Setting initial delay to 0 msec:");
  }

  // do we need to sync block file contents to disk when blockfile is closed?
  this.syncOnClose = conf.getBoolean("dfs.datanode.synconclose", false);

  this.minDiskCheckIntervalMsec = conf.getLong(
      "dfs.datnode.checkdisk.mininterval",
      FSConstants.MIN_INTERVAL_CHECK_DIR_MSEC);
}
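
Every snippet on this page repeats the same three-step pattern: read the authorization flag, reflectively construct the provider, install the policy. A condensed sketch of that pattern as a reusable helper; the defaultProvider argument plays the role HDFSPolicyProvider plays above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authorize.ConfiguredPolicy;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
import org.apache.hadoop.util.ReflectionUtils;

public class InstallPolicy {
  static void maybeInstall(Configuration conf,
                           Class<? extends PolicyProvider> defaultProvider) {
    if (!conf.getBoolean(
        ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
      return;  // service-level authorization disabled
    }
    PolicyProvider provider = ReflectionUtils.newInstance(
        conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
            defaultProvider, PolicyProvider.class), conf);
    // Installs the policy process-wide, exactly as the snippets above do.
    SecurityUtil.setPolicy(new ConfiguredPolicy(conf, provider));
  }
}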