Example source code for the Java class org.apache.hadoop.security.SecurityUtil

Project: hadoop-oss    File: HttpServer2.java
private void initSpnego(Configuration conf, String hostName,
    String usernameConfKey, String keytabConfKey) throws IOException {
  Map<String, String> params = new HashMap<>();
  String principalInConf = conf.get(usernameConfKey);
  if (principalInConf != null && !principalInConf.isEmpty()) {
    params.put("kerberos.principal", SecurityUtil.getServerPrincipal(
        principalInConf, hostName));
  }
  String httpKeytab = conf.get(keytabConfKey);
  if (httpKeytab != null && !httpKeytab.isEmpty()) {
    params.put("kerberos.keytab", httpKeytab);
  }
  params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");

  defineFilter(webAppContext, SPNEGO_FILTER,
               AuthenticationFilter.class.getName(), params, null);
}
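A note on the key call above: SecurityUtil.getServerPrincipal expands the _HOST placeholder (SecurityUtil.HOSTNAME_PATTERN) in a configured principal with the server's hostname. A minimal sketch, with an illustrative principal, realm, and hostname:

// Sketch only: the principal, realm, and hostname are illustrative.
static String spnegoPrincipal(String hostName) throws IOException {
  // "_HOST" is SecurityUtil.HOSTNAME_PATTERN; it is replaced by hostName.
  return SecurityUtil.getServerPrincipal("HTTP/_HOST@EXAMPLE.COM", hostName);
  // e.g. hostName "web01.example.com" -> "HTTP/web01.example.com@EXAMPLE.COM"
}
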
Project: hadoop-oss    File: NuCypherExtNameNodeProxiesClient.java
/**
 * Creates an explicitly HA-enabled proxy object.
 *
 * @param conf the configuration object
 * @param nameNodeUri the URI pointing either to a specific NameNode or to a
 *        logical nameservice.
 * @param xface the IPC interface which should be created
 * @param failoverProxyProvider Failover proxy provider
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createHAProxy(
    Configuration conf, URI nameNodeUri, Class<T> xface,
    AbstractNNFailoverProxyProvider<T> failoverProxyProvider) {
  Preconditions.checkNotNull(failoverProxyProvider);
  // HA case
  DfsClientConf config = new DfsClientConf(conf);
  T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
      RetryPolicies.failoverOnNetworkException(
          RetryPolicies.TRY_ONCE_THEN_FAIL, config.getMaxFailoverAttempts(),
          config.getMaxRetryAttempts(), config.getFailoverSleepBaseMillis(),
          config.getFailoverSleepMaxMillis()));

  Text dtService;
  if (failoverProxyProvider.useLogicalURI()) {
    dtService = HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri,
        HdfsConstants.HDFS_URI_SCHEME);
  } else {
    dtService = SecurityUtil.buildTokenService(
        NuCypherExtUtilClient.getNNAddress(nameNodeUri));
  }
  return new ProxyAndInfo<>(proxy, dtService,
      NuCypherExtUtilClient.getNNAddressCheckLogical(conf, nameNodeUri));
}
Project: big_data    File: YARNRunner.java
@VisibleForTesting
void addHistoryToken(Credentials ts) throws IOException, InterruptedException {
    /* check if we have a hsproxy, if not, no need */
    MRClientProtocol hsProxy = clientCache.getInitializedHSProxy();
    if (UserGroupInformation.isSecurityEnabled() && (hsProxy != null)) {
        /*
         * Note that getDelegationToken was called. Again, this is a hack for
         * Oozie to make sure we add history server delegation tokens to the
         * credentials.
         */
        RMDelegationTokenSelector tokenSelector = new RMDelegationTokenSelector();
        Text service = resMgrDelegate.getRMDelegationTokenService();
        if (tokenSelector.selectToken(service, ts.getAllTokens()) != null) {
            Text hsService = SecurityUtil.buildTokenService(hsProxy.getConnectAddress());
            if (ts.getToken(hsService) == null) {
                ts.addToken(hsService, getDelegationTokenFromHS(hsProxy));
            }
        }
    }
}
Project: kafka-connect-hdfs    File: TestWithSecureMiniDFSCluster.java
private Configuration createSecureConfig(String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
  conf.set(DFS_ENCRYPT_DATA_TRANSFER_KEY,
           "true"); // see https://issues.apache.org/jira/browse/HDFS-7431
  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
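SecurityUtil.setAuthenticationMethod, used at the top of this method, is a thin wrapper that records the method in the configuration; a minimal sketch:

// Sketch: writes the method name (e.g. "kerberos") under the key
// "hadoop.security.authentication"; it takes effect once
// UserGroupInformation.setConfiguration(conf) is called.
static void enableKerberos(Configuration conf) {
  SecurityUtil.setAuthenticationMethod(
      UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
  UserGroupInformation.setConfiguration(conf);
}
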
Project: hadoop    File: TimelineClientImpl.java
@SuppressWarnings("unchecked")
@Override
public void cancelDelegationToken(
    final Token<TimelineDelegationTokenIdentifier> timelineDT)
        throws IOException, YarnException {
  final boolean isTokenServiceAddrEmpty =
      timelineDT.getService().toString().isEmpty();
  final String scheme = isTokenServiceAddrEmpty ? null
      : (YarnConfiguration.useHttps(this.getConfig()) ? "https" : "http");
  final InetSocketAddress address = isTokenServiceAddrEmpty ? null
      : SecurityUtil.getTokenServiceAddr(timelineDT);
  PrivilegedExceptionAction<Void> cancelDTAction =
      new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
          // If the timeline DT to cancel is different from the cached one,
          // replace it. Set the token on every retry, because when an
          // exception happens DelegationTokenAuthenticatedURL resets it to null.
          if (!timelineDT.equals(token.getDelegationToken())) {
            token.setDelegationToken((Token) timelineDT);
          }
          DelegationTokenAuthenticatedURL authUrl =
              new DelegationTokenAuthenticatedURL(authenticator,
                  connConfigurator);
          // If the token service address is not available, fall back to use
          // the configured service address.
          final URI serviceURI = isTokenServiceAddrEmpty ? resURI
              : new URI(scheme, null, address.getHostName(),
              address.getPort(), RESOURCE_URI_STR, null, null);
          authUrl.cancelDelegationToken(serviceURI.toURL(), token, doAsUser);
          return null;
        }
      };
  operateDelegationToken(cancelDTAction);
}
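SecurityUtil.getTokenServiceAddr, used above, parses the token's service text (of the form host:port or ip:port) back into an InetSocketAddress; a minimal sketch:

// Sketch: recover the service address a token was stamped with.
static InetSocketAddress serviceAddrOf(Token<?> token) {
  // Equivalent to NetUtils.createSocketAddr(token.getService().toString()).
  return SecurityUtil.getTokenServiceAddr(token);
}
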
Project: hadoop    File: ClientRMProxy.java
@Unstable
public static Text getTokenService(Configuration conf, String address,
    String defaultAddr, int defaultPort) {
  if (HAUtil.isHAEnabled(conf)) {
    // Build a list of service addresses to form the service name
    ArrayList<String> services = new ArrayList<String>();
    YarnConfiguration yarnConf = new YarnConfiguration(conf);
    for (String rmId : HAUtil.getRMHAIds(conf)) {
      // Set RM_ID to get the corresponding RM_ADDRESS
      yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
      services.add(SecurityUtil.buildTokenService(
          yarnConf.getSocketAddr(address, defaultAddr, defaultPort))
          .toString());
    }
    return new Text(Joiner.on(',').join(services));
  }

  // Non-HA case - no need to set RM_ID
  return SecurityUtil.buildTokenService(conf.getSocketAddr(address,
    defaultAddr, defaultPort));
}
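SecurityUtil.buildTokenService produces the Text service name that tokens are keyed by; a sketch with an illustrative address:

// Sketch: the service name is "ip:port" by default, or "host:port" when
// hadoop.security.token.service.use_ip is set to false. The address must
// resolve when IPs are in use; the host below is illustrative.
static Text rmTokenService() {
  InetSocketAddress addr =
      NetUtils.createSocketAddrForHost("rm.example.com", 8032);
  return SecurityUtil.buildTokenService(addr);
}
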
Project: hadoop    File: ConverterUtils.java
/**
 * Convert a protobuf token into an RPC token and set its service. Intended
 * to be used for tokens other than RMDelegationToken. For
 * RMDelegationToken, use
 * {@link #convertFromYarn(org.apache.hadoop.yarn.api.records.Token,
 * org.apache.hadoop.io.Text)} instead.
 *
 * @param protoToken the yarn token
 * @param serviceAddr the connect address for the service
 * @return rpc token
 */
public static <T extends TokenIdentifier> Token<T> convertFromYarn(
    org.apache.hadoop.yarn.api.records.Token protoToken,
    InetSocketAddress serviceAddr) {
  Token<T> token = new Token<T>(protoToken.getIdentifier().array(),
                                protoToken.getPassword().array(),
                                new Text(protoToken.getKind()),
                                new Text(protoToken.getService()));
  if (serviceAddr != null) {
    SecurityUtil.setTokenService(token, serviceAddr);
  }
  return token;
}
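SecurityUtil.setTokenService is the write side of the same mechanism: it stamps the token with the service name built from the address, so selectors can later find it; a minimal sketch:

// Sketch: after this call, token.getService() equals
// SecurityUtil.buildTokenService(addr).
static void stampService(Token<?> token, InetSocketAddress addr) {
  SecurityUtil.setTokenService(token, addr);
}
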
Project: hadoop    File: TestRMContainerAllocator.java
@Override
public synchronized Allocation allocate(
    ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
    List<ContainerId> release, 
    List<String> blacklistAdditions, List<String> blacklistRemovals) {
  List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
  for (ResourceRequest req : ask) {
    ResourceRequest reqCopy = ResourceRequest.newInstance(req
        .getPriority(), req.getResourceName(), req.getCapability(), req
        .getNumContainers(), req.getRelaxLocality());
    askCopy.add(reqCopy);
  }
  SecurityUtil.setTokenServiceUseIp(false);
  lastAsk = ask;
  lastRelease = release;
  lastBlacklistAdditions = blacklistAdditions;
  lastBlacklistRemovals = blacklistRemovals;
  return super.allocate(
      applicationAttemptId, askCopy, release, 
      blacklistAdditions, blacklistRemovals);
}
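The SecurityUtil.setTokenServiceUseIp(false) call above flips a process-wide switch: token services are then named by hostname instead of resolved IP. A sketch of the effect, with an illustrative host:

// Sketch: with use-IP disabled, buildTokenService keeps the hostname.
// Normally this is configured once via hadoop.security.token.service.use_ip.
static Text hostBasedService() {
  SecurityUtil.setTokenServiceUseIp(false);
  return SecurityUtil.buildTokenService(
      NetUtils.createSocketAddrForHost("nm.example.com", 8041));
  // -> "nm.example.com:8041" rather than "<resolved ip>:8041"
}
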
Project: hadoop    File: YARNRunner.java
@VisibleForTesting
void addHistoryToken(Credentials ts) throws IOException, InterruptedException {
  /* check if we have a hsproxy, if not, no need */
  MRClientProtocol hsProxy = clientCache.getInitializedHSProxy();
  if (UserGroupInformation.isSecurityEnabled() && (hsProxy != null)) {
    /*
     * Note that getDelegationToken was called. Again, this is a hack for Oozie
     * to make sure we add history server delegation tokens to the credentials.
     */
    RMDelegationTokenSelector tokenSelector = new RMDelegationTokenSelector();
    Text service = resMgrDelegate.getRMDelegationTokenService();
    if (tokenSelector.selectToken(service, ts.getAllTokens()) != null) {
      Text hsService = SecurityUtil.buildTokenService(hsProxy
          .getConnectAddress());
      if (ts.getToken(hsService) == null) {
        ts.addToken(hsService, getDelegationTokenFromHS(hsProxy));
      }
    }
  }
}
Project: hadoop    File: TestBinaryTokenFile.java
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();

  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_PRINCIPAL, "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");

  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.checkExitOnShutdown(true);
  builder.numDataNodes(numSlaves);
  builder.format(true);
  builder.racks(null);
  dfsCluster = builder.build();

  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
  mrCluster.init(conf);
  mrCluster.start();

  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads(); 

  FileSystem fs = dfsCluster.getFileSystem(); 
  p1 = new Path("file1");
  p1 = fs.makeQualified(p1);
}
Project: hadoop    File: MRDelegationTokenRenewer.java
@Override
public long renew(Token<?> token, Configuration conf) throws IOException,
    InterruptedException {

  org.apache.hadoop.yarn.api.records.Token dToken =
      org.apache.hadoop.yarn.api.records.Token.newInstance(
        token.getIdentifier(), token.getKind().toString(),
        token.getPassword(), token.getService().toString());

  MRClientProtocol histProxy = instantiateHistoryProxy(conf,
      SecurityUtil.getTokenServiceAddr(token));
  try {
    RenewDelegationTokenRequest request = Records
        .newRecord(RenewDelegationTokenRequest.class);
    request.setDelegationToken(dToken);
    return histProxy.renewDelegationToken(request).getNextExpirationTime();
  } finally {
    stopHistoryProxy(histProxy);
  }

}
Project: hadoop    File: RpcProgramMountd.java
public RpcProgramMountd(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  // Note that RPC cache is not enabled
  super("mountd", "localhost", config.getInt(
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1,
      VERSION_3, registrationSocket, allowInsecurePorts);
  exports = new ArrayList<String>();
  exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
      NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT));
  this.hostsMatcher = NfsExports.getInstance(config);
  this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
  UserGroupInformation.setConfiguration(config);
  SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
  this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
}
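SecurityUtil.login, as used here, reads a keytab path and principal from the given configuration keys and performs a UserGroupInformation keytab login; it is a no-op when security is disabled. A sketch with illustrative key names:

// Sketch: the two key names are illustrative; real services pass their
// own, e.g. the DFS_NFS_* keys above. _HOST in the principal is expanded
// with the supplied hostname.
static void secureLogin(Configuration conf, String host) throws IOException {
  SecurityUtil.login(conf, "my.service.keytab.file",
      "my.service.kerberos.principal", host);
}
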
Project: hadoop    File: Server.java
private List<AuthMethod> getAuthMethods(SecretManager<?> secretManager,
                                           Configuration conf) {
  AuthenticationMethod confAuthenticationMethod =
      SecurityUtil.getAuthenticationMethod(conf);        
  List<AuthMethod> authMethods = new ArrayList<AuthMethod>();
  if (confAuthenticationMethod == AuthenticationMethod.TOKEN) {
    if (secretManager == null) {
      throw new IllegalArgumentException(AuthenticationMethod.TOKEN +
          " authentication requires a secret manager");
    } 
  } else if (secretManager != null) {
    LOG.debug(AuthenticationMethod.TOKEN +
        " authentication enabled for secret manager");
    // most preferred, go to the front of the line!
    authMethods.add(AuthenticationMethod.TOKEN.getAuthMethod());
  }
  authMethods.add(confAuthenticationMethod.getAuthMethod());        

  LOG.debug("Server accepts auth methods:" + authMethods);
  return authMethods;
}
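SecurityUtil.getAuthenticationMethod, used above, maps the hadoop.security.authentication key to the AuthenticationMethod enum and defaults to SIMPLE when the key is unset; a minimal sketch:

// Sketch: an unrecognized configured value raises IllegalArgumentException.
static boolean isKerberosEnabled(Configuration conf) {
  return SecurityUtil.getAuthenticationMethod(conf)
      == UserGroupInformation.AuthenticationMethod.KERBEROS;
}
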
Project: hadoop    File: HftpFileSystem.java
@SuppressWarnings("unchecked")
@Override
public long renewDelegationToken(final Token<?> token) throws IOException {
  // update the kerberos credentials, if they are coming from a keytab
  UserGroupInformation connectUgi = ugi.getRealUser();
  if (connectUgi == null) {
    connectUgi = ugi;
  }
  try {
    return connectUgi.doAs(new PrivilegedExceptionAction<Long>() {
      @Override
      public Long run() throws Exception {
        InetSocketAddress serviceAddr = SecurityUtil
            .getTokenServiceAddr(token);
        return DelegationTokenFetcher.renewDelegationToken(connectionFactory,
            DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
            (Token<DelegationTokenIdentifier>) token);
      }
    });
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
Project: hadoop    File: HftpFileSystem.java
@SuppressWarnings("unchecked")
@Override
public void cancelDelegationToken(final Token<?> token) throws IOException {
  UserGroupInformation connectUgi = ugi.getRealUser();
  if (connectUgi == null) {
    connectUgi = ugi;
  }
  try {
    connectUgi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        InetSocketAddress serviceAddr = SecurityUtil
            .getTokenServiceAddr(token);
        DelegationTokenFetcher.cancelDelegationToken(connectionFactory,
            DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
            (Token<DelegationTokenIdentifier>) token);
        return null;
      }
    });
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
Project: hadoop    File: DelegationTokenSelector.java
/**
 * Select the delegation token for hdfs.  The port will be rewritten to
 * the port of hdfs.service.host_$nnAddr, or the default rpc namenode port. 
 * This method should only be called by non-hdfs filesystems that do not
 * use the rpc port to acquire tokens.  Ex. webhdfs, hftp 
 * @param nnUri of the remote namenode
 * @param tokens as a collection
 * @param conf hadoop configuration
 * @return Token
 */
public Token<DelegationTokenIdentifier> selectToken(
    final URI nnUri, Collection<Token<?>> tokens,
    final Configuration conf) {
  // This guesses the remote cluster's RPC service port. The current token
  // design assumes it's the same as the local cluster's RPC port unless a
  // config key is set. There should be a way to automatically and correctly
  // determine the value.
  Text serviceName = SecurityUtil.buildTokenService(nnUri);
  final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);

  int nnRpcPort = NameNode.DEFAULT_PORT;
  if (nnServiceName != null) {
    nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort(); 
  }
  // use original hostname from the uri to avoid unintentional host resolving
  serviceName = SecurityUtil.buildTokenService(
        NetUtils.createSocketAddrForHost(nnUri.getHost(), nnRpcPort));

  return selectToken(serviceName, tokens);
}
Project: hadoop    File: EditLogTailer.java
public void catchupDuringFailover() throws IOException {
  Preconditions.checkState(tailerThread == null ||
      !tailerThread.isAlive(),
      "Tailer thread should not be running once failover starts");
  // Important to do tailing as the login user, in case the shared
  // edits storage is implemented by a JournalManager that depends
  // on security credentials to access the logs (eg QuorumJournalManager).
  SecurityUtil.doAsLoginUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try {
        // It is already under the full name system lock and the checkpointer
        // thread is already stopped. No need to acquire any other lock.
        doTailEdits();
      } catch (InterruptedException e) {
        throw new IOException(e);
      }
      return null;
    }
  });
}
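SecurityUtil.doAsLoginUser wraps UserGroupInformation.getLoginUser().doAs(...), rethrowing InterruptedException as IOException; a minimal sketch of the pattern (the file system and path are illustrative):

// Sketch: run an action with the daemon's login credentials rather than
// whatever user is current on this thread.
static FileStatus[] listAsLoginUser(final FileSystem fs, final Path path)
    throws IOException {
  return SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<FileStatus[]>() {
        @Override
        public FileStatus[] run() throws IOException {
          return fs.listStatus(path);
        }
      });
}
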
Project: hadoop    File: BootstrapStandby.java
@Override
public int run(String[] args) throws Exception {
  parseArgs(args);
  parseConfAndFindOtherNN();
  NameNode.checkAllowFormat(conf);

  InetSocketAddress myAddr = NameNode.getAddress(conf);
  SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
      DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());

  return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
    @Override
    public Integer run() {
      try {
        return doRun();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  });
}
Project: hadoop    File: NameNode.java
private void startTrashEmptier(final Configuration conf) throws IOException {
  long trashInterval =
      conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT);
  if (trashInterval == 0) {
    return;
  } else if (trashInterval < 0) {
    throw new IOException("Cannot start trash emptier with negative interval."
        + " Set " + FS_TRASH_INTERVAL_KEY + " to a positive value.");
  }

  // This may be called from the transitionToActive code path, in which
  // case the current user is the administrator, not the NN. The trash
  // emptier needs to run as the NN. See HDFS-3972.
  FileSystem fs = SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<FileSystem>() {
        @Override
        public FileSystem run() throws IOException {
          return FileSystem.get(conf);
        }
      });
  this.emptier = new Thread(new Trash(fs, conf).getEmptier(), "Trash Emptier");
  this.emptier.setDaemon(true);
  this.emptier.start();
}
Project: hadoop    File: DFSAdmin.java
/**
 * Download the most recent fsimage from the name node, and save it to a local
 * file in the given directory.
 * 
 * @param argv
 *          List of command line parameters.
 * @param idx
 *          The index of the command that is being processed.
 * @return an exit code indicating success or failure.
 * @throws IOException
 */
public int fetchImage(final String[] argv, final int idx) throws IOException {
  Configuration conf = getConf();
  final URL infoServer = DFSUtil.getInfoServer(
      HAUtil.getAddressOfActive(getDFS()), conf,
      DFSUtil.getHttpClientScheme(conf)).toURL();
  SecurityUtil.doAsCurrentUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      TransferFsImage.downloadMostRecentImageToDirectory(infoServer,
          new File(argv[idx]));
      return null;
    }
  });
  return 0;
}
Project: hadoop    File: HttpServer2.java
private void initSpnego(Configuration conf, String hostName,
    String usernameConfKey, String keytabConfKey) throws IOException {
  Map<String, String> params = new HashMap<>();
  String principalInConf = conf.get(usernameConfKey);
  if (principalInConf != null && !principalInConf.isEmpty()) {
    params.put("kerberos.principal", SecurityUtil.getServerPrincipal(
        principalInConf, hostName));
  }
  String httpKeytab = conf.get(keytabConfKey);
  if (httpKeytab != null && !httpKeytab.isEmpty()) {
    params.put("kerberos.keytab", httpKeytab);
  }
  params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");

  defineFilter(webAppContext, SPNEGO_FILTER,
               AuthenticationFilter.class.getName(), params, null);
}
Project: hadoop    File: JournalNode.java
/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");

  validateAndCreateJournalDir(localDir);

  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());

  registerJNMXBean();

  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();

  httpServerURI = httpServer.getServerURI().toString();

  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
Project: hadoop    File: TestWebHdfsUrl.java
private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
    Configuration conf) throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
        ugi.getUserName()), null, null);
    FSNamesystem namesystem = mock(FSNamesystem.class);
    DelegationTokenSecretManager dtSecretManager = new DelegationTokenSecretManager(
        86400000, 86400000, 86400000, 86400000, namesystem);
    dtSecretManager.startThreads();
    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
        dtId, dtSecretManager);
    SecurityUtil.setTokenService(
        token, NetUtils.createSocketAddr(uri.getAuthority()));
    token.setKind(WebHdfsFileSystem.TOKEN_KIND);
    ugi.addToken(token);
  }
  return (WebHdfsFileSystem) FileSystem.get(uri, conf);
}
Project: hadoop    File: SaslDataTransferTestCase.java
/**
 * Creates configuration for starting a secure cluster.
 *
 * @param dataTransferProtection supported QOPs
 * @return configuration for starting a secure cluster
 * @throws Exception if there is any failure
 */
protected HdfsConfiguration createSecureConfig(
    String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);

  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
Project: hadoop-oss    File: ZKFailoverController.java
public int run(final String[] args) throws Exception {
  if (!localTarget.isAutoFailoverEnabled()) {
    LOG.fatal("Automatic failover is not enabled for " + localTarget + "." +
        " Please ensure that automatic failover is enabled in the " +
        "configuration before running the ZK failover controller.");
    return ERR_CODE_AUTO_FAILOVER_NOT_ENABLED;
  }
  loginAsFCUser();
  try {
    return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
      @Override
      public Integer run() {
        try {
          return doRun(args);
        } catch (Exception t) {
          throw new RuntimeException(t);
        } finally {
          if (elector != null) {
            elector.terminateConnection();
          }
        }
      }
    });
  } catch (RuntimeException rte) {
    LOG.fatal("The failover controller encounters runtime error: " + rte);
    throw (Exception)rte.getCause();
  }
}
Project: hadoop-oss    File: RollingFileSystemSink.java
@Override
public void init(SubsetConfiguration metrics2Properties) {
  properties = metrics2Properties;
  basePath = new Path(properties.getString(BASEPATH_KEY, BASEPATH_DEFAULT));
  source = properties.getString(SOURCE_KEY, SOURCE_DEFAULT);
  ignoreError = properties.getBoolean(IGNORE_ERROR_KEY, false);
  allowAppend = properties.getBoolean(ALLOW_APPEND_KEY, false);

  conf = loadConf();
  UserGroupInformation.setConfiguration(conf);

  // Don't do secure setup if it's not needed.
  if (UserGroupInformation.isSecurityEnabled()) {
    // Validate config so that we don't get an NPE
    checkForProperty(properties, KEYTAB_PROPERTY_KEY);
    checkForProperty(properties, USERNAME_PROPERTY_KEY);


    try {
      // Login as whoever we're supposed to be and let the hostname be pulled
      // from localhost. If security isn't enabled, this does nothing.
      SecurityUtil.login(conf, properties.getString(KEYTAB_PROPERTY_KEY),
          properties.getString(USERNAME_PROPERTY_KEY));
    } catch (IOException ex) {
      throw new MetricsException("Error logging in securely: ["
          + ex.toString() + "]", ex);
    }
  }
}
Project: hadoop-oss    File: KMSClientProvider.java
private Text getDelegationTokenService() throws IOException {
  URL url = new URL(kmsUrl);
  InetSocketAddress addr = new InetSocketAddress(url.getHost(),
      url.getPort());
  Text dtService = SecurityUtil.buildTokenService(addr);
  return dtService;
}
Project: hadoop-oss    File: KMSPREClientProvider.java
private Text getDelegationTokenService() throws IOException {
  URL url = new URL(kmsUrl);
  InetSocketAddress addr = new InetSocketAddress(url.getHost(),
      url.getPort());
  Text dtService = SecurityUtil.buildTokenService(addr);
  return dtService;
}
Project: hadoop-oss    File: NetUtils.java
private static String canonicalizeHost(String host) {
  // check if the host has already been canonicalized
  String fqHost = canonicalizedHostCache.get(host);
  if (fqHost == null) {
    try {
      fqHost = SecurityUtil.getByName(host).getHostName();
      // slight race condition, but won't hurt
      canonicalizedHostCache.putIfAbsent(host, fqHost);
    } catch (UnknownHostException e) {
      fqHost = host;
    }
  }
  return fqHost;
}
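SecurityUtil.getByName, used here in place of InetAddress.getByName, resolves through SecurityUtil's internal host resolver, which may fully qualify short names; a minimal sketch mirroring the fallback above:

// Sketch: resolve to a (possibly fully qualified) hostname, falling back
// to the raw input when resolution fails.
static String fqdnOf(String host) {
  try {
    return SecurityUtil.getByName(host).getHostName();
  } catch (UnknownHostException e) {
    return host;
  }
}
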
Project: hadoop-oss    File: NetUtils.java
/**
 * Checks if {@code host} is a local host name and return {@link InetAddress}
 * corresponding to that address.
 * 
 * @param host the specified host
 * @return a valid local {@link InetAddress} or null
 * @throws SocketException if an I/O error occurs
 */
public static InetAddress getLocalInetAddress(String host)
    throws SocketException {
  if (host == null) {
    return null;
  }
  InetAddress addr = null;
  try {
    addr = SecurityUtil.getByName(host);
    if (NetworkInterface.getByInetAddress(addr) == null) {
      addr = null; // Not a local address
    }
  } catch (UnknownHostException ignore) { }
  return addr;
}
Project: hadoop    File: MiniRPCBenchmark.java
void connectToServerAndGetDelegationToken(
    final Configuration conf, final InetSocketAddress addr) throws IOException {
  MiniProtocol client = null;
  try {
    UserGroupInformation current = UserGroupInformation.getCurrentUser();
    UserGroupInformation proxyUserUgi = 
      UserGroupInformation.createProxyUserForTesting(
          MINI_USER, current, GROUP_NAMES);

    try {
      client =  proxyUserUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
        @Override
        public MiniProtocol run() throws IOException {
          MiniProtocol p = RPC.getProxy(MiniProtocol.class,
              MiniProtocol.versionID, addr, conf);
          Token<TestDelegationTokenIdentifier> token;
          token = p.getDelegationToken(new Text(RENEWER));
          currentUgi = UserGroupInformation.createUserForTesting(MINI_USER, 
              GROUP_NAMES);
          SecurityUtil.setTokenService(token, addr);
          currentUgi.addToken(token);
          return p;
        }
      });
    } catch (InterruptedException e) {
      Assert.fail(Arrays.toString(e.getStackTrace()));
    }
  } finally {
    RPC.stopProxy(client);
  }
}
Project: hadoop-oss    File: MiniRPCBenchmark.java
void connectToServerAndGetDelegationToken(
    final Configuration conf, final InetSocketAddress addr) throws IOException {
  MiniProtocol client = null;
  try {
    UserGroupInformation current = UserGroupInformation.getCurrentUser();
    UserGroupInformation proxyUserUgi = 
      UserGroupInformation.createProxyUserForTesting(
          MINI_USER, current, GROUP_NAMES);

    try {
      client =  proxyUserUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
        @Override
        public MiniProtocol run() throws IOException {
          MiniProtocol p = RPC.getProxy(MiniProtocol.class,
              MiniProtocol.versionID, addr, conf);
          Token<TestDelegationTokenIdentifier> token;
          token = p.getDelegationToken(new Text(RENEWER));
          currentUgi = UserGroupInformation.createUserForTesting(MINI_USER, 
              GROUP_NAMES);
          SecurityUtil.setTokenService(token, addr);
          currentUgi.addToken(token);
          return p;
        }
      });
    } catch (InterruptedException e) {
      Assert.fail(Arrays.toString(e.getStackTrace()));
    }
  } finally {
    RPC.stopProxy(client);
  }
}
Project: hadoop-oss    File: MiniRPCBenchmark.java
public static void main(String[] args) throws Exception {
  System.out.println("Benchmark: RPC session establishment.");
  if(args.length < 1)
    printUsage();

  Configuration conf = new Configuration();
  int count = Integer.parseInt(args[0]);
  if(args.length > 1)
    conf.set(KEYTAB_FILE_KEY, args[1]);
  if(args.length > 2)
    conf.set(USER_NAME_KEY, args[2]);
  boolean useDelegationToken = false;
  if(args.length > 3)
    useDelegationToken = args[3].equalsIgnoreCase("useToken");
  Level l = Level.ERROR;
  if(args.length > 4)
    l = Level.toLevel(args[4]);

  MiniRPCBenchmark mb = new MiniRPCBenchmark(l);
  long elapsedTime = 0;
  if(useDelegationToken) {
    System.out.println(
        "Running MiniRPCBenchmark with delegation token authentication.");
    elapsedTime = mb.runMiniBenchmarkWithDelegationToken(
                            conf, count, KEYTAB_FILE_KEY, USER_NAME_KEY);
  } else {
    String auth = SecurityUtil.getAuthenticationMethod(conf).toString();
    System.out.println(
        "Running MiniRPCBenchmark with " + auth + " authentication.");
    elapsedTime = mb.runMiniBenchmark(
                            conf, count, KEYTAB_FILE_KEY, USER_NAME_KEY);
  }
  System.out.println(org.apache.hadoop.util.VersionInfo.getVersion());
  System.out.println("Number  of  connects: " + count);
  System.out.println("Average connect time: " + ((double)elapsedTime/count));
}
Project: hadoop    File: TimelineClientImpl.java
@SuppressWarnings("unchecked")
@Override
public long renewDelegationToken(
    final Token<TimelineDelegationTokenIdentifier> timelineDT)
        throws IOException, YarnException {
  final boolean isTokenServiceAddrEmpty =
      timelineDT.getService().toString().isEmpty();
  final String scheme = isTokenServiceAddrEmpty ? null
      : (YarnConfiguration.useHttps(this.getConfig()) ? "https" : "http");
  final InetSocketAddress address = isTokenServiceAddrEmpty ? null
      : SecurityUtil.getTokenServiceAddr(timelineDT);
  PrivilegedExceptionAction<Long> renewDTAction =
      new PrivilegedExceptionAction<Long>() {

        @Override
        public Long run() throws Exception {
          // If the timeline DT to renew is different from the cached one,
          // replace it. Set the token on every retry, because when an
          // exception happens DelegationTokenAuthenticatedURL resets it to null.
          if (!timelineDT.equals(token.getDelegationToken())) {
            token.setDelegationToken((Token) timelineDT);
          }
          DelegationTokenAuthenticatedURL authUrl =
              new DelegationTokenAuthenticatedURL(authenticator,
                  connConfigurator);
          // If the token service address is not available, fall back to use
          // the configured service address.
          final URI serviceURI = isTokenServiceAddrEmpty ? resURI
              : new URI(scheme, null, address.getHostName(),
              address.getPort(), RESOURCE_URI_STR, null, null);
          return authUrl
              .renewDelegationToken(serviceURI.toURL(), token, doAsUser);
        }
      };
  return (Long) operateDelegationToken(renewDTAction);
}
Project: ditb    File: AuthFilter.java
/**
 * Returns the configuration to be used by the authentication filter
 * to initialize the authentication handler.
 *
 * This filter retrieves all HBase configuration entries and passes those
 * starting with REST_PREFIX to the authentication handler. This makes it
 * possible to plug in different authentication handlers.
 */
@Override
protected Properties getConfiguration(
    String configPrefix, FilterConfig filterConfig) throws ServletException {
  Properties props = super.getConfiguration(configPrefix, filterConfig);
  //setting the cookie path to root '/' so it is used for all resources.
  props.setProperty(AuthenticationFilter.COOKIE_PATH, "/");

  Configuration conf = HBaseConfiguration.create();
  for (Map.Entry<String, String> entry : conf) {
    String name = entry.getKey();
    if (name.startsWith(REST_PREFIX)) {
      String value = entry.getValue();
      if(name.equals(REST_AUTHENTICATION_PRINCIPAL))  {
        try {
          String machineName = Strings.domainNamePointerToHostName(
            DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"),
              conf.get(REST_DNS_NAMESERVER, "default")));
          value = SecurityUtil.getServerPrincipal(value, machineName);
        } catch (IOException ie) {
          throw new ServletException("Failed to retrieve server principal", ie);
        }
      }
      LOG.debug("Setting property " + name + "=" + value);
      name = name.substring(REST_PREFIX_LEN);
      props.setProperty(name, value);
    }
  }
  return props;
}
Project: hadoop    File: BuilderUtils.java
@VisibleForTesting
public static Token newContainerToken(NodeId nodeId,
    byte[] password, ContainerTokenIdentifier tokenIdentifier) {
  // RPC layer client expects ip:port as service for tokens
  InetSocketAddress addr =
      NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort());
  // NOTE: use SecurityUtil.setTokenService if this becomes a "real" token
  Token containerToken =
      newToken(Token.class, tokenIdentifier.getBytes(),
        ContainerTokenIdentifier.KIND.toString(), password, SecurityUtil
          .buildTokenService(addr).toString());
  return containerToken;
}
Project: hadoop    File: ResourceManager.java
protected void doSecureLogin() throws IOException {
  InetSocketAddress socAddr = getBindAddress(conf);
  SecurityUtil.login(this.conf, YarnConfiguration.RM_KEYTAB,
      YarnConfiguration.RM_PRINCIPAL, socAddr.getHostName());

  // if security is enabled, set rmLoginUGI as UGI of loginUser
  if (UserGroupInformation.isSecurityEnabled()) {
    this.rmLoginUGI = UserGroupInformation.getLoginUser();
  }
}
Project: hadoop    File: TestAMAuthorization.java
@SuppressWarnings("unchecked")
public static Token<? extends TokenIdentifier> setupAndReturnAMRMToken(
    InetSocketAddress rmBindAddress,
    Collection<Token<? extends TokenIdentifier>> allTokens) {
  for (Token<? extends TokenIdentifier> token : allTokens) {
    if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
      SecurityUtil.setTokenService(token, rmBindAddress);
      return (Token<AMRMTokenIdentifier>) token;
    }
  }
  return null;
}
Project: hadoop    File: MockRMWithCustomAMLauncher.java
@Override
protected ApplicationMasterLauncher createAMLauncher() {
  return new ApplicationMasterLauncher(getRMContext()) {
    @Override
    protected Runnable createRunnableLauncher(RMAppAttempt application,
        AMLauncherEventType event) {
      return new AMLauncher(context, application, event, getConfig()) {
        @Override
        protected ContainerManagementProtocol getContainerMgrProxy(
            ContainerId containerId) {
          return containerManager;
        }
        @Override
        protected Token<AMRMTokenIdentifier> createAndSetAMRMToken() {
          Token<AMRMTokenIdentifier> amRmToken =
              super.createAndSetAMRMToken();
          InetSocketAddress serviceAddr =
              getConfig().getSocketAddr(
                YarnConfiguration.RM_SCHEDULER_ADDRESS,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
          SecurityUtil.setTokenService(amRmToken, serviceAddr);
          return amRmToken;
        }
      };
    }
  };
}
Project: hadoop    File: YarnClientImpl.java
private static String getTimelineDelegationTokenRenewer(Configuration conf)
    throws IOException, YarnException  {
  // Parse the RM daemon user if it exists in the config
  String rmPrincipal = conf.get(YarnConfiguration.RM_PRINCIPAL);
  String renewer = null;
  if (rmPrincipal != null && rmPrincipal.length() > 0) {
    String rmHost = conf.getSocketAddr(
        YarnConfiguration.RM_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADDRESS,
        YarnConfiguration.DEFAULT_RM_PORT).getHostName();
    renewer = SecurityUtil.getServerPrincipal(rmPrincipal, rmHost);
  }
  return renewer;
}