Example source code for the Java class org.apache.hadoop.security.UserGroupInformation
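
All of the snippets on this page revolve around the same small API surface: obtaining the current or login user and running work under a chosen identity with doAs(). The following minimal sketch shows that pattern; the class name UgiDoAsExample and the path /tmp/ugi-example are illustrative placeholders, not taken from any of the projects listed below.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiDoAsExample {
  public static void main(String[] args) throws Exception {
    // The identity of the running process (OS user, or Kerberos principal in secure mode).
    UserGroupInformation current = UserGroupInformation.getCurrentUser();
    System.out.println("process user: " + current.getShortUserName());

    // Run an action under a different identity; many of the snippets below use this pattern
    // with createRemoteUser, createProxyUser or createUserForTesting.
    UserGroupInformation remote = UserGroupInformation.createRemoteUser("JobTracker");
    remote.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Hypothetical path, used only to show that FileSystem calls run as "JobTracker".
        System.out.println(fs.exists(new Path("/tmp/ugi-example")));
        return null;
      }
    });
  }
}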

Project: hadoop    File: HAServiceProtocolClientSideTranslatorPB.java
public HAServiceProtocolClientSideTranslatorPB(
    InetSocketAddress addr, Configuration conf,
    SocketFactory socketFactory, int timeout) throws IOException {
  RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
      ProtobufRpcEngine.class);
  rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
      RPC.getProtocolVersion(HAServiceProtocolPB.class), addr,
      UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
}
Project: hadoop-oss    File: ProtobufRpcEngine.java
@Override
@SuppressWarnings("unchecked")
public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
    InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
    SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy,
    AtomicBoolean fallbackToSimpleAuth) throws IOException {

  final Invoker invoker = new Invoker(protocol, addr, ticket, conf, factory,
      rpcTimeout, connectionRetryPolicy, fallbackToSimpleAuth);
  return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance(
      protocol.getClassLoader(), new Class[]{protocol}, invoker), false);
}
Project: hadoop    File: ApplicationMaster.java
private static void publishApplicationAttemptEvent(
    final TimelineClient timelineClient, String appAttemptId,
    DSEvent appEvent, String domainId, UserGroupInformation ugi) {
  final TimelineEntity entity = new TimelineEntity();
  entity.setEntityId(appAttemptId);
  entity.setEntityType(DSEntity.DS_APP_ATTEMPT.toString());
  entity.setDomainId(domainId);
  entity.addPrimaryFilter("user", ugi.getShortUserName());
  TimelineEvent event = new TimelineEvent();
  event.setEventType(appEvent.toString());
  event.setTimestamp(System.currentTimeMillis());
  entity.addEvent(event);
  try {
    timelineClient.putEntities(entity);
  } catch (YarnException | IOException e) {
    LOG.error("App Attempt "
        + (appEvent.equals(DSEvent.DS_APP_ATTEMPT_START) ? "start" : "end")
        + " event could not be published for "
        + appAttemptId.toString(), e);
  }
}
Project: hadoop    File: TestLogsCLI.java
private static void uploadEmptyContainerLogIntoRemoteDir(UserGroupInformation ugi,
    Configuration configuration, List<String> rootLogDirs, NodeId nodeId,
    ContainerId containerId, Path appDir, FileSystem fs) throws Exception {
  Path path =
      new Path(appDir, LogAggregationUtils.getNodeString(nodeId)
          + System.currentTimeMillis());
  AggregatedLogFormat.LogWriter writer =
      new AggregatedLogFormat.LogWriter(configuration, path, ugi);
  writer.writeApplicationOwner(ugi.getUserName());

  Map<ApplicationAccessType, String> appAcls =
      new HashMap<ApplicationAccessType, String>();
  appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
  writer.writeApplicationACLs(appAcls);
  DataOutputStream out = writer.getWriter().prepareAppendKey(-1);
  new AggregatedLogFormat.LogKey(containerId).write(out);
  out.close();
  out = writer.getWriter().prepareAppendValue(-1);
  new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
    UserGroupInformation.getCurrentUser().getShortUserName()).write(out,
    new HashSet<File>());
  out.close();
  writer.close();
}
Project: hadoop    File: TestDelegationTokensWithHA.java
private static void doRenewOrCancel(
    final Token<DelegationTokenIdentifier> token, final Configuration conf,
    final TokenTestAction action)
    throws IOException, InterruptedException {
  UserGroupInformation.createRemoteUser("JobTracker").doAs(
      new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          switch (action) {
          case RENEW:
            token.renew(conf);
            break;
          case CANCEL:
            token.cancel(conf);
            break;
          default:
            fail("bad action:" + action);
          }
          return null;
        }
      });
}
Project: hadoop    File: TestReadWhileWriting.java
static void checkFile(Path p, int expectedsize, final Configuration conf
    ) throws IOException, InterruptedException {
  //open the file with another user account
  final String username = UserGroupInformation.getCurrentUser().getShortUserName()
      + "_" + ++userCount;

  UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username, 
                               new String[] {"supergroup"});

  final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);

  final HdfsDataInputStream in = (HdfsDataInputStream)fs.open(p);

  //Check visible length
  Assert.assertTrue(in.getVisibleLength() >= expectedsize);

  //Able to read?
  for(int i = 0; i < expectedsize; i++) {
    Assert.assertEquals((byte)i, (byte)in.read());  
  }

  in.close();
}
Project: hadoop    File: TestClientRMService.java
private void checkTokenCancellation(ClientRMService rmService,
    UserGroupInformation owner, UserGroupInformation renewer)
    throws IOException, YarnException {
  RMDelegationTokenIdentifier tokenIdentifier =
      new RMDelegationTokenIdentifier(new Text(owner.getUserName()),
        new Text(renewer.getUserName()), null);
  Token<?> token =
      new Token<RMDelegationTokenIdentifier>(tokenIdentifier, dtsm);
  org.apache.hadoop.yarn.api.records.Token dToken =
      BuilderUtils.newDelegationToken(token.getIdentifier(), token.getKind()
        .toString(), token.getPassword(), token.getService().toString());
  CancelDelegationTokenRequest request =
      Records.newRecord(CancelDelegationTokenRequest.class);
  request.setDelegationToken(dToken);
  rmService.cancelDelegationToken(request);
}
Project: hadoop    File: TestJHSSecurity.java
private Token getDelegationToken(
    final UserGroupInformation loggedInUser,
    final MRClientProtocol hsService, final String renewerString)
    throws IOException, InterruptedException {
  // Get the delegation token directly, as it is a little difficult to set up
  // the Kerberos-based RPC.
  Token token = loggedInUser
      .doAs(new PrivilegedExceptionAction<Token>() {
        @Override
        public Token run() throws IOException {
          GetDelegationTokenRequest request = Records
              .newRecord(GetDelegationTokenRequest.class);
          request.setRenewer(renewerString);
          return hsService.getDelegationToken(request).getDelegationToken();
        }

      });
  return token;
}
Project: hadoop    File: TestFSPermissionChecker.java
private void assertPermissionDenied(UserGroupInformation user, String path,
    FsAction access) throws IOException {
  try {
    INodesInPath iip = dir.getINodesInPath(path, true);
    dir.getPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(iip,
      false, null, null, access, null, false);
    fail("expected AccessControlException for user + " + user + ", path = " +
      path + ", access = " + access);
  } catch (AccessControlException e) {
    assertTrue("Permission denied messages must carry the username",
            e.getMessage().contains(user.getUserName().toString()));
    assertTrue("Permission denied messages must carry the path parent",
            e.getMessage().contains(
                new Path(path).getParent().toUri().getPath()));
  }
}
Project: hadoop-oss    File: GetGroupsBase.java
/**
 * Get the groups for the users given and print formatted output to the
 * {@link PrintStream} configured earlier.
 */
@Override
public int run(String[] args) throws Exception {
  if (args.length == 0) {
    args = new String[] { UserGroupInformation.getCurrentUser().getUserName() }; 
  }

  for (String username : args) {
    StringBuilder sb = new StringBuilder();
    sb.append(username + " :");
    for (String group : getUgmProtocol().getGroupsForUser(username)) {
      sb.append(" ");
      sb.append(group);
    }
    out.println(sb);
  }

  return 0;
}
Project: hadoop    File: TimelineDataManager.java
/**
 * Get the single timeline entity that the given user has access to. The
 * meaning of each argument has been documented with
 * {@link TimelineReader#getEntity}.
 * 
 * @see TimelineReader#getEntity
 */
public TimelineEntity getEntity(
    String entityType,
    String entityId,
    EnumSet<Field> fields,
    UserGroupInformation callerUGI) throws YarnException, IOException {
  TimelineEntity entity = null;
  entity =
      store.getEntity(entityId, entityType, fields);
  if (entity != null) {
    addDefaultDomainIdIfAbsent(entity);
    // check ACLs
    if (!timelineACLsManager.checkAccess(
        callerUGI, ApplicationAccessType.VIEW_APP, entity)) {
      entity = null;
    }
  }
  return entity;
}
Project: hadoop    File: RMWebServices.java
/**
 * Generates a new ApplicationId which is then sent to the client
 * 
 * @param hsr
 *          the servlet request
 * @return Response containing the app id and the maximum resource
 *         capabilities
 * @throws AuthorizationException
 * @throws IOException
 * @throws InterruptedException
 */
@POST
@Path("/apps/new-application")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public Response createNewApplication(@Context HttpServletRequest hsr)
    throws AuthorizationException, IOException, InterruptedException {
  init();
  UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
  if (callerUGI == null) {
    throw new AuthorizationException("Unable to obtain user name, "
        + "user not authenticated");
  }
  if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
    String msg = "The default static user cannot carry out this operation.";
    return Response.status(Status.FORBIDDEN).entity(msg).build();
  }

  NewApplication appId = createNewApplication();
  return Response.status(Status.OK).entity(appId).build();

}
Project: hadoop    File: TestSubmitApplicationWithRMHA.java
@Test
public void
    testHandleRMHABeforeSubmitApplicationCallWithSavedApplicationState()
        throws Exception {
  // start two RMs, and transit rm1 to active, rm2 to standby
  startRMs();

  // get a new applicationId from rm1
  ApplicationId appId = rm1.getNewAppId().getApplicationId();

  // Do the failover
  explicitFailover();

  // submit the application with previous assigned applicationId
  // to current active rm: rm2
  RMApp app1 =
      rm2.submitApp(200, "", UserGroupInformation
          .getCurrentUser().getShortUserName(), null, false, null,
          configuration.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
              YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null,
          false, false, true, appId);

  // verify application submission
  verifySubmitApp(rm2, app1, appId);
}
Project: hadoop    File: TestAppController.java
/**
 *  Test of the 'jobCounters' method: it should either print an error message or set the CountersPage class for rendering.
 */
@Test
public void testGetJobCounters() {

  when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
      .thenReturn(false);

  appController.jobCounters();
  verify(appController.response()).setContentType(MimeType.TEXT);
  assertEquals(
      "Access denied: User user does not have permission to view job job_01_01",
      appController.getData());
  when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
      .thenReturn(true);

  appController.getProperty().remove(AMParams.JOB_ID);
  appController.jobCounters();
  assertEquals(
      "Access denied: User user does not have permission to view job job_01_01Bad Request: Missing job ID",
      appController.getData());

  appController.getProperty().put(AMParams.JOB_ID, "job_01_01");
  appController.jobCounters();
  assertEquals(CountersPage.class, appController.getClazz());
}
Project: hadoop    File: TimelineACLsManager.java
public boolean checkAccess(UserGroupInformation callerUGI,
    TimelineDomain domain) throws YarnException, IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Verifying the access of "
        + (callerUGI == null ? null : callerUGI.getShortUserName())
        + " on the timeline domain " + domain);
  }

  if (!adminAclsManager.areACLsEnabled()) {
    return true;
  }

  String owner = domain.getOwner();
  if (owner == null || owner.length() == 0) {
    throw new YarnException("Owner information of the timeline domain "
        + domain.getId() + " is corrupted.");
  }
  if (callerUGI != null
      && (adminAclsManager.isAdmin(callerUGI) ||
          callerUGI.getShortUserName().equals(owner))) {
    return true;
  }
  return false;
}
Project: hadoop    File: ClientRMService.java
@Override
public CancelDelegationTokenResponse cancelDelegationToken(
    CancelDelegationTokenRequest request) throws YarnException {
  try {
    if (!isAllowedDelegationTokenOp()) {
      throw new IOException(
          "Delegation Token can be cancelled only with kerberos authentication");
    }
    org.apache.hadoop.yarn.api.records.Token protoToken = request.getDelegationToken();
    Token<RMDelegationTokenIdentifier> token = new Token<RMDelegationTokenIdentifier>(
        protoToken.getIdentifier().array(), protoToken.getPassword().array(),
        new Text(protoToken.getKind()), new Text(protoToken.getService()));

    String user = UserGroupInformation.getCurrentUser().getUserName();
    rmDTSecretManager.cancelToken(token, user);
    return Records.newRecord(CancelDelegationTokenResponse.class);
  } catch (IOException e) {
    throw RPCUtil.getRemoteException(e);
  }
}
Project: hadoop    File: TestProxyUsers.java
@Test
public void testIPRange() {
  Configuration conf = new Configuration();
  conf.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER_NAME),
      "*");
  conf.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(REAL_USER_NAME),
      PROXY_IP_RANGE);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

  // First try proxying a group that's allowed
  UserGroupInformation realUserUgi = UserGroupInformation
      .createRemoteUser(REAL_USER_NAME);
  UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
      PROXY_USER_NAME, realUserUgi, GROUP_NAMES);

  // From good IP
  assertAuthorized(proxyUserUgi, "10.222.0.0");
  // From bad IP
  assertNotAuthorized(proxyUserUgi, "10.221.0.0");
}
Project: hadoop    File: GridmixJob.java
protected GridmixJob(final Configuration conf, long submissionMillis, 
                     final String name) throws IOException {
  submissionTimeNanos = TimeUnit.NANOSECONDS.convert(
      submissionMillis, TimeUnit.MILLISECONDS);
  jobdesc = null;
  outdir = null;
  seq = -1;
  ugi = UserGroupInformation.getCurrentUser();

  try {
    job = this.ugi.doAs(new PrivilegedExceptionAction<Job>() {
      public Job run() throws IOException {
        Job ret = Job.getInstance(conf, name);
        ret.getConfiguration().setInt(GRIDMIX_JOB_SEQ, seq);
        setJobQueue(ret, conf.get(GRIDMIX_DEFAULT_QUEUE));
        return ret;
      }
    });
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
Project: hadoop    File: CommitterEventHandler.java
@Override
protected void serviceInit(Configuration conf) throws Exception {
  super.serviceInit(conf);
  commitThreadCancelTimeoutMs = conf.getInt(
      MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS,
      MRJobConfig.DEFAULT_MR_AM_COMMITTER_CANCEL_TIMEOUT_MS);
  commitWindowMs = conf.getLong(MRJobConfig.MR_AM_COMMIT_WINDOW_MS,
      MRJobConfig.DEFAULT_MR_AM_COMMIT_WINDOW_MS);
  try {
    fs = FileSystem.get(conf);
    JobID id = TypeConverter.fromYarn(context.getApplicationID());
    JobId jobId = TypeConverter.toYarn(id);
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId);
    endCommitSuccessFile = MRApps.getEndJobCommitSuccessFile(conf, user, jobId);
    endCommitFailureFile = MRApps.getEndJobCommitFailureFile(conf, user, jobId);
  } catch (IOException e) {
    throw new YarnRuntimeException(e);
  }
}
Project: hadoop    File: HSAdmin.java
private int getGroups(String[] usernames) throws IOException {
  // Get the groups the users belong to
  if (usernames.length == 0) {
    usernames = new String[] { UserGroupInformation.getCurrentUser()
        .getUserName() };
  }

  // Get the current configuration
  Configuration conf = getConf();

  InetSocketAddress address = conf.getSocketAddr(
      JHAdminConfig.JHS_ADMIN_ADDRESS,
      JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS,
      JHAdminConfig.DEFAULT_JHS_ADMIN_PORT);

  GetUserMappingsProtocol getUserMappingProtocol = HSProxies.createProxy(
      conf, address, GetUserMappingsProtocol.class,
      UserGroupInformation.getCurrentUser());
  for (String username : usernames) {
    StringBuilder sb = new StringBuilder();
    sb.append(username + " :");
    for (String group : getUserMappingProtocol.getGroupsForUser(username)) {
      sb.append(" ");
      sb.append(group);
    }
    System.out.println(sb);
  }

  return 0;
}
Project: hadoop    File: AdminService.java
private UserGroupInformation checkAcls(String method) throws YarnException {
  try {
    return checkAccess(method);
  } catch (IOException ioe) {
    throw RPCUtil.getRemoteException(ioe);
  }
}
Project: hadoop    File: ContainerLocalizer.java
protected void closeFileSystems(UserGroupInformation ugi) {
  try {
    FileSystem.closeAllForUGI(ugi);
  } catch (IOException e) {
    LOG.warn("Failed to close filesystems: ", e);
  }
}
Project: hadoop    File: TestDelegationToken.java
@Test
public void testAddDelegationTokensDFSApi() throws Exception {
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser("JobTracker");
  DistributedFileSystem dfs = cluster.getFileSystem();
  Credentials creds = new Credentials();
  final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
  Assert.assertEquals(1, tokens.length);
  Assert.assertEquals(1, creds.numberOfTokens());
  checkTokenIdentifier(ugi, tokens[0]);

  final Token<?> tokens2[] = dfs.addDelegationTokens("JobTracker", creds);
  Assert.assertEquals(0, tokens2.length); // already have token
  Assert.assertEquals(1, creds.numberOfTokens());
}
Project: hadoop-oss    File: TestProxyUsers.java
@Test
public void testNoHostsForUsers() throws Exception {
  Configuration conf = new Configuration(false);
  conf.set("y." + REAL_USER_NAME + ".users",
    StringUtils.join(",", Arrays.asList(AUTHORIZED_PROXY_USER_NAME)));
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf, "y");

  UserGroupInformation realUserUgi = UserGroupInformation
    .createRemoteUser(REAL_USER_NAME);
  UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
    AUTHORIZED_PROXY_USER_NAME, realUserUgi, GROUP_NAMES);

  // IP doesn't matter
  assertNotAuthorized(proxyUserUgi, "1.2.3.4");
}
Project: QDrill    File: ImpersonationUtil.java
/**
 * Given admin user/group list, finds whether the given username has admin privileges.
 *
 * @param userName User who is checked for administrative privileges.
 * @param adminUsers Comma-separated list of admin usernames.
 * @param adminGroups Comma-separated list of admin user groups.
 * @return True if the given user has admin privileges, false otherwise.
 */
public static boolean hasAdminPrivileges(final String userName, final String adminUsers, final String adminGroups) {
  // Process user is by default an admin
  if (getProcessUserName().equals(userName)) {
    return true;
  }

  final Set<String> adminUsersSet = Sets.newHashSet(SPLITTER.split(adminUsers));
  if (adminUsersSet.contains(userName)) {
    return true;
  }

  final UserGroupInformation ugi = createProxyUgi(userName);
  final String[] userGroups = ugi.getGroupNames();
  if (userGroups == null || userGroups.length == 0) {
    return false;
  }

  final Set<String> adminUserGroupsSet = Sets.newHashSet(SPLITTER.split(adminGroups));
  for (String userGroup : userGroups) {
    if (adminUserGroupsSet.contains(userGroup)) {
      return true;
    }
  }

  return false;
}
Project: ditb    File: MasterProcedureUtil.java
public static UserInformation toProtoUserInfo(UserGroupInformation ugi) {
  UserInformation.Builder userInfoPB = UserInformation.newBuilder();
  userInfoPB.setEffectiveUser(ugi.getUserName());
  if (ugi.getRealUser() != null) {
    userInfoPB.setRealUser(ugi.getRealUser().getUserName());
  }
  return userInfoPB.build();
}
Project: hadoop    File: LeaseRenewer.java
/** Get a renewer. */
private synchronized LeaseRenewer get(final String authority,
    final UserGroupInformation ugi) {
  final Key k = new Key(authority, ugi);
  LeaseRenewer r = renewers.get(k);
  if (r == null) {
    r = new LeaseRenewer(k);
    renewers.put(k, r);
  }
  return r;
}
Project: flume-release-1.7.0    File: KerberosAuthenticator.java
@Override
public synchronized PrivilegedExecutor proxyAs(String proxyUserName) {
  if (proxyUserName == null || proxyUserName.isEmpty()) {
    return this;
  }
  if (proxyCache.get(proxyUserName) == null) {
    UserGroupInformation proxyUgi;
    proxyUgi = UserGroupInformation.createProxyUser(proxyUserName, ugi);
    printUGI(proxyUgi);
    proxyCache.put(proxyUserName, new UGIExecutor(proxyUgi));
  }
  return proxyCache.get(proxyUserName);
}
Project: hadoop-oss    File: TestWebDelegationToken.java
@Test
public void testFallbackToPseudoDelegationTokenAuthenticator()
    throws Exception {
  final Server jetty = createJettyServer();
  Context context = new Context();
  context.setContextPath("/foo");
  jetty.setHandler(context);
  context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
  context.addServlet(new ServletHolder(UserServlet.class), "/bar");

  try {
    jetty.start();
    final URL url = new URL(getJettyURL() + "/foo/bar");

    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        DelegationTokenAuthenticatedURL.Token token =
            new DelegationTokenAuthenticatedURL.Token();
        DelegationTokenAuthenticatedURL aUrl =
            new DelegationTokenAuthenticatedURL();
        HttpURLConnection conn = aUrl.openConnection(url, token);
        Assert.assertEquals(HttpURLConnection.HTTP_OK,
            conn.getResponseCode());
        List<String> ret = IOUtils.readLines(conn.getInputStream());
        Assert.assertEquals(1, ret.size());
        Assert.assertEquals(FOO_USER, ret.get(0));

        aUrl.getDelegationToken(url, token, FOO_USER);
        Assert.assertNotNull(token.getDelegationToken());
        Assert.assertEquals(new Text("token-kind"),
            token.getDelegationToken().getKind());
        return null;
      }
    });
  } finally {
    jetty.stop();
  }
}
Project: hadoop    File: TestRMNMSecretKeys.java
@Test(timeout = 1000000)
public void testNMUpdation() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  // validating RM NM keys for unsecured environment
  validateRMNMKeyExchange(conf);

  // validating RM NM keys for secured environment
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);
  validateRMNMKeyExchange(conf);
}
Project: hadoop    File: HSAdminServer.java
@Override
public void refreshAdminAcls() throws IOException {
  UserGroupInformation user = checkAcls("refreshAdminAcls");

  Configuration conf = createConf();
  adminAcl = new AccessControlList(conf.get(JHAdminConfig.JHS_ADMIN_ACL,
      JHAdminConfig.DEFAULT_JHS_ADMIN_ACL));
  HSAuditLogger.logSuccess(user.getShortUserName(), "refreshAdminAcls",
      HISTORY_ADMIN_SERVER);
}
Project: hadoop    File: TestWebHdfsFileSystemContract.java
@Override
protected void setUp() throws Exception {
  //get file system as a non-superuser
  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  ugi = UserGroupInformation.createUserForTesting(
      current.getShortUserName() + "x", new String[]{"user"});
  fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME);
  defaultWorkingDirectory = fs.getWorkingDirectory().toUri().getPath();
}
Project: hadoop    File: ClientRMService.java
private String getRenewerForToken(Token<RMDelegationTokenIdentifier> token)
    throws IOException {
  UserGroupInformation user = UserGroupInformation.getCurrentUser();
  UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
  // we can always renew our own tokens
  return loginUser.getUserName().equals(user.getUserName())
      ? token.decodeIdentifier().getRenewer().toString()
      : user.getShortUserName();
}
Project: hadoop    File: TestWebHdfsTokens.java
@BeforeClass
public static void setUp() {
  conf = new Configuration();
  SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
  UserGroupInformation.setConfiguration(conf);    
  UserGroupInformation.setLoginUser(
      UserGroupInformation.createUserForTesting(
          "LoginUser", new String[]{"supergroup"}));
}
Project: hadoop    File: JobClient.java
/**
 * Connect to the default cluster
 * @param conf the job configuration.
 * @throws IOException
 */
public void init(JobConf conf) throws IOException {
  setConf(conf);
  cluster = new Cluster(conf);
  clientUgi = UserGroupInformation.getCurrentUser();

  maxRetry = conf.getInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES,
    MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES);

  retryInterval =
    conf.getLong(MRJobConfig.MR_CLIENT_JOB_RETRY_INTERVAL,
      MRJobConfig.DEFAULT_MR_CLIENT_JOB_RETRY_INTERVAL);

}
Project: hadoop    File: DFSZKFailoverController.java
@Override
protected void checkRpcAdminAccess() throws IOException, AccessControlException {
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  UserGroupInformation zkfcUgi = UserGroupInformation.getLoginUser();
  if (adminAcl.isUserAllowed(ugi) ||
      ugi.getShortUserName().equals(zkfcUgi.getShortUserName())) {
    LOG.info("Allowed RPC access from " + ugi + " at " + Server.getRemoteAddress());
    return;
  }
  String msg = "Disallowed RPC access from " + ugi + " at " +
      Server.getRemoteAddress() + ". Not listed in " + DFSConfigKeys.DFS_ADMIN; 
  LOG.warn(msg);
  throw new AccessControlException(msg);
}
Project: hadoop    File: ViewFsBaseTest.java
@Test
public void testInternalGetAclStatus() throws IOException {
  final UserGroupInformation currentUser =
      UserGroupInformation.getCurrentUser();
  AclStatus aclStatus = fcView.getAclStatus(new Path("/internalDir"));
  assertEquals(aclStatus.getOwner(), currentUser.getUserName());
  assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
  assertEquals(aclStatus.getEntries(),
      AclUtil.getMinimalAcl(PERMISSION_555));
  assertFalse(aclStatus.isStickyBit());
}
Project: ditb    File: User.java
/**
 * Wraps an underlying {@code UserGroupInformation} instance.
 * @param ugi The base Hadoop user
 * @return User
 */
public static User create(UserGroupInformation ugi) {
  if (ugi == null) {
    return null;
  }
  return new SecureHadoopUser(ugi);
}