Java Class org.apache.hadoop.security.UnixUserGroupInformation Example Source Code

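UnixUserGroupInformation is the legacy user/group identity class of older Hadoop releases (it was removed in the later security overhaul in favor of the Kerberos-aware UserGroupInformation, so the snippets below compile only against old branches such as hadoop-EAR). Most usages reduce to three calls: login() to pick up the current Unix user, the (user, groups) constructor to fabricate an identity, and saveToConf() to attach that identity to a Configuration so that a FileSystem obtained from it acts as that user. The following is a minimal sketch of that pattern, not taken from any of the projects listed here; the class name UgiExample, the "foo"/"bar" identity, and the NameNode address are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UnixUserGroupInformation;

public class UgiExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.default.name", "hdfs://localhost:9000"); // placeholder NameNode

    // The current Unix user; login() throws LoginException on failure.
    UnixUserGroupInformation current = UnixUserGroupInformation.login(conf, true);
    System.out.println("logged in as: " + current.getUserName());

    // Fabricate an identity and store it in the Configuration.
    UnixUserGroupInformation ugi =
        new UnixUserGroupInformation("foo", new String[] {"bar"});
    UnixUserGroupInformation.saveToConf(conf,
        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);

    // FileSystem.get reads the UGI back from the Configuration, so this
    // client now talks to HDFS as user "foo" in group "bar".
    FileSystem fs = FileSystem.get(conf);
    System.out.println("working directory: " + fs.getWorkingDirectory());
    fs.close();
  }
}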
Project: hadoop-EAR    File: TestConfiguredPolicy.java
public void testConfiguredPolicy() throws Exception {
  Configuration conf = new Configuration();
  conf.set(KEY_1, AccessControlList.WILDCARD_ACL_VALUE);
  conf.set(KEY_2, USER1 + " " + GROUPS1[0]);

  ConfiguredPolicy policy = new ConfiguredPolicy(conf, new TestPolicyProvider());
  SecurityUtil.setPolicy(policy);

  Subject user1 = 
    SecurityUtil.getSubject(new UnixUserGroupInformation(USER1, GROUPS1));

  // Should succeed
  ServiceAuthorizationManager.authorize(user1, Protocol1.class);

  // Should fail
  Subject user2 = 
    SecurityUtil.getSubject(new UnixUserGroupInformation(USER2, GROUPS2));
  boolean failed = false;
  try {
    ServiceAuthorizationManager.authorize(user2, Protocol2.class);
  } catch (AuthorizationException ae) {
    failed = true;
  }
  assertTrue(failed);
}
Project: hadoop-EAR    File: MiniMRCluster.java
static JobConf configureJobConf(JobConf conf, String namenode, 
                                int jobTrackerPort, int jobTrackerInfoPort, 
                                UnixUserGroupInformation ugi) {
  JobConf result = new JobConf(conf);
  FileSystem.setDefaultUri(result, namenode);
  result.set("mapred.job.tracker", "localhost:"+jobTrackerPort);
  result.set("mapred.job.tracker.http.address", 
                      "127.0.0.1:" + jobTrackerInfoPort);
  if (ugi != null) {
    result.set("mapred.system.dir", "/mapred/system");
    UnixUserGroupInformation.saveToConf(result,
        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  }
  // for debugging have all task output sent to the test output
  JobClient.setTaskOutputFilter(result, JobClient.TaskStatusFilter.ALL);
  return result;
}
Project: hadoop-EAR    File: ClusterWithLinuxTaskController.java
protected void startCluster()
    throws IOException {
  JobConf conf = new JobConf();
  dfsCluster = new MiniDFSCluster(conf, NUMBER_OF_NODES, true, null);
  conf.set("mapred.task.tracker.task-controller",
      MyLinuxTaskController.class.getName());
  mrCluster =
      new MiniMRCluster(NUMBER_OF_NODES, dfsCluster.getFileSystem().getUri()
          .toString(), 1, null, null, conf);

  // Get the configured taskcontroller-path
  String path = System.getProperty("taskcontroller-path");
  createTaskControllerConf(path);
  String execPath = path + "/task-controller";
  TaskTracker tracker = mrCluster.getTaskTrackerRunner(0).tt;
  // Cast to MyLinuxTaskController, since we know that is the
  // TaskController instance present in the TT.
  ((MyLinuxTaskController) tracker.getTaskController())
      .setTaskControllerExe(execPath);
  String ugi = System.getProperty("taskcontroller-user");
  clusterConf = mrCluster.createJobConf();
  String[] splits = ugi.split(",");
  taskControllerUser = new UnixUserGroupInformation(splits);
  clusterConf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  createHomeDirectory(clusterConf);
}
Project: hadoop-EAR    File: TestQueueAclsForCurrentUser.java
private void setupConfForNoAccess() throws IOException, LoginException {
  currentUGI = UnixUserGroupInformation.login();
  String userName = currentUGI.getUserName();
  conf = new JobConf();

  conf.setBoolean("mapred.acls.enabled",true);

  conf.set("mapred.queue.names", "qu1,qu2");
  // Only user u1 has access to qu1
  conf.set("mapred.queue.qu1.acl-submit-job", "u1");
  conf.set("mapred.queue.qu1.acl-administer-jobs", "u1");
  // For qu2, only group g2 has access (the leading space means
  // "no users", i.e. a group-only ACL)
  conf.set("mapred.queue.qu2.acl-submit-job", " g2");
  conf.set("mapred.queue.qu2.acl-administer-jobs", " g2");
  queueManager = new QueueManager(conf);
}
Project: hadoop-EAR    File: TestDFSPermission.java
void verifyPermission(UnixUserGroupInformation ugi) throws LoginException,
    IOException {
  if (this.ugi != ugi) {
    setRequiredPermissions(ugi);
    this.ugi = ugi;
  }

  try {
    try {
      call();
      assertFalse(expectPermissionDeny());
    } catch(AccessControlException e) {
      assertTrue(expectPermissionDeny());
    }
  } catch (AssertionFailedError ae) {
    logPermissions();
    throw ae;
  }
}
Project: hadoop-EAR    File: TestDFSPermission.java
protected void setRequiredPermissions(UnixUserGroupInformation ugi)
    throws IOException {
  if (SUPERUSER.equals(ugi)) {
    requiredAncestorPermission = SUPER_MASK;
    requiredParentPermission = SUPER_MASK;
    requiredPermission = SUPER_MASK;
  } else if (USER1.equals(ugi)) {
    requiredAncestorPermission = (short)(opAncestorPermission & OWNER_MASK);
    requiredParentPermission = (short)(opParentPermission & OWNER_MASK);
    requiredPermission = (short)(opPermission & OWNER_MASK);
  } else if (USER2.equals(ugi)) {
    requiredAncestorPermission = (short)(opAncestorPermission & GROUP_MASK);
    requiredParentPermission = (short)(opParentPermission & GROUP_MASK);
    requiredPermission = (short)(opPermission & GROUP_MASK);
  } else if (USER3.equals(ugi)) {
    requiredAncestorPermission = (short)(opAncestorPermission & OTHER_MASK);
    requiredParentPermission = (short)(opParentPermission & OTHER_MASK);
    requiredPermission = (short)(opPermission & OTHER_MASK);
  } else {
    throw new IllegalArgumentException("Non-supported user: " + ugi);
  }
}
Project: hadoop-EAR    File: TestMergeFile.java
@Before
public void startUpCluster() throws IOException {
  RaidCodecBuilder.loadDefaultFullBlocksCodecs(conf, numRSParityBlocks,
      numDataBlocks);
  cluster = new MiniDFSCluster(conf, REPL_FACTOR, true, null);
  assertNotNull("Failed Cluster Creation", cluster);
  cluster.waitClusterUp();
  dfs = (DistributedFileSystem) cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem", dfs);
  nn = cluster.getNameNode();
  assertNotNull("Failed to get NameNode", nn);
  Configuration newConf = new Configuration(conf);
  USER1 = new UnixUserGroupInformation("foo", new String[] {"bar"});
  UnixUserGroupInformation.saveToConf(newConf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, USER1);
  userdfs = (DistributedFileSystem)FileSystem.get(newConf); // login as ugi
}
Project: hadoop-EAR    File: NNThroughputBenchmark.java
NNThroughputBenchmark(Configuration conf) throws IOException, LoginException {
  config = conf;
  ugi = UnixUserGroupInformation.login(config);
  UserGroupInformation.setCurrentUser(ugi);

  // We do not need many handlers, since each thread simulates a handler
  // by calling name-node methods directly
  config.setInt("dfs.namenode.handler.count", 1);
  // set exclude file
  config.set("dfs.hosts.exclude", "${hadoop.tmp.dir}/dfs/hosts/exclude");
  File excludeFile = new File(config.get("dfs.hosts.exclude", "exclude"));
  if (!excludeFile.exists()) {
    if (!excludeFile.getParentFile().mkdirs())
      throw new IOException("NNThroughputBenchmark: cannot mkdir " + excludeFile);
  }
  new FileOutputStream(excludeFile).close();
  // Start the NameNode
  String[] argv = new String[] {};
  nameNode = NameNode.createNameNode(argv, config);
}
Project: hadoop-EAR    File: TestLeaseRecovery3.java
private void recoverLease(Path filepath, DistributedFileSystem dfs2) throws Exception {
  if (dfs2 == null) {
    Configuration conf2 = new Configuration(conf);
    String username = UserGroupInformation.getCurrentUGI().getUserName() + "_1";
    UnixUserGroupInformation.saveToConf(conf2,
        UnixUserGroupInformation.UGI_PROPERTY_NAME,
        new UnixUserGroupInformation(username, new String[]{"supergroup"}));
    dfs2 = (DistributedFileSystem)FileSystem.get(conf2);
  }

  AppendTestUtil.LOG.info("XXX  test recoverLease");
  while (!dfs2.recoverLease(filepath, true)) {
    AppendTestUtil.LOG.info("sleep " + 1000 + "ms");
    Thread.sleep(1000);
  }
}
Project: hadoop-EAR    File: TestLeaseRecovery3.java
private void verifyFile(FileSystem dfs, Path filepath, byte[] actual,
    int size) throws IOException {
  if (dfs == null) {
    Configuration conf2 = new Configuration(conf);
    String username = UserGroupInformation.getCurrentUGI().getUserName() + "_1";
    UnixUserGroupInformation.saveToConf(conf2,
        UnixUserGroupInformation.UGI_PROPERTY_NAME,
        new UnixUserGroupInformation(username, new String[]{"supergroup"}));
    dfs = (DistributedFileSystem)FileSystem.get(conf2);
  }
  AppendTestUtil.LOG.info("Lease for file " +  filepath + " is recovered. "
      + "Validating its contents now...");

  // verify that file-size matches
  assertTrue("File should be " + size + " bytes, but is actually " +
             " found to be " + dfs.getFileStatus(filepath).getLen() +
             " bytes",
             dfs.getFileStatus(filepath).getLen() == size);

  // verify that there is enough data to read.
  System.out.println("File size is good. Now validating sizes from datanodes...");
  FSDataInputStream stmin = dfs.open(filepath);
  stmin.readFully(0, actual, 0, size);
  stmin.close();
}
Project: hadoop-EAR    File: TestRaidFile.java
@Before
public void startUpCluster() throws IOException {
  RaidCodecBuilder.loadDefaultFullBlocksCodecs(conf, numRSParityBlocks,
      numDataBlocks);
  cluster = new MiniDFSCluster(conf, 4, true, null);
  assertNotNull("Failed Cluster Creation", cluster);
  cluster.waitClusterUp();
  dfs = (DistributedFileSystem) cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem", dfs);
  nn = cluster.getNameNode();
  assertNotNull("Failed to get NameNode", nn);
  Configuration newConf = new Configuration(conf);
  USER1 = new UnixUserGroupInformation("foo", new String[] {"bar"});
  UnixUserGroupInformation.saveToConf(newConf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, USER1);
  userdfs = (DistributedFileSystem)FileSystem.get(newConf); // login as ugi
  InjectionHandler h = new FakeBlockGeneratorInjectionHandler();
  InjectionHandler.set(h);
  rand.nextBytes(bytes);
}
Project: hadoop-EAR    File: AvatarShell.java
public void initAvatarRPC(String address) throws IOException {
  InetSocketAddress addr = null;
  if (address != null) {
    addr = NetUtils.createSocketAddr(address);
  } else {
    addr = AvatarNode.getAddress(conf);
  }

  try {
    this.ugi = UnixUserGroupInformation.login(conf, true);
  } catch (LoginException e) {
    throw (IOException) (new IOException().initCause(e));
  }

  this.rpcAvatarnode = createRPCAvatarnode(addr, conf, ugi);
  this.avatarnode = createAvatarnode(rpcAvatarnode);
}
Project: hadoop-EAR    File: UtilizationCollectorCached.java
protected void connect() {
  LOG.info("Connecting to collector...");
  try {
    conf.setStrings(UnixUserGroupInformation.UGI_PROPERTY_NAME,
                    new String[]{"hadoop", "hadoop"});
    rpcCollector =
          (UtilizationCollectorProtocol) RPC.getProxy(UtilizationCollectorProtocol.class,
                                           UtilizationCollectorProtocol.versionID,
                                           UtilizationCollector.getAddress(conf),
                                           conf);
  } catch (IOException e) {
    LOG.error("Cannot connect to UtilizationCollector server. Retry in " +
             DEFAULT_MIRROR_PERIOD + " milliseconds.");
    return;
  }
  LOG.info("Connection established");
}
Project: hadoop-EAR    File: HftpFileSystem.java
@Override
public void initialize(URI name, Configuration conf) throws IOException {
  super.initialize(name, conf);
  setConf(conf);
  try {
    this.ugi = UnixUserGroupInformation.login(conf, true);
  } catch (LoginException le) {
    throw new IOException(StringUtils.stringifyException(le));
  }
  initializedWith = name;
  if (conf.getBoolean(FSConstants.CLIENT_CONFIGURATION_LOOKUP_DONE, false)) {
    try {
      initializedWith = new URI(conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
    } catch (URISyntaxException e) {
      LOG.error(e);
    }
  }
  nnAddr = NetUtils.createSocketAddr(name.toString());
  doStrictContentLengthCheck = conf.getBoolean(STRICT_CONTENT_LENGTH, false);
}
Project: hadoop-EAR    File: DfsServlet.java
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}. 
 */
protected synchronized ClientProtocol createNameNodeProxy(UnixUserGroupInformation ugi
    ) throws IOException {
  if (nnProxy != null) {
    return nnProxy;
  }
  ServletContext context = getServletContext();
  InetSocketAddress nnAddr = (InetSocketAddress)context.getAttribute("name.node.address");
  if (nnAddr == null) {
    throw new IOException("The namenode is not out of safemode yet");
  }
  Configuration conf = new Configuration(
      (Configuration)context.getAttribute("name.conf"));
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  nnProxy = DFSClient.createNamenode(nnAddr, conf);
  return nnProxy;
}
Project: hadoop-EAR    File: JobClient.java
private static UnixUserGroupInformation getUGI(Configuration job) throws IOException {
  UnixUserGroupInformation ugi = null;
  try {
    ugi = UnixUserGroupInformation.login(job, true);
  } catch (LoginException e) {
    throw (IOException)(new IOException(
        "Failed to get the current user's information.").initCause(e));
  }
  return ugi;
}
Project: hadoop-EAR    File: MRAdmin.java
private static UnixUserGroupInformation getUGI(Configuration conf) 
throws IOException {
  UnixUserGroupInformation ugi = null;
  try {
    ugi = UnixUserGroupInformation.login(conf, true);
  } catch (LoginException e) {
    throw (IOException)(new IOException(
        "Failed to get the current user's information.").initCause(e));
  }
  return ugi;
}
Project: hadoop-EAR    File: TestFileSystem.java
static Configuration createConf4Testing(String username) throws Exception {
  Configuration conf = new Configuration();
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME,
      new UnixUserGroupInformation(username, new String[]{"group"}));
  return conf;    
}
Project: hadoop-EAR    File: TestQueueManager.java
public void testEnabledACLForNonDefaultQueue() throws IOException,
                                                        LoginException {
  // login as self...
  UserGroupInformation ugi = UnixUserGroupInformation.login();
  String userName = ugi.getUserName();
  // allow everyone in default queue
  JobConf conf = setupConf("mapred.queue.default.acl-submit-job", "*");
  // setup a different queue
  conf.set("mapred.queue.names", "default,q2");
  // setup a different acl for this queue.
  conf.set("mapred.queue.q2.acl-submit-job", userName);
  // verify job submission to other queue fails.
  verifyJobSubmission(conf, true, "q2");
}
Project: hadoop-EAR    File: TestQueueManager.java
public void testUserEnabledACLForJobSubmission() 
                                  throws IOException, LoginException {
  // login as self...
  UserGroupInformation ugi = UnixUserGroupInformation.login();
  String userName = ugi.getUserName();
  JobConf conf = setupConf("mapred.queue.default.acl-submit-job",
                                "3698-junk-user," + userName 
                                  + " 3698-junk-group1,3698-junk-group2");
  verifyJobSubmission(conf, true);
}
Project: hadoop-EAR    File: TestQueueManager.java
public void testGroupsEnabledACLForJobSubmission() 
                                  throws IOException, LoginException {
  // login as self, get one group, and add in allowed list.
  UserGroupInformation ugi = UnixUserGroupInformation.login();
  String[] groups = ugi.getGroupNames();
  assertTrue(groups.length > 0);
  JobConf conf = setupConf("mapred.queue.default.acl-submit-job",
                              "3698-junk-user1,3698-junk-user2 " 
                                + groups[groups.length-1] 
                                         + ",3698-junk-group");
  verifyJobSubmission(conf, true);
}
Project: hadoop-EAR    File: TestQueueManager.java
public void testUserEnabledACLForJobKill() throws IOException, 
                                                  LoginException {
  // login as self...
  UserGroupInformation ugi = UnixUserGroupInformation.login();
  String userName = ugi.getUserName();
  JobConf conf = setupConf("mapred.queue.default.acl-administer-jobs",
                                            "dummy-user,"+userName);
  verifyJobKillAsOtherUser(conf, true, "dummy-user,dummy-user-group");
}
Project: hadoop-EAR    File: TestQueueManager.java
private RunningJob submitSleepJob(int numMappers, int numReducers, 
                                  long mapSleepTime, long reduceSleepTime,
                                  boolean shouldComplete, String userInfo,
                                  String queueName) 
                                    throws IOException {
  JobConf clientConf = new JobConf();
  clientConf.set("mapred.job.tracker", "localhost:"
      + miniMRCluster.getJobTrackerPort());
  SleepJob job = new SleepJob();
  job.setConf(clientConf);
  clientConf = job.setupJobConf(numMappers, numReducers,
      mapSleepTime, (int)(mapSleepTime / 100),
      reduceSleepTime, (int)(reduceSleepTime / 100));
  if (queueName != null) {
    clientConf.setQueueName(queueName);
  }
  JobConf jc = new JobConf(clientConf);
  if (userInfo != null) {
    jc.set(UnixUserGroupInformation.UGI_PROPERTY_NAME, userInfo);
  }
  RunningJob rJob = null;
  if (shouldComplete) {
    rJob = JobClient.runJob(jc);  
  } else {
    rJob = new JobClient(clientConf).submitJob(jc);
  }
  return rJob;
}
Project: hadoop-EAR    File: MiniMRCluster.java
public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
    int numTaskTrackers, String namenode, 
    int numDir, String[] racks, String[] hosts, UnixUserGroupInformation ugi
    ) throws IOException {
  this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, 
       numDir, racks, hosts, ugi, null);
}
Project: hadoop-EAR    File: MiniMRCluster.java
public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
    int numTaskTrackers, String namenode, 
    int numDir, String[] racks, String[] hosts, UnixUserGroupInformation ugi,
    JobConf conf) throws IOException {
  this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, numDir, 
       racks, hosts, ugi, conf, 0);
}
Project: hadoop-EAR    File: TestDFSPermission.java
private void testCreateMkdirs(UnixUserGroupInformation ugi, Path path,
    short ancestorPermission, short parentPermission) throws Exception {
  createVerifier.set(path, OpType.MKDIRS, ancestorPermission,
      parentPermission);
  createVerifier.verifyPermission(ugi);
  createVerifier.setOpType(OpType.CREATE);
  createVerifier.setCleanup(false);
  createVerifier.verifyPermission(ugi);
  createVerifier.setCleanup(true);
  createVerifier.verifyPermission(ugi); // test overWritten
}
Project: hadoop-EAR    File: TestDFSPermission.java
private void testOpen(UnixUserGroupInformation ugi, Path path,
    short ancestorPermission, short parentPermission, short filePermission)
    throws Exception {
  openVerifier
      .set(path, ancestorPermission, parentPermission, filePermission);
  openVerifier.verifyPermission(ugi);
}
Project: hadoop-EAR    File: TestDFSPermission.java
private void testSetReplication(UnixUserGroupInformation ugi, Path path,
    short ancestorPermission, short parentPermission, short filePermission)
    throws Exception {
  replicatorVerifier.set(path, ancestorPermission, parentPermission,
      filePermission);
  replicatorVerifier.verifyPermission(ugi);
}
Project: hadoop-EAR    File: TestDFSPermission.java
private void testSetTimes(UnixUserGroupInformation ugi, Path path,
    short ancestorPermission, short parentPermission, short filePermission)
    throws Exception {
  timesVerifier.set(path, ancestorPermission, parentPermission,
      filePermission);
  timesVerifier.verifyPermission(ugi);
}
Project: hadoop-EAR    File: TestDFSPermission.java
private void testList(UnixUserGroupInformation ugi, Path file, Path dir,
    short ancestorPermission, short parentPermission, short filePermission)
    throws Exception {
  listVerifier.set(file, InodeType.FILE, ancestorPermission,
      parentPermission, filePermission);
  listVerifier.verifyPermission(ugi);
  listVerifier.setInodeType(dir, InodeType.DIR);
  listVerifier.verifyPermission(ugi);
}
Project: hadoop-EAR    File: TestDFSPermission.java
private void testRename(UnixUserGroupInformation ugi, Path src, Path dst,
    short srcAncestorPermission, short srcParentPermission,
    short dstAncestorPermission, short dstParentPermission) throws Exception {
  renameVerifier.set(src, srcAncestorPermission, srcParentPermission, dst,
      dstAncestorPermission, dstParentPermission);
  renameVerifier.verifyPermission(ugi);
}
Project: hadoop-EAR    File: TestDFSPermission.java
private void testDeleteDir(UnixUserGroupInformation ugi, Path path,
    short ancestorPermission, short parentPermission, short permission,
    short[] childPermissions) throws Exception {
  dirDeletionVerifier.set(path, ancestorPermission, parentPermission,
      permission, childPermissions);
  dirDeletionVerifier.verifyPermission(ugi);
}
Project: hadoop-EAR    File: TestDFSPermission.java
private void login(UnixUserGroupInformation ugi) throws IOException {
  if (fs != null) {
    fs.close();
  }
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  fs = FileSystem.get(conf); // login as ugi
}
Project: hadoop-EAR    File: TestHDFSFileSystemContract.java
@Override
protected void setUp() throws Exception {
  Configuration conf = new Configuration();
  cluster = new MiniDFSCluster(conf, 2, true, null);
  fs = cluster.getFileSystem();
  defaultWorkingDirectory = "/user/" + 
         UnixUserGroupInformation.login().getUserName();
}
Project: hadoop-EAR    File: TestDFSShell.java
public void testRemoteException() throws Exception {
  UnixUserGroupInformation tmpUGI = new UnixUserGroupInformation("tmpname",
      new String[] {"mygroup"});
  cluster = null;
  PrintStream bak = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster(conf, 2, true, null);
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path("/foo");
    fs.mkdirs(p);
    fs.setPermission(p, new FsPermission((short)0700));
    UnixUserGroupInformation.saveToConf(conf,
        UnixUserGroupInformation.UGI_PROPERTY_NAME, tmpUGI);
    FsShell fshell = new FsShell(conf);
    bak = System.err;
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    PrintStream tmp = new PrintStream(out);
    System.setErr(tmp);
    String[] args = new String[2];
    args[0] = "-ls";
    args[1] = "/foo";
    int ret = ToolRunner.run(fshell, args);
    assertTrue("returned should be -1", (ret == -1));
    String str = out.toString();
    assertTrue("permission denied printed", str.indexOf("Permission denied") != -1);
    out.reset();
  } finally {
    if (bak != null) {
      System.setErr(bak);
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop-EAR    File: ProxyStreamFile.java
/** {@inheritDoc} */
@Override
protected DFSClient getDFSClient(HttpServletRequest request)
    throws IOException {
  ServletContext context = getServletContext();
  Configuration conf = new Configuration((Configuration) context
      .getAttribute("name.conf"));
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, getUGI(request));
  InetSocketAddress nameNodeAddr = (InetSocketAddress) context
      .getAttribute("name.node.address");
  return new DFSClient(nameNodeAddr, conf);
}
Project: hadoop-EAR    File: TestOriginalCaller.java
@BeforeClass
public static void setUpClass() {
  assertFalse(callerUgi.equals(originalUgi));
  final UserGroupInformation user = UserGroupInformation.getCurrentUGI();
  if (user != null) {
    assertFalse(user.getUserName().equals(callerUgi.getUserName()));
    assertFalse(Arrays.asList(user.getGroupNames()).contains(callerUgi.getGroupNames()[0]));
    assertFalse(user.getUserName().equals(originalUgi.getUserName()));
    assertFalse(Arrays.asList(user.getGroupNames()).contains(originalUgi.getGroupNames()[0]));
  }
  UnixUserGroupInformation.saveToConf(conf, UnixUserGroupInformation.UGI_PROPERTY_NAME,
      callerUgi);
  conf.setBoolean("fs.security.ugi.getFromConf", false);
}
Project: hadoop-EAR    File: ProxyFileDataServlet.java
/** {@inheritDoc} */
@Override
protected URI createUri(FileStatus i, UnixUserGroupInformation ugi,
    ClientProtocol nnproxy, HttpServletRequest request) throws IOException,
    URISyntaxException {
  return new URI(request.getScheme(), null, request.getServerName(), request
      .getServerPort(), "/streamFile", "filename=" + i.getPath() + "&ugi="
      + ugi, null);
}
Project: hadoop-EAR    File: TestLeaseRecovery3.java
private void recoverLeaseUsingCreate(Path filepath) throws IOException {
  Configuration conf2 = new Configuration(conf);
  String username = UserGroupInformation.getCurrentUGI().getUserName() + "_1";
  UnixUserGroupInformation.saveToConf(conf2,
      UnixUserGroupInformation.UGI_PROPERTY_NAME,
      new UnixUserGroupInformation(username, new String[]{"supergroup"}));
  FileSystem dfs2 = FileSystem.get(conf2);

  boolean done = false;
  for (int i = 0; i < 10 && !done; i++) {
    AppendTestUtil.LOG.info("i=" + i);
    try {
      dfs2.create(filepath, false, bufferSize, (short)1, BLOCK_SIZE);
      fail("Creation of an existing file should never succeed.");
    } catch (IOException ioe) {
      final String message = ioe.getMessage();
      if (message.contains("file exists")) {
        AppendTestUtil.LOG.info("done", ioe);
        done = true;
      }
      else if (message.contains(AlreadyBeingCreatedException.class.getSimpleName())) {
        AppendTestUtil.LOG.info("GOOD! got " + message);
      }
      else {
        AppendTestUtil.LOG.warn("UNEXPECTED IOException", ioe);
      }
    }

    if (!done) {
      AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
      try {Thread.sleep(5000);} catch (InterruptedException e) {}
    }
  }
  assertTrue(done);
}
Project: hadoop-EAR    File: TestLeaseRecovery2.java
private void recoverLease(Path filepath, DistributedFileSystem dfs2) throws Exception {
  if (dfs2 == null) {
    Configuration conf2 = new Configuration(conf);
    String username = UserGroupInformation.getCurrentUGI().getUserName() + "_1";
    UnixUserGroupInformation.saveToConf(conf2,
        UnixUserGroupInformation.UGI_PROPERTY_NAME,
        new UnixUserGroupInformation(username, new String[]{"supergroup"}));
    dfs2 = (DistributedFileSystem)FileSystem.get(conf2);
  }

  while (!dfs2.recoverLease(filepath, false)) {
    AppendTestUtil.LOG.info("sleep " + 1000 + "ms");
    Thread.sleep(1000);
  }
}