Example source code for the Java class org.apache.hadoop.security.Credentials

Project: hadoop    File: TestJobImpl.java
public StubbedJob(JobId jobId, ApplicationAttemptId applicationAttemptId,
    Configuration conf, EventHandler eventHandler, boolean newApiCommitter,
    String user, int numSplits, AppContext appContext) {
  super(jobId, applicationAttemptId, conf, eventHandler,
      null, new JobTokenSecretManager(), new Credentials(),
      new SystemClock(), Collections.<TaskId, TaskInfo> emptyMap(),
      MRAppMetrics.create(), null, newApiCommitter, user,
      System.currentTimeMillis(), null, appContext, null, null);

  initTransition = getInitTransition(numSplits);
  localFactory = stateMachineFactory.addTransition(JobStateInternal.NEW,
        EnumSet.of(JobStateInternal.INITED, JobStateInternal.FAILED),
        JobEventType.JOB_INIT,
        // This is abusive.
        initTransition);

  // This "this leak" is okay because the retained pointer is in an
  //  instance variable.
  localStateMachine = localFactory.make(this);
}
Project: hadoop-oss    File: TestCredentials.java
@Test
public void testAddTokensToUGI() {
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser("someone");
  Credentials creds = new Credentials();

  for (int i=0; i < service.length; i++) {
    creds.addToken(service[i], token[i]);
  }
  ugi.addCredentials(creds);

  creds = ugi.getCredentials();
  for (int i=0; i < service.length; i++) {
    assertSame(token[i], creds.getToken(service[i]));
  }
  assertEquals(service.length, creds.numberOfTokens());
}
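Beyond the in-memory API exercised above, a Credentials object can be round-tripped through the token-storage stream format; this is the same mechanism the ContainerManagerImpl and TestContainerLocalizer snippets below rely on. A minimal sketch (the helper name is illustrative; it uses org.apache.hadoop.io.DataOutputBuffer and DataInputBuffer):

static Credentials roundTrip(Credentials original) throws IOException {
  // Serialize all tokens and secret keys to an in-memory buffer.
  DataOutputBuffer out = new DataOutputBuffer();
  original.writeTokenStorageToStream(out);

  // Read them back into a fresh Credentials instance.
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), 0, out.getLength());
  Credentials restored = new Credentials();
  restored.readTokenStorageStream(in);
  return restored;
}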
Project: hadoop    File: FileSystem.java
/**
 * Recursively obtain the tokens for this FileSystem and all descendant
 * FileSystems, as determined by getChildFileSystems().
 * @param renewer the user allowed to renew the delegation tokens
 * @param credentials cache in which to add the new delegation tokens
 * @param tokens list in which to add acquired tokens
 * @throws IOException if a delegation token cannot be obtained
 */
private void collectDelegationTokens(final String renewer,
                                     final Credentials credentials,
                                     final List<Token<?>> tokens)
                                         throws IOException {
  final String serviceName = getCanonicalServiceName();
  // Collect the token for this filesystem, then those of its embedded children
  if (serviceName != null) { // fs has token, grab it
    final Text service = new Text(serviceName);
    Token<?> token = credentials.getToken(service);
    if (token == null) {
      token = getDelegationToken(renewer);
      if (token != null) {
        tokens.add(token);
        credentials.addToken(service, token);
      }
    }
  }
  // Now collect the tokens from the children
  final FileSystem[] children = getChildFileSystems();
  if (children != null) {
    for (final FileSystem fs : children) {
      fs.collectDelegationTokens(renewer, credentials, tokens);
    }
  }
}
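collectDelegationTokens is private; external callers reach it through the public addDelegationTokens(renewer, credentials) entry point, as the test snippets on this page do. A hedged usage sketch (the renewer principal and the configuration are illustrative):

// Illustrative: fetch tokens for a FileSystem and its children into one cache.
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Credentials credentials = new Credentials();
Token<?>[] newTokens = fs.addDelegationTokens("yarn", credentials);
// newTokens contains only freshly acquired tokens; tokens already present
// in credentials are skipped, as the null check above shows.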
Project: hadoop-oss    File: ViewFileSystemBaseTest.java
@Test
public void testGetDelegationTokensWithCredentials() throws IOException {
  Credentials credentials = new Credentials();
  List<Token<?>> delTokens =
      Arrays.asList(fsView.addDelegationTokens("sanjay", credentials));

  int expectedTokenCount = getExpectedDelegationTokenCountWithCredentials();

  Assert.assertEquals(expectedTokenCount, delTokens.size());
  Credentials newCredentials = new Credentials();
  for (int i = 0; i < expectedTokenCount / 2; i++) {
    Token<?> token = delTokens.get(i);
    newCredentials.addToken(token.getService(), token);
  }

  List<Token<?>> delTokens2 =
      Arrays.asList(fsView.addDelegationTokens("sanjay", newCredentials));
  Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens2.size());
}
Project: hadoop    File: ClientDistributedCacheManager.java
/**
 * For each archive or cache file, get the corresponding delegation token.
 * @param job the job configuration from which cache archives and files are read
 * @param credentials the credentials to which the acquired tokens are added
 * @throws IOException if the tokens cannot be obtained
 */
public static void getDelegationTokens(Configuration job,
    Credentials credentials) throws IOException {
  URI[] tarchives = DistributedCache.getCacheArchives(job);
  URI[] tfiles = DistributedCache.getCacheFiles(job);

  int size = (tarchives != null ? tarchives.length : 0) + (tfiles != null ? tfiles.length : 0);
  Path[] ps = new Path[size];

  int i = 0;
  if (tarchives != null) {
    for (i = 0; i < tarchives.length; i++) {
      ps[i] = new Path(tarchives[i].toString());
    }
  }

  if (tfiles != null) {
    for (int j = 0; j < tfiles.length; j++) {
      ps[i+j] = new Path(tfiles[j].toString());
    }
  }

  TokenCache.obtainTokensForNamenodes(credentials, ps, job);
}
Project: big_data    File: YARNRunner.java
@Override
public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts)
        throws IOException, InterruptedException {

    addHistoryToken(ts);

    // Construct necessary information to start the MR AM
    ApplicationSubmissionContext appContext = createApplicationSubmissionContext(conf, jobSubmitDir, ts);

    // Submit to ResourceManager
    try {
        ApplicationId applicationId = resMgrDelegate.submitApplication(appContext);

        ApplicationReport appMaster = resMgrDelegate.getApplicationReport(applicationId);
        String diagnostics = (appMaster == null ? "application report is null" : appMaster.getDiagnostics());
        if (appMaster == null || appMaster.getYarnApplicationState() == YarnApplicationState.FAILED
                || appMaster.getYarnApplicationState() == YarnApplicationState.KILLED) {
            throw new IOException("Failed to run job : " + diagnostics);
        }
        return clientCache.getClient(jobId).getJobStatus(jobId);
    } catch (YarnException e) {
        throw new IOException(e);
    }
}
Project: circus-train    File: CopyListing.java
/**
 * Public Factory method with which the appropriate CopyListing implementation may be retrieved.
 *
 * @param configuration The input configuration.
 * @param credentials Credentials object on which the FS delegation tokens are cached
 * @param options The input Options, to help choose the appropriate CopyListing Implementation.
 * @return An instance of the appropriate CopyListing implementation.
 * @throws java.io.IOException if the CopyListing cannot be instantiated
 */
public static CopyListing getCopyListing(
    Configuration configuration,
    Credentials credentials,
    S3MapReduceCpOptions options)
  throws IOException {

  String copyListingClassName = configuration.get(S3MapReduceCpConstants.CONF_LABEL_COPY_LISTING_CLASS, "");
  Class<? extends CopyListing> copyListingClass;
  try {
    if (!copyListingClassName.isEmpty()) {
      copyListingClass = configuration.getClass(S3MapReduceCpConstants.CONF_LABEL_COPY_LISTING_CLASS,
          SimpleCopyListing.class, CopyListing.class);
    } else {
      copyListingClass = SimpleCopyListing.class;
    }
    copyListingClassName = copyListingClass.getName();
    Constructor<? extends CopyListing> constructor = copyListingClass.getDeclaredConstructor(Configuration.class,
        Credentials.class);
    return constructor.newInstance(configuration, credentials);
  } catch (Exception e) {
    throw new IOException("Unable to instantiate " + copyListingClassName, e);
  }
}
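Since the factory reads CONF_LABEL_COPY_LISTING_CLASS from the configuration, callers can swap in their own listing strategy. A sketch, assuming a hypothetical MyCopyListing subclass that provides the (Configuration, Credentials) constructor the reflective lookup above requires:

Configuration configuration = new Configuration();
// MyCopyListing is hypothetical: it must extend CopyListing and declare a
// (Configuration, Credentials) constructor, or newInstance() above fails.
configuration.setClass(S3MapReduceCpConstants.CONF_LABEL_COPY_LISTING_CLASS,
    MyCopyListing.class, CopyListing.class);
CopyListing listing = CopyListing.getCopyListing(configuration,
    new Credentials(), options); // options: an S3MapReduceCpOptions in scope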
Project: hadoop    File: CredentialsTestJob.java
private static void checkSecrets(Credentials ts) {
  if (ts == null) {
    throw new RuntimeException("The credentials are not available"); // fail the test
  }

  for (int i = 0; i < NUM_OF_KEYS; i++) {
    String secretName = "alias" + i;
    // get token storage and a key
    byte[] secretValue = ts.getSecretKey(new Text(secretName));

    if (secretValue == null) {
      throw new RuntimeException("The key " + secretName + " is not available."); // fail the test
    }

    String secretValueStr = new String(secretValue);
    System.out.println(secretValueStr); // print the decoded value, not the byte[] reference

    if (!("password" + i).equals(secretValueStr)) {
      throw new RuntimeException("The key " + secretName
          + " is not correct. Expected value is " + ("password" + i)
          + ". Actual value is " + secretValueStr); // fail the test
    }
  }
}
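For this check to pass, the secrets must have been placed in the job's Credentials before submission; a minimal sketch of that setup, mirroring the alias/password pattern the test expects:

// Sketch: populate the Credentials that checkSecrets() later verifies.
Credentials ts = new Credentials();
for (int i = 0; i < NUM_OF_KEYS; i++) {
  ts.addSecretKey(new Text("alias" + i), ("password" + i).getBytes());
}
// The credentials travel with the job; tasks read them back via getSecretKey().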
Project: hadoop    File: TestFileSystemTokens.java
@Test
public void testFsWithChildTokens() throws Exception {
  Credentials credentials = new Credentials();
  Text service1 = new Text("singleTokenFs1");
  Text service2 = new Text("singleTokenFs2");

  MockFileSystem fs1 = createFileSystemForServiceName(service1);
  MockFileSystem fs2 = createFileSystemForServiceName(service2);
  MockFileSystem fs3 = createFileSystemForServiceName(null);
  MockFileSystem multiFs = 
      createFileSystemForServiceName(null, fs1, fs2, fs3);

  multiFs.addDelegationTokens(renewer, credentials);
  verifyTokenFetch(multiFs, false); // has no tokens of own, only child tokens
  verifyTokenFetch(fs1, true);
  verifyTokenFetch(fs2, true);
  verifyTokenFetch(fs3, false);

  assertEquals(2, credentials.numberOfTokens());
  assertNotNull(credentials.getToken(service1));
  assertNotNull(credentials.getToken(service2));
}
Project: hadoop    File: ContainerManagerImpl.java
private Credentials parseCredentials(ContainerLaunchContext launchContext)
    throws IOException {
  Credentials credentials = new Credentials();
  // //////////// Parse credentials
  ByteBuffer tokens = launchContext.getTokens();

  if (tokens != null) {
    DataInputByteBuffer buf = new DataInputByteBuffer();
    tokens.rewind();
    buf.reset(tokens);
    credentials.readTokenStorageStream(buf);
    if (LOG.isDebugEnabled()) {
      for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
        LOG.debug(tk.getService() + " = " + tk.toString());
      }
    }
  }
  // //////////// End of parsing credentials
  return credentials;
}
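The token ByteBuffer parsed here is produced by the inverse operation on the client side, visible in the YarnClientImpl snippet further down. A condensed sketch of the writer side (credentials and launchContext as in the surrounding code):

// Sketch: serialize Credentials into the token buffer of a launch context.
DataOutputBuffer dob = new DataOutputBuffer();
credentials.writeTokenStorageToStream(dob);
ByteBuffer tokenBuffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
launchContext.setTokens(tokenBuffer); // parseCredentials() above reads this back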
Project: hadoop    File: TestFileSystemTokens.java
@Test
public void testFsWithChildTokensOneExists() throws Exception {
  Credentials credentials = new Credentials();
  Text service1 = new Text("singleTokenFs1");
  Text service2 = new Text("singleTokenFs2");
  Token<?> token = mock(Token.class);
  credentials.addToken(service2, token);

  MockFileSystem fs1 = createFileSystemForServiceName(service1);
  MockFileSystem fs2 = createFileSystemForServiceName(service2);
  MockFileSystem fs3 = createFileSystemForServiceName(null);
  MockFileSystem multiFs = createFileSystemForServiceName(null, fs1, fs2, fs3);

  multiFs.addDelegationTokens(renewer, credentials);
  verifyTokenFetch(multiFs, false);
  verifyTokenFetch(fs1, true);
  verifyTokenFetch(fs2, false); // we had added its token to credentials
  verifyTokenFetch(fs3, false);

  assertEquals(2, credentials.numberOfTokens());
  assertNotNull(credentials.getToken(service1));
  assertSame(token, credentials.getToken(service2));
}
Project: hadoop    File: TestContainerLocalizer.java
@SuppressWarnings({ "rawtypes", "unchecked" })
static DataInputBuffer createFakeCredentials(Random r, int nTok)
      throws IOException {
    Credentials creds = new Credentials();
    byte[] password = new byte[20];
    Text kind = new Text();
    Text service = new Text();
    Text alias = new Text();
    for (int i = 0; i < nTok; ++i) {
      byte[] identifier = ("idef" + i).getBytes();
      r.nextBytes(password);
      kind.set("kind" + i);
      service.set("service" + i);
      alias.set("token" + i);
      Token token = new Token(identifier, password, kind, service);
      creds.addToken(alias, token);
    }
    DataOutputBuffer buf = new DataOutputBuffer();
    creds.writeTokenStorageToStream(buf);
    DataInputBuffer ret = new DataInputBuffer();
    ret.reset(buf.getData(), 0, buf.getLength());
    return ret;
  }
Project: hadoop    File: ViewFileSystemBaseTest.java
@Test
public void testGetDelegationTokensWithCredentials() throws IOException {
  Credentials credentials = new Credentials();
  List<Token<?>> delTokens =
      Arrays.asList(fsView.addDelegationTokens("sanjay", credentials));

  int expectedTokenCount = getExpectedDelegationTokenCountWithCredentials();

  Assert.assertEquals(expectedTokenCount, delTokens.size());
  Credentials newCredentials = new Credentials();
  for (int i = 0; i < expectedTokenCount / 2; i++) {
    Token<?> token = delTokens.get(i);
    newCredentials.addToken(token.getService(), token);
  }

  List<Token<?>> delTokens2 =
      Arrays.asList(fsView.addDelegationTokens("sanjay", newCredentials));
  Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens2.size());
}
Project: hadoop    File: TestCredentials.java
@Test
public void testAddTokensToUGI() {
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser("someone");
  Credentials creds = new Credentials();

  for (int i=0; i < service.length; i++) {
    creds.addToken(service[i], token[i]);
  }
  ugi.addCredentials(creds);

  creds = ugi.getCredentials();
  for (int i=0; i < service.length; i++) {
    assertSame(token[i], creds.getToken(service[i]));
  }
  assertEquals(service.length, creds.numberOfTokens());
}
Project: hadoop    File: ApplicationAttemptStateDataPBImpl.java
private static Credentials convertCredentialsFromByteBuffer(
    ByteBuffer appAttemptTokens) {
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  try {
    Credentials credentials = null;
    if (appAttemptTokens != null) {
      credentials = new Credentials();
      appAttemptTokens.rewind();
      dibb.reset(appAttemptTokens);
      credentials.readTokenStorageStream(dibb);
    }
    return credentials;
  } catch (IOException e) {
    LOG.error("Failed to convert Credentials from ByteBuffer.");
    assert false;
    return null;
  } finally {
    IOUtils.closeStream(dibb);
  }
}
Project: hadoop    File: TokenCache.java
/**
 * Load the job token from a file.
 * @deprecated Use {@link Credentials#readTokenStorageFile} instead;
 * this method is retained for compatibility with Hadoop 1.
 * @param jobTokenFile path of the file holding the job token
 * @param conf the job configuration
 * @throws IOException if the file cannot be read
 */
@InterfaceAudience.Private
@Deprecated
public static Credentials loadTokens(String jobTokenFile, JobConf conf)
throws IOException {
  Path localJobTokenFile = new Path("file:///" + jobTokenFile);

  Credentials ts = Credentials.readTokenStorageFile(localJobTokenFile, conf);

  if (LOG.isDebugEnabled()) {
    LOG.debug("Task: Loaded jobTokenFile from: "
        + localJobTokenFile.toUri().getPath()
        + "; num of sec keys = " + ts.numberOfSecretKeys()
        + "; number of tokens = " + ts.numberOfTokens());
  }
  return ts;
}
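For new code, the replacement named in the deprecation note is a direct drop-in; a sketch:

// Preferred, non-deprecated equivalent of loadTokens():
Path localJobTokenFile = new Path("file:///" + jobTokenFile);
Credentials ts = Credentials.readTokenStorageFile(localJobTokenFile, conf);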
Project: hadoop    File: YARNRunner.java
@VisibleForTesting
void addHistoryToken(Credentials ts) throws IOException, InterruptedException {
  /* Check if we have an HS proxy; if not, there is no need. */
  MRClientProtocol hsProxy = clientCache.getInitializedHSProxy();
  if (UserGroupInformation.isSecurityEnabled() && (hsProxy != null)) {
    /*
     * Note that getDelegationToken was called. This is a hack for Oozie,
     * to make sure we add the history server delegation token to the
     * credentials.
     */
    RMDelegationTokenSelector tokenSelector = new RMDelegationTokenSelector();
    Text service = resMgrDelegate.getRMDelegationTokenService();
    if (tokenSelector.selectToken(service, ts.getAllTokens()) != null) {
      Text hsService = SecurityUtil.buildTokenService(hsProxy
          .getConnectAddress());
      if (ts.getToken(hsService) == null) {
        ts.addToken(hsService, getDelegationTokenFromHS(hsProxy));
      }
    }
  }
}
Project: angel    File: PSAgentAttempt.java
static ContainerLaunchContext createContainerLaunchContext(
    Map<ApplicationAccessType, String> applicationACLs, Configuration conf,
    PSAgentAttemptId attemptId, final ApplicationId appid, MasterService masterService,
    Credentials credentials) {

  synchronized (commonContainerSpecLock) {
    if (commonContainerSpec == null) {
      commonContainerSpec =
          createCommonContainerLaunchContext(masterService, applicationACLs, conf, appid,
              credentials);
    }
  }

  Map<String, String> env = commonContainerSpec.getEnvironment();
  Map<String, String> myEnv = new HashMap<String, String>(env.size());
  myEnv.putAll(env);

  Apps.addToEnvironment(myEnv, AngelEnvironment.PSAGENT_ID.name(),
      Integer.toString(attemptId.getPsAgentId().getIndex()));
  Apps.addToEnvironment(myEnv, AngelEnvironment.PSAGENT_ATTEMPT_ID.name(),
      Integer.toString(attemptId.getIndex()));

  //ParameterServerJVM.setVMEnv(myEnv, conf);

  // Set up the launch command
  List<String> commands = PSAgentAttemptJVM.getVMCommand(conf, appid, attemptId);

  // Duplicate the ByteBuffers for access by multiple containers.
  Map<String, ByteBuffer> myServiceData = new HashMap<String, ByteBuffer>();
  for (Entry<String, ByteBuffer> entry : commonContainerSpec.getServiceData().entrySet()) {
    myServiceData.put(entry.getKey(), entry.getValue().duplicate());
  }

  // Construct the actual Container
  ContainerLaunchContext container =
      ContainerLaunchContext.newInstance(commonContainerSpec.getLocalResources(), myEnv,
          commands, myServiceData, commonContainerSpec.getTokens().duplicate(), applicationACLs);

  return container;
}
Project: hadoop    File: TestRecovery.java
private MapTaskImpl getMockMapTask(long clusterTimestamp, EventHandler eh) {

    ApplicationId appId = ApplicationId.newInstance(clusterTimestamp, 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);

    int partitions = 2;

    Path remoteJobConfFile = mock(Path.class);
    JobConf conf = new JobConf();
    TaskAttemptListener taskAttemptListener = mock(TaskAttemptListener.class);
    Token<JobTokenIdentifier> jobToken =
        (Token<JobTokenIdentifier>) mock(Token.class);
    Credentials credentials = null;
    Clock clock = new SystemClock();
    int appAttemptId = 3;
    MRAppMetrics metrics = mock(MRAppMetrics.class);
    Resource minContainerRequirements = mock(Resource.class);
    when(minContainerRequirements.getMemory()).thenReturn(1000);

    ClusterInfo clusterInfo = mock(ClusterInfo.class);
    AppContext appContext = mock(AppContext.class);
    when(appContext.getClusterInfo()).thenReturn(clusterInfo);

    TaskSplitMetaInfo taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
    MapTaskImpl mapTask = new MapTaskImpl(jobId, partitions,
        eh, remoteJobConfFile, conf,
        taskSplitMetaInfo, taskAttemptListener, jobToken, credentials, clock,
        appAttemptId, metrics, appContext);
    return mapTask;
  }
Project: hadoop    File: YarnClientImpl.java
private void addTimelineDelegationToken(
    ContainerLaunchContext clc) throws YarnException, IOException {
  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = clc.getTokens();
  if (tokens != null) {
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    tokens.rewind();
  }
  // If the timeline delegation token is already in the CLC, no need to add
  // one more
  for (org.apache.hadoop.security.token.Token<? extends TokenIdentifier> token : credentials
      .getAllTokens()) {
    if (token.getKind().equals(TimelineDelegationTokenIdentifier.KIND_NAME)) {
      return;
    }
  }
  org.apache.hadoop.security.token.Token<TimelineDelegationTokenIdentifier>
      timelineDelegationToken = getTimelineDelegationToken();
  if (timelineDelegationToken == null) {
    return;
  }
  credentials.addToken(timelineService, timelineDelegationToken);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Add timline delegation token into credentials: "
        + timelineDelegationToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  clc.setTokens(tokens);
}
Project: hadoop-oss    File: TestCredentialProviderFactory.java
@Test
public void testUserProvider() throws Exception {
  Configuration conf = new Configuration();
  final String ourUrl = UserProvider.SCHEME_NAME + ":///";
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
  checkSpecificProvider(conf, ourUrl);
  // see if the credentials are actually in the UGI
  Credentials credentials =
      UserGroupInformation.getCurrentUser().getCredentials();
  assertArrayEquals(new byte[]{'1', '2', '3'},
      credentials.getSecretKey(new Text("pass2")));
}
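Outside of tests, the usual consumer of a configured provider is Configuration.getPassword, which consults the providers on the credential provider path before falling back to a value set directly in the configuration. A minimal sketch, assuming a provider already holds an entry under the alias "pass2" as in the test above:

Configuration conf = new Configuration();
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
    UserProvider.SCHEME_NAME + ":///");
// Resolved from the provider if present; otherwise from conf itself.
char[] pass2 = conf.getPassword("pass2");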
Project: hadoop    File: ReduceTaskAttemptImpl.java
public ReduceTaskAttemptImpl(TaskId id, int attempt,
    EventHandler eventHandler, Path jobFile, int partition,
    int numMapTasks, JobConf conf,
    TaskAttemptListener taskAttemptListener,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    AppContext appContext) {
  super(id, attempt, eventHandler, taskAttemptListener, jobFile, partition,
      conf, new String[] {}, jobToken, credentials, clock,
      appContext);
  this.numMapTasks = numMapTasks;
}
Project: hadoop    File: MapTaskAttemptImpl.java
public MapTaskAttemptImpl(TaskId taskId, int attempt, 
    EventHandler eventHandler, Path jobFile, 
    int partition, TaskSplitMetaInfo splitInfo, JobConf conf,
    TaskAttemptListener taskAttemptListener, 
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    AppContext appContext) {
  super(taskId, attempt, eventHandler, 
      taskAttemptListener, jobFile, partition, conf, splitInfo.getLocations(),
      jobToken, credentials, clock, appContext);
  this.splitInfo = splitInfo;
}
Project: hadoop-oss    File: ViewFileSystemBaseTest.java
/**
 * This default implementation applies when viewfs has mount points into
 * file systems, such as LocalFs, that do not have delegation tokens.
 * It should be overridden when there are mount points into HDFS.
 */
@Test
public void testGetDelegationTokens() throws IOException {
  Token<?>[] delTokens = 
      fsView.addDelegationTokens("sanjay", new Credentials());
  Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.length); 
}
Project: hadoop-oss    File: TestFileSystemTokens.java
@Test
public void testFsWithNoToken() throws Exception {
  MockFileSystem fs = createFileSystemForServiceName(null);  
  Credentials credentials = new Credentials();

  fs.addDelegationTokens(renewer, credentials);
  verifyTokenFetch(fs, false);
  assertEquals(0, credentials.numberOfTokens());
}
Project: hadoop-oss    File: TestFileSystemTokens.java
@Test
public void testFsWithTokenExists() throws Exception {
  Credentials credentials = new Credentials();
  Text service = new Text("singleTokenFs");
  MockFileSystem fs = createFileSystemForServiceName(service);
  Token<?> token = mock(Token.class);
  credentials.addToken(service, token);

  fs.addDelegationTokens(renewer, credentials);
  verifyTokenFetch(fs, false);

  assertEquals(1, credentials.numberOfTokens());
  assertSame(token, credentials.getToken(service));
}
Project: hadoop    File: LocalJobRunner.java
public org.apache.hadoop.mapreduce.JobStatus submitJob(
    org.apache.hadoop.mapreduce.JobID jobid, String jobSubmitDir,
    Credentials credentials) throws IOException {
  Job job = new Job(JobID.downgrade(jobid), jobSubmitDir);
  job.job.setCredentials(credentials);
  return job.status;
}
Project: hadoop    File: TestEncryptionZones.java
/**
 * Tests obtaining a delegation token from a stored key.
 */
@Test(timeout = 120000)
public void testDelegationToken() throws Exception {
  UserGroupInformation.createRemoteUser("JobTracker");
  DistributedFileSystem dfs = cluster.getFileSystem();
  KeyProvider keyProvider = Mockito.mock(KeyProvider.class,
      withSettings().extraInterfaces(
          DelegationTokenExtension.class,
          CryptoExtension.class));
  Mockito.when(keyProvider.getConf()).thenReturn(conf);
  byte[] testIdentifier = "Test identifier for delegation token".getBytes();

  Token<?> testToken = new Token(testIdentifier, new byte[0],
      new Text(), new Text());
  Mockito.when(((DelegationTokenExtension)keyProvider).
      addDelegationTokens(anyString(), (Credentials)any())).
      thenReturn(new Token<?>[] { testToken });

  dfs.getClient().setKeyProvider(keyProvider);

  Credentials creds = new Credentials();
  final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
  DistributedFileSystem.LOG.debug("Delegation tokens: " +
      Arrays.asList(tokens));
  Assert.assertEquals(2, tokens.length);
  Assert.assertEquals(tokens[1], testToken);
  Assert.assertEquals(1, creds.numberOfTokens());
}
Project: hadoop-oss    File: TestFileSystemTokens.java
@Test
public void testFsWithNestedDuplicatesChildren() throws Exception {
  Credentials credentials = new Credentials();
  Text service1 = new Text("singleTokenFs1");
  Text service2 = new Text("singleTokenFs2");
  Text service4 = new Text("singleTokenFs4");
  Text multiService = new Text("multiTokenFs");
  Token<?> token2 = mock(Token.class);
  credentials.addToken(service2, token2);

  MockFileSystem fs1 = createFileSystemForServiceName(service1);
  MockFileSystem fs1B = createFileSystemForServiceName(service1);
  MockFileSystem fs2 = createFileSystemForServiceName(service2);
  MockFileSystem fs3 = createFileSystemForServiceName(null);
  MockFileSystem fs4 = createFileSystemForServiceName(service4);
  // now let's get dirty!  ensure dup tokens aren't fetched even when
  // repeated and dupped in a nested fs.  fs4 is a real test of the drill
  // down: multi-filter-multi-filter-filter-fs4.
  MockFileSystem multiFs = createFileSystemForServiceName(multiService,
      fs1, fs1B, fs2, fs2, new FilterFileSystem(fs3),
      new FilterFileSystem(new FilterFileSystem(fs4)));
  MockFileSystem superMultiFs = createFileSystemForServiceName(null,
      fs1, fs1B, fs1, new FilterFileSystem(fs3), new FilterFileSystem(multiFs));
  superMultiFs.addDelegationTokens(renewer, credentials);
  verifyTokenFetch(superMultiFs, false); // does not have its own token
  verifyTokenFetch(multiFs, true); // has its own token
  verifyTokenFetch(fs1, true);
  verifyTokenFetch(fs2, false); // we had added its token to credentials
  verifyTokenFetch(fs3, false); // has no tokens
  verifyTokenFetch(fs4, true);

  assertEquals(4, credentials.numberOfTokens()); //fs1+fs2+fs4+multifs (fs3=0)
  assertNotNull(credentials.getToken(service1));
  assertNotNull(credentials.getToken(service2));
  assertSame(token2, credentials.getToken(service2));
  assertNotNull(credentials.getToken(multiService));
  assertNotNull(credentials.getToken(service4));
}
Project: hadoop    File: TestRMRestart.java
@Test (timeout = 60000)
public void testAppSubmissionWithOldDelegationTokenAfterRMRestart()
    throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  conf.set(YarnConfiguration.RM_ADDRESS, "localhost:8032");
  UserGroupInformation.setConfiguration(conf);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);

  MockRM rm1 = new TestSecurityMockRM(conf, memStore);
  rm1.start();

  GetDelegationTokenRequest request1 =
      GetDelegationTokenRequest.newInstance("renewer1");
  UserGroupInformation.getCurrentUser().setAuthenticationMethod(
      AuthMethod.KERBEROS);
  GetDelegationTokenResponse response1 =
      rm1.getClientRMService().getDelegationToken(request1);
  Token<RMDelegationTokenIdentifier> token1 =
      ConverterUtils.convertFromYarn(response1.getRMDelegationToken(), rmAddr);

  // start new RM
  MockRM rm2 = new TestSecurityMockRM(conf, memStore);
  rm2.start();

  // submit an app with the old delegation token got from previous RM.
  Credentials ts = new Credentials();
  ts.addToken(token1.getService(), token1);
  RMApp app = rm2.submitApp(200, "name", "user",
      new HashMap<ApplicationAccessType, String>(), false, "default", 1, ts);
  rm2.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
}
Project: hadoop    File: ViewFileSystemBaseTest.java
/**
 * This default implementation applies when viewfs has mount points into
 * file systems, such as LocalFs, that do not have delegation tokens.
 * It should be overridden when there are mount points into HDFS.
 */
@Test
public void testGetDelegationTokens() throws IOException {
  Token<?>[] delTokens = 
      fsView.addDelegationTokens("sanjay", new Credentials());
  Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.length); 
}
Project: hadoop    File: TestDelegationTokenRenewer.java
@Test
public void testFSLeakInObtainSystemTokensForUser() throws Exception{
  Credentials credentials = new Credentials();
  String user = "test";
  int oldCounter = MyFS.getInstanceCounter();
  delegationTokenRenewer.obtainSystemTokensForUser(user, credentials);
  delegationTokenRenewer.obtainSystemTokensForUser(user, credentials);
  delegationTokenRenewer.obtainSystemTokensForUser(user, credentials);
  Assert.assertEquals(oldCounter, MyFS.getInstanceCounter());
}
Project: hadoop    File: NamenodeWebHdfsMethods.java
private Token<? extends TokenIdentifier> generateDelegationToken(
    final NameNode namenode, final UserGroupInformation ugi,
    final String renewer) throws IOException {
  final Credentials c = DelegationTokenSecretManager.createCredentials(
      namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
  if (c == null) {
    return null;
  }
  final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
  Text kind = request.getScheme().equals("http") ? WebHdfsFileSystem.TOKEN_KIND
      : SWebHdfsFileSystem.TOKEN_KIND;
  t.setKind(kind);
  return t;
}
Project: hadoop    File: TestDelegationToken.java
@Test
public void testAddDelegationTokensDFSApi() throws Exception {
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser("JobTracker");
  DistributedFileSystem dfs = cluster.getFileSystem();
  Credentials creds = new Credentials();
  final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
  Assert.assertEquals(1, tokens.length);
  Assert.assertEquals(1, creds.numberOfTokens());
  checkTokenIdentifier(ugi, tokens[0]);

  final Token<?> tokens2[] = dfs.addDelegationTokens("JobTracker", creds);
  Assert.assertEquals(0, tokens2.length); // already have token
  Assert.assertEquals(1, creds.numberOfTokens());
}
Project: hadoop    File: LogHandlerAppStartedEvent.java
public LogHandlerAppStartedEvent(ApplicationId appId, String user,
    Credentials credentials, ContainerLogsRetentionPolicy retentionPolicy,
    Map<ApplicationAccessType, String> appAcls,
    LogAggregationContext logAggregationContext) {
  super(LogHandlerEventType.APPLICATION_STARTED);
  this.applicationId = appId;
  this.user = user;
  this.credentials = credentials;
  this.retentionPolicy = retentionPolicy;
  this.appAcls = appAcls;
  this.logAggregationContext = logAggregationContext;
}
Project: hadoop    File: ContainerManagerImpl.java
@SuppressWarnings("unchecked")
private void recoverContainer(RecoveredContainerState rcs)
    throws IOException {
  StartContainerRequest req = rcs.getStartRequest();
  ContainerLaunchContext launchContext = req.getContainerLaunchContext();
  ContainerTokenIdentifier token =
      BuilderUtils.newContainerTokenIdentifier(req.getContainerToken());
  ContainerId containerId = token.getContainerID();
  ApplicationId appId =
      containerId.getApplicationAttemptId().getApplicationId();

  LOG.info("Recovering " + containerId + " in state " + rcs.getStatus()
      + " with exit code " + rcs.getExitCode());

  if (context.getApplications().containsKey(appId)) {
    Credentials credentials = parseCredentials(launchContext);
    Container container = new ContainerImpl(getConfig(), dispatcher,
        context.getNMStateStore(), req.getContainerLaunchContext(),
        credentials, metrics, token, rcs.getStatus(), rcs.getExitCode(),
        rcs.getDiagnostics(), rcs.getKilled());
    context.getContainers().put(containerId, container);
    dispatcher.getEventHandler().handle(
        new ApplicationContainerInitEvent(container));
  } else {
    if (rcs.getStatus() != RecoveredContainerStatus.COMPLETED) {
      LOG.warn(containerId + " has no corresponding application!");
    }
    LOG.info("Adding " + containerId + " to recently stopped containers");
    nodeStatusUpdater.addCompletedContainer(containerId);
  }
}
Project: hadoop    File: ContainerManagerImpl.java
private ContainerManagerApplicationProto buildAppProto(ApplicationId appId,
    String user, Credentials credentials,
    Map<ApplicationAccessType, String> appAcls,
    LogAggregationContext logAggregationContext) {

  ContainerManagerApplicationProto.Builder builder =
      ContainerManagerApplicationProto.newBuilder();
  builder.setId(((ApplicationIdPBImpl) appId).getProto());
  builder.setUser(user);

  if (logAggregationContext != null) {
    builder.setLogAggregationContext((
        (LogAggregationContextPBImpl)logAggregationContext).getProto());
  }

  builder.clearCredentials();
  if (credentials != null) {
    DataOutputBuffer dob = new DataOutputBuffer();
    try {
      credentials.writeTokenStorageToStream(dob);
      builder.setCredentials(ByteString.copyFrom(dob.getData()));
    } catch (IOException e) {
      // should not occur
      LOG.error("Cannot serialize credentials", e);
    }
  }

  builder.clearAcls();
  if (appAcls != null) {
    for (Map.Entry<ApplicationAccessType, String> acl : appAcls.entrySet()) {
      ApplicationACLMapProto p = ApplicationACLMapProto.newBuilder()
          .setAccessType(ProtoUtils.convertToProtoFormat(acl.getKey()))
          .setAcl(acl.getValue())
          .build();
      builder.addAcls(p);
    }
  }

  return builder.build();
}
Project: hadoop    File: TestCredentials.java
@Test
public void addAll() {
  Credentials creds = new Credentials();
  creds.addToken(service[0], token[0]);
  creds.addToken(service[1], token[1]);
  creds.addSecretKey(secret[0], secret[0].getBytes());
  creds.addSecretKey(secret[1], secret[1].getBytes());

  Credentials credsToAdd = new Credentials();
  // one duplicate with different value, one new
  credsToAdd.addToken(service[0], token[3]);
  credsToAdd.addToken(service[2], token[2]);
  credsToAdd.addSecretKey(secret[0], secret[3].getBytes());
  credsToAdd.addSecretKey(secret[2], secret[2].getBytes());

  creds.addAll(credsToAdd);
  assertEquals(3, creds.numberOfTokens());
  assertEquals(3, creds.numberOfSecretKeys());
  // existing token & secret should be overwritten
  assertEquals(token[3], creds.getToken(service[0]));
  assertEquals(secret[3], new Text(creds.getSecretKey(secret[0])));
  // non-duplicate token & secret should be present
  assertEquals(token[1], creds.getToken(service[1]));
  assertEquals(secret[1], new Text(creds.getSecretKey(secret[1])));
  // new token & secret should be added
  assertEquals(token[2], creds.getToken(service[2]));
  assertEquals(secret[2], new Text(creds.getSecretKey(secret[2])));
}
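Credentials also offers mergeAll, which differs from addAll only on alias collisions: addAll overwrites the existing entry (as the assertions above show), while mergeAll keeps it. A short sketch of the contrast, reusing the test's fixtures:

Credentials base = new Credentials();
base.addToken(service[0], token[0]);

Credentials incoming = new Credentials();
incoming.addToken(service[0], token[3]);

base.mergeAll(incoming);
// base.getToken(service[0]) is still token[0]; after addAll() it would be token[3].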