Java Class org.apache.hadoop.mapreduce.security.TokenCache Example Source Code
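
The snippets below, collected from various open-source projects, show how org.apache.hadoop.mapreduce.security.TokenCache is used in practice, most commonly via TokenCache.obtainTokensForNamenodes to fetch HDFS delegation tokens for a job's input, output, and cache paths before submission. As a quick orientation, here is a minimal, hypothetical driver-side sketch (the class name and paths are illustrative and not taken from any of the projects listed); it assumes a Kerberos-secured cluster where delegation tokens are actually issued:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.security.TokenCache;

public class TokenCacheExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "token-cache-example");

    // Hypothetical paths; on a secured cluster this adds a delegation token
    // for each path's NameNode to the job's credentials.
    Path[] paths = { new Path("hdfs:///user/example/input"),
                     new Path("hdfs:///user/example/output") };
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), paths, conf);

    // The credentials travel with the submitted job; the output formats in
    // the examples below make the same call from checkOutputSpecs.
  }
}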

Project: hadoop    File: FileOutputFormat.java
public void checkOutputSpecs(FileSystem ignored, JobConf job) 
  throws FileAlreadyExistsException, 
         InvalidJobConfException, IOException {
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (outDir != null) {
    FileSystem fs = outDir.getFileSystem(job);
    // normalize the output directory
    outDir = fs.makeQualified(outDir);
    setOutputPath(job, outDir);

    // get delegation token for the outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), 
                                        new Path[] {outDir}, job);

    // check its existence
    if (fs.exists(outDir)) {
      throw new FileAlreadyExistsException("Output directory " + outDir + 
                                           " already exists");
    }
  }
}
Project: hadoop    File: ClientDistributedCacheManager.java
/**
 * For each archive or cache file - get the corresponding delegation token
 * @param job
 * @param credentials
 * @throws IOException
 */
public static void getDelegationTokens(Configuration job,
    Credentials credentials) throws IOException {
  URI[] tarchives = DistributedCache.getCacheArchives(job);
  URI[] tfiles = DistributedCache.getCacheFiles(job);

  int size = (tarchives!=null? tarchives.length : 0) + (tfiles!=null ? tfiles.length :0);
  Path[] ps = new Path[size];

  int i = 0;
  if (tarchives != null) {
    for (i=0; i < tarchives.length; i++) {
      ps[i] = new Path(tarchives[i].toString());
    }
  }

  if (tfiles != null) {
    for(int j=0; j< tfiles.length; j++) {
      ps[i+j] = new Path(tfiles[j].toString());
    }
  }

  TokenCache.obtainTokensForNamenodes(credentials, ps, job);
}
Project: hadoop    File: FileOutputFormat.java
public void checkOutputSpecs(JobContext job
                             ) throws FileAlreadyExistsException, IOException{
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null) {
    throw new InvalidJobConfException("Output directory not set.");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { outDir }, job.getConfiguration());

  if (outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
    throw new FileAlreadyExistsException("Output directory " + outDir + 
                                         " already exists");
  }
}
Project: hadoop    File: CopyOutputFormat.java
/** {@inheritDoc} */
@Override
public void checkOutputSpecs(JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();

  if (getCommitDirectory(conf) == null) {
    throw new IllegalStateException("Commit directory not configured");
  }

  Path workingPath = getWorkingDirectory(conf);
  if (workingPath == null) {
    throw new IllegalStateException("Working directory not configured");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(context.getCredentials(),
                                      new Path[] {workingPath}, conf);
}
Project: aliyun-oss-hadoop-fs    File: FileOutputFormat.java
public void checkOutputSpecs(FileSystem ignored, JobConf job) 
  throws FileAlreadyExistsException, 
         InvalidJobConfException, IOException {
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (outDir != null) {
    FileSystem fs = outDir.getFileSystem(job);
    // normalize the output directory
    outDir = fs.makeQualified(outDir);
    setOutputPath(job, outDir);

    // get delegation token for the outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), 
                                        new Path[] {outDir}, job);

    // check its existence
    if (fs.exists(outDir)) {
      throw new FileAlreadyExistsException("Output directory " + outDir + 
                                           " already exists");
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: ClientDistributedCacheManager.java
/**
 * For each archive or cache file - get the corresponding delegation token
 * @param job
 * @param credentials
 * @throws IOException
 */
public static void getDelegationTokens(Configuration job,
    Credentials credentials) throws IOException {
  URI[] tarchives = DistributedCache.getCacheArchives(job);
  URI[] tfiles = DistributedCache.getCacheFiles(job);

  int size = (tarchives!=null? tarchives.length : 0) + (tfiles!=null ? tfiles.length :0);
  Path[] ps = new Path[size];

  int i = 0;
  if (tarchives != null) {
    for (i=0; i < tarchives.length; i++) {
      ps[i] = new Path(tarchives[i].toString());
    }
  }

  if (tfiles != null) {
    for(int j=0; j< tfiles.length; j++) {
      ps[i+j] = new Path(tfiles[j].toString());
    }
  }

  TokenCache.obtainTokensForNamenodes(credentials, ps, job);
}
Project: aliyun-oss-hadoop-fs    File: FileOutputFormat.java
public void checkOutputSpecs(JobContext job
                             ) throws FileAlreadyExistsException, IOException{
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null) {
    throw new InvalidJobConfException("Output directory not set.");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { outDir }, job.getConfiguration());

  if (outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
    throw new FileAlreadyExistsException("Output directory " + outDir + 
                                         " already exists");
  }
}
Project: aliyun-oss-hadoop-fs    File: CopyOutputFormat.java
/** {@inheritDoc} */
@Override
public void checkOutputSpecs(JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();

  if (getCommitDirectory(conf) == null) {
    throw new IllegalStateException("Commit directory not configured");
  }

  Path workingPath = getWorkingDirectory(conf);
  if (workingPath == null) {
    throw new IllegalStateException("Working directory not configured");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(context.getCredentials(),
                                      new Path[] {workingPath}, conf);
}
Project: big-c    File: FileOutputFormat.java
public void checkOutputSpecs(FileSystem ignored, JobConf job) 
  throws FileAlreadyExistsException, 
         InvalidJobConfException, IOException {
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (outDir != null) {
    FileSystem fs = outDir.getFileSystem(job);
    // normalize the output directory
    outDir = fs.makeQualified(outDir);
    setOutputPath(job, outDir);

    // get delegation token for the outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), 
                                        new Path[] {outDir}, job);

    // check its existence
    if (fs.exists(outDir)) {
      throw new FileAlreadyExistsException("Output directory " + outDir + 
                                           " already exists");
    }
  }
}
Project: big-c    File: ClientDistributedCacheManager.java
/**
 * For each archive or cache file - get the corresponding delegation token
 * @param job
 * @param credentials
 * @throws IOException
 */
public static void getDelegationTokens(Configuration job,
    Credentials credentials) throws IOException {
  URI[] tarchives = DistributedCache.getCacheArchives(job);
  URI[] tfiles = DistributedCache.getCacheFiles(job);

  int size = (tarchives!=null? tarchives.length : 0) + (tfiles!=null ? tfiles.length :0);
  Path[] ps = new Path[size];

  int i = 0;
  if (tarchives != null) {
    for (i=0; i < tarchives.length; i++) {
      ps[i] = new Path(tarchives[i].toString());
    }
  }

  if (tfiles != null) {
    for(int j=0; j< tfiles.length; j++) {
      ps[i+j] = new Path(tfiles[j].toString());
    }
  }

  TokenCache.obtainTokensForNamenodes(credentials, ps, job);
}
Project: big-c    File: FileOutputFormat.java
public void checkOutputSpecs(JobContext job
                             ) throws FileAlreadyExistsException, IOException{
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null) {
    throw new InvalidJobConfException("Output directory not set.");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { outDir }, job.getConfiguration());

  if (outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
    throw new FileAlreadyExistsException("Output directory " + outDir + 
                                         " already exists");
  }
}
Project: big-c    File: CopyOutputFormat.java
/** {@inheritDoc} */
@Override
public void checkOutputSpecs(JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();

  if (getCommitDirectory(conf) == null) {
    throw new IllegalStateException("Commit directory not configured");
  }

  Path workingPath = getWorkingDirectory(conf);
  if (workingPath == null) {
    throw new IllegalStateException("Working directory not configured");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(context.getCredentials(),
                                      new Path[] {workingPath}, conf);
}
Project: emr-dynamodb-connector    File: ImportInputFormat.java
private List<InputSplit> getSplitsFromManifest(JobConf job) throws IOException {
  Path[] dirs = getInputPaths(job);
  if (dirs.length == 0) {
    throw new IOException("No input path specified in job");
  } else if (dirs.length > 1) {
    throw new IOException("Will only look for manifests in a single input directory (" + dirs
        .length + " directories provided).");
  }
  TokenCache.obtainTokensForNamenodes(job.getCredentials(), dirs, job);

  Path dir = dirs[0];

  FileSystem fs = dir.getFileSystem(job);
  if (!fs.getFileStatus(dir).isDirectory()) {
    throw new IOException("Input path not a directory: " + dir);
  }

  Path manifestPath = new Path(dir, ExportManifestOutputFormat.MANIFEST_FILENAME);
  if (!fs.isFile(manifestPath)) {
    return null;
  }

  return parseManifest(fs, manifestPath, job);
}
Project: flink    File: Utils.java
public static void setTokensFor(ContainerLaunchContext amContainer, List<Path> paths, Configuration conf) throws IOException {
    Credentials credentials = new Credentials();
    // for HDFS
    TokenCache.obtainTokensForNamenodes(credentials, paths.toArray(new Path[0]), conf);
    // for HBase
    obtainTokenForHBase(credentials, conf);
    // for user
    UserGroupInformation currUsr = UserGroupInformation.getCurrentUser();

    Collection<Token<? extends TokenIdentifier>> usrTok = currUsr.getTokens();
    for (Token<? extends TokenIdentifier> token : usrTok) {
        final Text id = new Text(token.getIdentifier());
        LOG.info("Adding user token " + id + " with " + token);
        credentials.addToken(id, token);
    }
    try (DataOutputBuffer dob = new DataOutputBuffer()) {
        credentials.writeTokenStorageToStream(dob);

        if (LOG.isDebugEnabled()) {
            LOG.debug("Wrote tokens. Credentials buffer length: " + dob.getLength());
        }

        ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(securityTokens);
    }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FileOutputFormat.java
public void checkOutputSpecs(FileSystem ignored, JobConf job) 
  throws FileAlreadyExistsException, 
         InvalidJobConfException, IOException {
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (outDir != null) {
    FileSystem fs = outDir.getFileSystem(job);
    // normalize the output directory
    outDir = fs.makeQualified(outDir);
    setOutputPath(job, outDir);

    // get delegation token for the outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), 
                                        new Path[] {outDir}, job);

    // check its existence
    if (fs.exists(outDir)) {
      throw new FileAlreadyExistsException("Output directory " + outDir + 
                                           " already exists");
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: ClientDistributedCacheManager.java
/**
 * For each archive or cache file - get the corresponding delegation token
 * @param job
 * @param credentials
 * @throws IOException
 */
public static void getDelegationTokens(Configuration job,
    Credentials credentials) throws IOException {
  URI[] tarchives = DistributedCache.getCacheArchives(job);
  URI[] tfiles = DistributedCache.getCacheFiles(job);

  int size = (tarchives!=null? tarchives.length : 0) + (tfiles!=null ? tfiles.length :0);
  Path[] ps = new Path[size];

  int i = 0;
  if (tarchives != null) {
    for (i=0; i < tarchives.length; i++) {
      ps[i] = new Path(tarchives[i].toString());
    }
  }

  if (tfiles != null) {
    for(int j=0; j< tfiles.length; j++) {
      ps[i+j] = new Path(tfiles[j].toString());
    }
  }

  TokenCache.obtainTokensForNamenodes(credentials, ps, job);
}
Project: hadoop-2.6.0-cdh5.4.3    File: FileOutputFormat.java
public void checkOutputSpecs(JobContext job
                             ) throws FileAlreadyExistsException, IOException{
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null) {
    throw new InvalidJobConfException("Output directory not set.");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { outDir }, job.getConfiguration());

  if (outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
    throw new FileAlreadyExistsException("Output directory " + outDir + 
                                         " already exists");
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: CopyOutputFormat.java
/** {@inheritDoc} */
@Override
public void checkOutputSpecs(JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();

  if (getCommitDirectory(conf) == null) {
    throw new IllegalStateException("Commit directory not configured");
  }

  Path workingPath = getWorkingDirectory(conf);
  if (workingPath == null) {
    throw new IllegalStateException("Working directory not configured");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(context.getCredentials(),
                                      new Path[] {workingPath}, conf);
}
Project: hadoop-2.6.0-cdh5.4.3    File: JobClient.java
@SuppressWarnings("unchecked")
private void populateTokenCache(Configuration conf, Credentials credentials)
throws IOException{
  readTokensFromFiles(conf, credentials);

  // add the delegation tokens from configuration
  String [] nameNodes = conf.getStrings(JobContext.JOB_NAMENODES);
  LOG.debug("adding the following namenodes' delegation tokens:" +
            Arrays.toString(nameNodes));
  if(nameNodes != null) {
    Path [] ps = new Path[nameNodes.length];
    for(int i=0; i< nameNodes.length; i++) {
      ps[i] = new Path(nameNodes[i]);
    }
    TokenCache.obtainTokensForNamenodes(credentials, ps, conf);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FileOutputFormat.java
public void checkOutputSpecs(FileSystem ignored, JobConf job) 
  throws FileAlreadyExistsException, 
         InvalidJobConfException, IOException {
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (outDir != null) {
    FileSystem fs = outDir.getFileSystem(job);
    // normalize the output directory
    outDir = fs.makeQualified(outDir);
    setOutputPath(job, outDir);

    // get delegation token for the outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), 
                                        new Path[] {outDir}, job);

    // check its existence
    if (fs.exists(outDir)) {
      throw new FileAlreadyExistsException("Output directory " + outDir + 
                                           " already exists");
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskTracker.java
/**
 * Download the job-token file from the FS and save on local fs.
 * @param user
 * @param jobId
 * @return the local file system path of the downloaded file.
 * @throws IOException
 */
private String localizeJobTokenFile(String user, JobID jobId)
    throws IOException {
  // check if the tokenJob file is there..
  Path skPath = new Path(systemDirectory,
      jobId.toString()+"/"+TokenCache.JOB_TOKEN_HDFS_FILE);

  FileStatus status = null;
  long jobTokenSize = -1;
  status = systemFS.getFileStatus(skPath); //throws FileNotFoundException
  jobTokenSize = status.getLen();

  Path localJobTokenFile =
      lDirAlloc.getLocalPathForWrite(getPrivateDirJobTokenFile(user,
          jobId.toString()), jobTokenSize, fConf);

  String localJobTokenFileStr = localJobTokenFile.toUri().getPath();
  if(LOG.isDebugEnabled())
    LOG.debug("localizingJobTokenFile from sd="+skPath.toUri().getPath() +
        " to " + localJobTokenFileStr);

  // Download job_token
  systemFS.copyToLocalFile(skPath, localJobTokenFile);
  return localJobTokenFileStr;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TrackerDistributedCacheManager.java
/**
 * For each archive or cache file - get the corresponding delegation token
 * @param job
 * @param credentials
 * @throws IOException
 */
public static void getDelegationTokens(Configuration job, 
                                       Credentials credentials) 
throws IOException {
  URI[] tarchives = DistributedCache.getCacheArchives(job);
  URI[] tfiles = DistributedCache.getCacheFiles(job);

  int size = (tarchives!=null? tarchives.length : 0) + (tfiles!=null ? tfiles.length :0);
  Path[] ps = new Path[size];

  int i = 0;
  if (tarchives != null) {
    for (i=0; i < tarchives.length; i++) {
      ps[i] = new Path(tarchives[i].toString());
    }
  }

  if (tfiles != null) {
    for(int j=0; j< tfiles.length; j++) {
      ps[i+j] = new Path(tfiles[j].toString());
    }
  }

  TokenCache.obtainTokensForNamenodes(credentials, ps, job);
}
Project: hadoop-2.6.0-cdh5.4.3    File: FileOutputFormat.java
public void checkOutputSpecs(JobContext job
                             ) throws FileAlreadyExistsException, IOException{
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null) {
    throw new InvalidJobConfException("Output directory not set.");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(job.getCredentials(), 
                                      new Path[] {outDir}, 
                                      job.getConfiguration());

  if (outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
    throw new FileAlreadyExistsException("Output directory " + outDir + 
                                         " already exists");
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: DistCp.java
/** Sanity check for srcPath */
private static void checkSrcPath(JobConf jobConf, List<Path> srcPaths)
throws IOException {
  List<IOException> rslt = new ArrayList<IOException>();

  Path[] ps = new Path[srcPaths.size()];
  ps = srcPaths.toArray(ps);
  TokenCache.obtainTokensForNamenodes(jobConf.getCredentials(), ps, jobConf);

  for (Path p : srcPaths) {
    FileSystem fs = p.getFileSystem(jobConf);
    if (!fs.exists(p)) {
      rslt.add(new IOException("Input source " + p + " does not exist."));
    }
  }
  if (!rslt.isEmpty()) {
    throw new InvalidInputException(rslt);
  }
}
Project: hadoop-plus    File: FileOutputFormat.java
public void checkOutputSpecs(FileSystem ignored, JobConf job) 
  throws FileAlreadyExistsException, 
         InvalidJobConfException, IOException {
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (outDir != null) {
    FileSystem fs = outDir.getFileSystem(job);
    // normalize the output directory
    outDir = fs.makeQualified(outDir);
    setOutputPath(job, outDir);

    // get delegation token for the outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), 
                                        new Path[] {outDir}, job);

    // check its existence
    if (fs.exists(outDir)) {
      throw new FileAlreadyExistsException("Output directory " + outDir + 
                                           " already exists");
    }
  }
}
Project: hadoop-plus    File: ClientDistributedCacheManager.java
/**
 * For each archive or cache file - get the corresponding delegation token
 * @param job
 * @param credentials
 * @throws IOException
 */
public static void getDelegationTokens(Configuration job,
    Credentials credentials) throws IOException {
  URI[] tarchives = DistributedCache.getCacheArchives(job);
  URI[] tfiles = DistributedCache.getCacheFiles(job);

  int size = (tarchives!=null? tarchives.length : 0) + (tfiles!=null ? tfiles.length :0);
  Path[] ps = new Path[size];

  int i = 0;
  if (tarchives != null) {
    for (i=0; i < tarchives.length; i++) {
      ps[i] = new Path(tarchives[i].toString());
    }
  }

  if (tfiles != null) {
    for(int j=0; j< tfiles.length; j++) {
      ps[i+j] = new Path(tfiles[j].toString());
    }
  }

  TokenCache.obtainTokensForNamenodes(credentials, ps, job);
}
Project: hadoop-plus    File: JobSubmitter.java
@SuppressWarnings("unchecked")
private void populateTokenCache(Configuration conf, Credentials credentials) 
throws IOException{
  readTokensFromFiles(conf, credentials);
  // add the delegation tokens from configuration
  String [] nameNodes = conf.getStrings(MRJobConfig.JOB_NAMENODES);
  LOG.debug("adding the following namenodes' delegation tokens:" + 
      Arrays.toString(nameNodes));
  if(nameNodes != null) {
    Path [] ps = new Path[nameNodes.length];
    for(int i=0; i< nameNodes.length; i++) {
      ps[i] = new Path(nameNodes[i]);
    }
    TokenCache.obtainTokensForNamenodes(credentials, ps, conf);
  }
}
Project: hadoop-plus    File: FileOutputFormat.java
public void checkOutputSpecs(JobContext job
                             ) throws FileAlreadyExistsException, IOException{
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null) {
    throw new InvalidJobConfException("Output directory not set.");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { outDir }, job.getConfiguration());

  if (outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
    throw new FileAlreadyExistsException("Output directory " + outDir + 
                                         " already exists");
  }
}
Project: hadoop-plus    File: CopyOutputFormat.java
/** {@inheritDoc} */
@Override
public void checkOutputSpecs(JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();

  if (getCommitDirectory(conf) == null) {
    throw new IllegalStateException("Commit directory not configured");
  }

  Path workingPath = getWorkingDirectory(conf);
  if (workingPath == null) {
    throw new IllegalStateException("Working directory not configured");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(context.getCredentials(),
                                      new Path[] {workingPath}, conf);
}
Project: FlexMap    File: FileOutputFormat.java
public void checkOutputSpecs(FileSystem ignored, JobConf job) 
  throws FileAlreadyExistsException, 
         InvalidJobConfException, IOException {
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (outDir != null) {
    FileSystem fs = outDir.getFileSystem(job);
    // normalize the output directory
    outDir = fs.makeQualified(outDir);
    setOutputPath(job, outDir);

    // get delegation token for the outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), 
                                        new Path[] {outDir}, job);

    // check its existence
    if (fs.exists(outDir)) {
      throw new FileAlreadyExistsException("Output directory " + outDir + 
                                           " already exists");
    }
  }
}
Project: FlexMap    File: ClientDistributedCacheManager.java
/**
 * For each archive or cache file - get the corresponding delegation token
 * @param job
 * @param credentials
 * @throws IOException
 */
public static void getDelegationTokens(Configuration job,
    Credentials credentials) throws IOException {
  URI[] tarchives = DistributedCache.getCacheArchives(job);
  URI[] tfiles = DistributedCache.getCacheFiles(job);

  int size = (tarchives!=null? tarchives.length : 0) + (tfiles!=null ? tfiles.length :0);
  Path[] ps = new Path[size];

  int i = 0;
  if (tarchives != null) {
    for (i=0; i < tarchives.length; i++) {
      ps[i] = new Path(tarchives[i].toString());
    }
  }

  if (tfiles != null) {
    for(int j=0; j< tfiles.length; j++) {
      ps[i+j] = new Path(tfiles[j].toString());
    }
  }

  TokenCache.obtainTokensForNamenodes(credentials, ps, job);
}
Project: FlexMap    File: FileOutputFormat.java
public void checkOutputSpecs(JobContext job
                             ) throws FileAlreadyExistsException, IOException{
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null) {
    throw new InvalidJobConfException("Output directory not set.");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { outDir }, job.getConfiguration());

  if (outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
    throw new FileAlreadyExistsException("Output directory " + outDir + 
                                         " already exists");
  }
}
Project: DistCPPlus    File: DistCpUtils.java
/** Sanity check for srcPath */
public static void checkSrcPath(JobConf jobConf, List<Path> srcPaths)
throws IOException {
    List<IOException> rslt = new ArrayList<IOException>();

    Path[] ps = new Path[srcPaths.size()];
    ps = srcPaths.toArray(ps);
    TokenCache.obtainTokensForNamenodes(jobConf.getCredentials(), ps, jobConf);

    for (Path p : srcPaths) {
        FileSystem fs = p.getFileSystem(jobConf);
        if (!fs.exists(p)) {
            rslt.add(new IOException("Input source " + p + " does not exist."));
        }
    }
    if (!rslt.isEmpty()) {
        throw new InvalidInputException(rslt);
    }
}
Project: hops    File: FileOutputFormat.java
public void checkOutputSpecs(FileSystem ignored, JobConf job) 
  throws FileAlreadyExistsException, 
         InvalidJobConfException, IOException {
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (outDir != null) {
    FileSystem fs = outDir.getFileSystem(job);
    // normalize the output directory
    outDir = fs.makeQualified(outDir);
    setOutputPath(job, outDir);

    // get delegation token for the outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), 
                                        new Path[] {outDir}, job);

    // check its existence
    if (fs.exists(outDir)) {
      throw new FileAlreadyExistsException("Output directory " + outDir + 
                                           " already exists");
    }
  }
}
Project: hops    File: ClientDistributedCacheManager.java
/**
 * For each archive or cache file - get the corresponding delegation token
 * @param job
 * @param credentials
 * @throws IOException
 */
public static void getDelegationTokens(Configuration job,
    Credentials credentials) throws IOException {
  URI[] tarchives = DistributedCache.getCacheArchives(job);
  URI[] tfiles = DistributedCache.getCacheFiles(job);

  int size = (tarchives!=null? tarchives.length : 0) + (tfiles!=null ? tfiles.length :0);
  Path[] ps = new Path[size];

  int i = 0;
  if (tarchives != null) {
    for (i=0; i < tarchives.length; i++) {
      ps[i] = new Path(tarchives[i].toString());
    }
  }

  if (tfiles != null) {
    for(int j=0; j< tfiles.length; j++) {
      ps[i+j] = new Path(tfiles[j].toString());
    }
  }

  TokenCache.obtainTokensForNamenodes(credentials, ps, job);
}
Project: hops    File: FileOutputFormat.java
public void checkOutputSpecs(JobContext job
                             ) throws FileAlreadyExistsException, IOException{
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null) {
    throw new InvalidJobConfException("Output directory not set.");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { outDir }, job.getConfiguration());

  if (outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
    throw new FileAlreadyExistsException("Output directory " + outDir + 
                                         " already exists");
  }
}
Project: hops    File: CopyOutputFormat.java
/** {@inheritDoc} */
@Override
public void checkOutputSpecs(JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();

  if (getCommitDirectory(conf) == null) {
    throw new IllegalStateException("Commit directory not configured");
  }

  Path workingPath = getWorkingDirectory(conf);
  if (workingPath == null) {
    throw new IllegalStateException("Working directory not configured");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(context.getCredentials(),
                                      new Path[] {workingPath}, conf);
}
Project: asakusafw-compiler    File: TemporaryFileOutputFormat.java
@Override
public void checkOutputSpecs(JobContext context) throws IOException {
    Path path = getOutputPath(context);
    if (path == null) {
        throw new IOException("Temporary output path is not set");
    }
    TokenCache.obtainTokensForNamenodes(
            context.getCredentials(),
            new Path[] { path },
            context.getConfiguration());
    if (path.getFileSystem(context.getConfiguration()).exists(path)) {
        throw new IOException(MessageFormat.format(
                "Output directory {0} already exists",
                path));
    }
}
Project: hadoop-TCP    File: FileOutputFormat.java
public void checkOutputSpecs(FileSystem ignored, JobConf job) 
  throws FileAlreadyExistsException, 
         InvalidJobConfException, IOException {
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (outDir != null) {
    FileSystem fs = outDir.getFileSystem(job);
    // normalize the output directory
    outDir = fs.makeQualified(outDir);
    setOutputPath(job, outDir);

    // get delegation token for the outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), 
                                        new Path[] {outDir}, job);

    // check its existence
    if (fs.exists(outDir)) {
      throw new FileAlreadyExistsException("Output directory " + outDir + 
                                           " already exists");
    }
  }
}
Project: hadoop-TCP    File: ClientDistributedCacheManager.java
/**
 * For each archive or cache file - get the corresponding delegation token
 * @param job
 * @param credentials
 * @throws IOException
 */
public static void getDelegationTokens(Configuration job,
    Credentials credentials) throws IOException {
  URI[] tarchives = DistributedCache.getCacheArchives(job);
  URI[] tfiles = DistributedCache.getCacheFiles(job);

  int size = (tarchives!=null? tarchives.length : 0) + (tfiles!=null ? tfiles.length :0);
  Path[] ps = new Path[size];

  int i = 0;
  if (tarchives != null) {
    for (i=0; i < tarchives.length; i++) {
      ps[i] = new Path(tarchives[i].toString());
    }
  }

  if (tfiles != null) {
    for(int j=0; j< tfiles.length; j++) {
      ps[i+j] = new Path(tfiles[j].toString());
    }
  }

  TokenCache.obtainTokensForNamenodes(credentials, ps, job);
}