Example source code for the Java class org.apache.hadoop.hdfs.DistributedFileSystem

Project: mumu-mapreduce    File: MapReduceConfiguration.java
/**
 * Print the MapReduce output under the given path.
 *
 * @param path the output directory to read
 */
public void print(String path) {
    log.info("MapReduce output:...................................................");
    DistributedFileSystem distributedFileSystem = distributedFileSystem();
    try {
        FileStatus[] fileStatuses = distributedFileSystem.listStatus(new Path(path));
        for (FileStatus fs : fileStatuses) {
            log.info(fs);
            FSDataInputStream fsDataInputStream = distributedFileSystem.open(fs.getPath());
            // available() only reports bytes readable without blocking; size the
            // buffer from the file length and read fully instead.
            byte[] bs = new byte[(int) fs.getLen()];
            fsDataInputStream.readFully(bs);
            fsDataInputStream.close();
            log.info("\n" + new String(bs) + "\n");
        }
    } catch (IOException e) {
        log.error(e);
    } finally {
        close(distributedFileSystem);
    }
}
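The distributedFileSystem() factory called above is not part of this snippet. A minimal sketch of what such a helper might look like, assuming the NameNode URI is held in a configuration property (the property name hdfs.uri and the default address are hypothetical):

private DistributedFileSystem distributedFileSystem() {
    Configuration conf = new Configuration();
    // Hypothetical property; substitute however the project stores its NameNode URI.
    String uri = conf.get("hdfs.uri", "hdfs://localhost:9000");
    try {
        FileSystem fs = FileSystem.get(URI.create(uri), conf);
        return (DistributedFileSystem) fs; // only valid if the URI really points at HDFS
    } catch (IOException e) {
        throw new IllegalStateException("Unable to open HDFS at " + uri, e);
    }
}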
Project: mmsns    File: HadoopUtil.java
/**
 * Download a file from Hadoop.
 *
 * @param taskName    the sqoop task name used to look up configuration
 * @param filePath    the local destination path
 * @param existDelete whether to delete an existing local file first
 */
public static void download(String taskName, String filePath, boolean existDelete) {
    File file = new File(filePath);
    if (file.exists()) {
        if (existDelete) {
            // delete() removes the file immediately; deleteOnExit() would only
            // remove it at JVM exit, after the new download had been written.
            file.delete();
        } else {
            return;
        }
    }
    String hadoopAddress = propertyConfig.getProperty("sqoop.task." + taskName + ".tolink.linkConfig.uri");
    String itemmodels = propertyConfig.getProperty("sqoop.task." + taskName + ".recommend.itemmodels");
    try {
        DistributedFileSystem distributedFileSystem = distributedFileSystem(hadoopAddress);
        FSDataInputStream fsDataInputStream = distributedFileSystem.open(new Path(itemmodels));
        // Read the whole stream; available() is not a reliable file length.
        byte[] bs = IOUtils.toByteArray(fsDataInputStream);
        IOUtils.closeQuietly(fsDataInputStream);
        log.info(new String(bs));

        FileOutputStream fileOutputStream = new FileOutputStream(new File(filePath));
        IOUtils.write(bs, fileOutputStream);
        IOUtils.closeQuietly(fileOutputStream);
    } catch (IOException e) {
        log.error(e);
    }
}
Project: hadoop    File: CryptoAdmin.java
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  if (!args.isEmpty()) {
    System.err.println("Can't understand argument: " + args.get(0));
    return 1;
  }

  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    final TableListing listing = new TableListing.Builder()
      .addField("").addField("", true)
      .wrapWidth(AdminHelper.MAX_LINE_WIDTH).hideHeaders().build();
    final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
    while (it.hasNext()) {
      EncryptionZone ez = it.next();
      listing.addRow(ez.getPath(), ez.getKeyName());
    }
    System.out.println(listing.toString());
  } catch (IOException e) {
    System.err.println(prettifyException(e));
    return 2;
  }

  return 0;
}
Project: hadoop    File: CacheAdmin.java
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  String name = StringUtils.popFirstNonOption(args);
  if (name == null) {
    System.err.println("You must specify a name when deleting a " +
        "cache pool.");
    return 1;
  }
  if (!args.isEmpty()) {
    System.err.print("Can't understand arguments: " +
      Joiner.on(" ").join(args) + "\n");
    System.err.println("Usage is " + getShortUsage());
    return 1;
  }
  DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    dfs.removeCachePool(name);
  } catch (IOException e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  System.out.println("Successfully removed cache pool " + name + ".");
  return 0;
}
Project: hadoop    File: HDFSConcat.java
public static void main(String... args) throws IOException {

    if(args.length < 2) {
      System.err.println("Usage: HDFSConcat target srcs..");
      System.exit(1);
    }

    Configuration conf = new Configuration();
    String uri = conf.get("fs.default.name", def_uri);
    Path path = new Path(uri);
    DistributedFileSystem dfs = 
      (DistributedFileSystem)FileSystem.get(path.toUri(), conf);

    Path [] srcs = new Path[args.length-1];
    for(int i=1; i<args.length; i++) {
      srcs[i-1] = new Path(args[i]);
    }
    dfs.concat(new Path(args[0]), srcs);
  }
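Note that HDFS concat is restrictive: depending on the Hadoop version, the target must be an existing, non-empty file, the sources must share its block size, and in older releases all source files must sit in the same directory as the target. A minimal direct call, with hypothetical paths:

Configuration conf = new Configuration();
DistributedFileSystem dfs =
    (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:9000"), conf);
// Appends part-1 and part-2 onto target; all three paths are illustrative.
dfs.concat(new Path("/data/target"),
    new Path[] { new Path("/data/part-1"), new Path("/data/part-2") });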
Project: hadoop    File: TestFsck.java
private void writeFile(final DistributedFileSystem dfs,
    Path dir, String fileName) throws IOException {
  Path filePath = new Path(dir.toString() + Path.SEPARATOR + fileName);
  final FSDataOutputStream out = dfs.create(filePath);
  out.writeChars("teststring");
  out.close();
}
Project: hadoop    File: StoragePolicyAdmin.java
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
    System.out.println("Block Storage Policies:");
    for (BlockStoragePolicy policy : policies) {
      if (policy != null) {
        System.out.println("\t" + policy);
      }
    }
  } catch (IOException e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  return 0;
}
Project: hadoop    File: DFSAdmin.java
static int run(DistributedFileSystem dfs, String[] argv, int idx) throws IOException {
  final String actionArg = argv.length >= 2 ? argv[1] : "";
  final RollingUpgradeAction action = RollingUpgradeAction.fromString(actionArg);
  if (action == null) {
    throw new IllegalArgumentException("Failed to convert \"" + actionArg
        + "\" to " + RollingUpgradeAction.class.getSimpleName());
  }

  System.out.println(action + " rolling upgrade ...");

  final RollingUpgradeInfo info = dfs.rollingUpgrade(action);
  switch(action){
  case QUERY:
    break;
  case PREPARE:
    Preconditions.checkState(info.isStarted());
    break;
  case FINALIZE:
    Preconditions.checkState(info == null || info.isFinalized());
    break;
  }
  printMessage(info, System.out);
  return 0;
}
Project: hadoop    File: DFSAdmin.java
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log " +
        "directory of namenode " + dfs.getUri());
  }
  return 0;
}
Project: hadoop    File: TestHDFSFileContextMainOperations.java
/**
 * Perform operations such as setting quota, deletion of files, rename and
 * ensure system can apply edits log during startup.
 */
@Test
public void testEditsLogOldRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);

  // Set a quota so that dst1's parent cannot accept new files/directories
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  oldRename(src1, dst1, true, false);

  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
Project: hadoop    File: TestHDFSFileContextMainOperations.java
/**
 * Perform operations such as setting quota, deletion of files, rename and
 * ensure system can apply edits log during startup.
 */
@Test
public void testEditsLogRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);

  // Set a quota so that dst1's parent cannot accept new files/directories
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  rename(src1, dst1, true, true, false, Rename.OVERWRITE);

  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
Project: ditb    File: HBaseTestingUtility.java
/**
 * This method clones the passed <code>c</code> configuration setting a new
 * user into the clone.  Use it when getting new instances of FileSystem.  Only
 * works for DistributedFileSystem w/o Kerberos.
 * @param c Initial configuration
 * @param differentiatingSuffix Suffix to differentiate this user from others.
 * @return A new configuration instance with a different user set into it.
 * @throws IOException
 */
public static User getDifferentUser(final Configuration c,
  final String differentiatingSuffix)
throws IOException {
  FileSystem currentfs = FileSystem.get(c);
  if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
    return User.getCurrent();
  }
  // Else distributed filesystem.  Make a new instance per daemon.  Below
  // code is taken from the AppendTestUtil over in hdfs.
  String username = User.getCurrent().getName() +
    differentiatingSuffix;
  User user = User.createUserForTesting(c, username,
      new String[]{"supergroup"});
  return user;
}
Project: hadoop    File: TestXAttrCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(), 
      fs instanceof DistributedFileSystem);
}
Project: hadoop    File: TestCacheAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: mumu-parquet    File: MapReduceParquetMapReducerTest.java
@Test
public void mapreduce() {
    String inputPath = ParquetConfiguration.HDFS_URI + "//parquet/mapreduce/input";
    String outputPath = ParquetConfiguration.HDFS_URI + "//parquet/mapreduce/output" + DateFormatUtils.format(new Date(), "yyyyMMddHHmmss");
    try {
        MapReduceParquetMapReducer.main(new String[]{inputPath, outputPath});
        DistributedFileSystem distributedFileSystem = new ParquetConfiguration().distributedFileSystem();
        FileStatus[] fileStatuses = distributedFileSystem.listStatus(new Path(outputPath));
        for (FileStatus fileStatus : fileStatuses) {
            System.out.println(fileStatus);
        }
        distributedFileSystem.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Project: hadoop    File: TestStorageMover.java
/**
 * Move hot files to warm and cold, warm files to hot and cold,
 * and cold files to hot and warm.
 */
void moveAround(DistributedFileSystem dfs) throws Exception {
  for(Path srcDir : map.keySet()) {
    int i = 0;
    for(Path dstDir : map.keySet()) {
      if (!srcDir.equals(dstDir)) {
        final Path src = new Path(srcDir, "file" + i++);
        final Path dst = new Path(dstDir, srcDir.getName() + "2" + dstDir.getName());
        LOG.info("rename " + src + " to " + dst);
        dfs.rename(src, dst);
      }
    }
  }
}
Project: hadoop    File: TestFileTruncate.java
public static void checkBlockRecovery(Path p, DistributedFileSystem dfs,
    int attempts, long sleepMs) throws IOException {
  boolean success = false;
  for(int i = 0; i < attempts; i++) {
    LocatedBlocks blocks = getLocatedBlocks(p, dfs);
    boolean noLastBlock = blocks.getLastLocatedBlock() == null;
    if(!blocks.isUnderConstruction() &&
        (noLastBlock || blocks.isLastBlockComplete())) {
      success = true;
      break;
    }
    try { Thread.sleep(sleepMs); } catch (InterruptedException ignored) {}
  }
  assertThat("inode should complete in ~" + sleepMs * attempts + " ms.",
      success, is(true));
}
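In the truncate tests this helper typically follows a truncate call that returns false, which signals that the last block still needs recovery. An illustrative pairing (the attempt count and sleep interval are arbitrary here, and dfs, path, and newLength come from the surrounding test):

// truncate() returns false when block recovery on the last block is pending.
boolean done = dfs.truncate(path, newLength);
if (!done) {
    checkBlockRecovery(path, dfs, 300, 100L); // poll for up to ~30 seconds
}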
Project: hadoop    File: TestBootstrapStandbyWithQJM.java
@Before
public void setup() throws Exception {
  Configuration conf = new Configuration();
  // Turn off IPC client caching, so that the suite can handle
  // the restart of the daemons between test cases.
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);

  MiniQJMHACluster miniQjmHaCluster = new MiniQJMHACluster.Builder(conf).build();
  cluster = miniQjmHaCluster.getDfsCluster();
  jCluster = miniQjmHaCluster.getJournalCluster();

  // make nn0 active
  cluster.transitionToActive(0);
  // do something to generate in-progress edit log data
  DistributedFileSystem dfs = (DistributedFileSystem) 
      HATestUtil.configureFailoverFs(cluster, conf);
  dfs.mkdirs(new Path("/test2"));
  dfs.close();
}
Project: ditb    File: FSUtils.java
/**
 * We use reflection because {@link DistributedFileSystem#setSafeMode(
 * HdfsConstants.SafeModeAction action, boolean isChecked)} is not in hadoop 1.1
 *
 * @param dfs
 * @return whether we're in safe mode
 * @throws IOException
 */
private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
  boolean inSafeMode = false;
  try {
    Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
        org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction.class, boolean.class});
    inSafeMode = (Boolean) m.invoke(dfs,
      org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction.SAFEMODE_GET, true);
  } catch (Exception e) {
    if (e instanceof IOException) throw (IOException) e;

    // Check whether dfs is in safe mode.
    inSafeMode = dfs.setSafeMode(
      org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction.SAFEMODE_GET);
  }
  return inSafeMode;
}
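Callers usually poll this check rather than invoke it once, waiting for the NameNode to leave safe mode before proceeding. A minimal sketch of such a wait loop (the method name and interval handling are illustrative, not part of the FSUtils code shown above):

public static void waitOnSafeMode(DistributedFileSystem dfs, long pollIntervalMs)
    throws IOException, InterruptedException {
  // Spin until the NameNode reports it has left safe mode.
  while (isInSafeMode(dfs)) {
    Thread.sleep(pollIntervalMs);
  }
}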
Project: hadoop    File: TestNameNodeRetryCacheMetrics.java
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
Project: hadoop    File: TestSaveNamespace.java
@Test (timeout=30000)
public void testSaveNamespaceWithDanglingLease() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
      .numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    cluster.getNamesystem().leaseManager.addLease("me", "/non-existent");      
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop-oss    File: ApplicationMasterKMS.java
private  void collectFileNames(DistributedFileSystem fs, String zonepath, List<String> names)
        throws IOException
{
    FileStatus[] statuses = fs.listStatus(new Path(zonepath));
    // System.out.println("## cheking path " + new Path(zonepath).toString() + " iter " + statuses.length);
    for (FileStatus status : statuses) {
        String fname = zonepath + "/" + status.getPath().getName();
        if (status.isDirectory())
            collectFileNames(fs, fname, names);
        else
            names.add(fname);
    }
}
Project: hadoop-oss    File: KeyRotationBC.java
public static void collectFileNames(DistributedFileSystem fs, String zonepath, List<String> names)
            throws IOException
    {
        FileStatus[] statuses = fs.listStatus(new Path(zonepath));
//      System.out.println("## cheking path " + new Path(zonepath).toString() + " iter " + statuses.length);
        for (FileStatus status : statuses) {
            String fname = zonepath + "/" + status.getPath().getName();
            if (status.isDirectory())
                collectFileNames(fs, fname, names);
            else
                names.add(fname);
        }
    }
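Both variants are invoked the same way; a short illustrative call that lists every file under an encryption zone root (the zone path /zone is hypothetical, and fs is an already-open DistributedFileSystem):

List<String> names = new ArrayList<String>();
collectFileNames(fs, "/zone", names);
for (String name : names) {
    System.out.println(name);
}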
Project: hadoop    File: SnapshotTestHelper.java
/**
 * Check the functionality of a snapshot.
 * 
 * @param hdfs DistributedFileSystem instance
 * @param snapshotRoot The root of the snapshot
 * @param snapshottedDir The snapshotted directory
 */
public static void checkSnapshotCreation(DistributedFileSystem hdfs,
    Path snapshotRoot, Path snapshottedDir) throws Exception {
  // Currently we only check if the snapshot was created successfully
  assertTrue(hdfs.exists(snapshotRoot));
  // Compare the snapshot with the current dir
  FileStatus[] currentFiles = hdfs.listStatus(snapshottedDir);
  FileStatus[] snapshotFiles = hdfs.listStatus(snapshotRoot);
  assertEquals("snapshottedDir=" + snapshottedDir
      + ", snapshotRoot=" + snapshotRoot,
      currentFiles.length, snapshotFiles.length);
}
Project: mumu-mapreduce    File: MapReduceConfiguration.java
/**
 * Close the given file system, logging any error on close.
 *
 * @param distributedFileSystem the file system to close; may be null
 */
public void close(DistributedFileSystem distributedFileSystem) {
    if (distributedFileSystem != null) {
        try {
            distributedFileSystem.close();
        } catch (IOException e) {
            log.error(e);
        }
    }
}
Project: WIFIProbe    File: HDFSTool.java
public static void concat(String dir) throws IOException {
    String directory = NodeConfig.HDFS_PATH + dir;
    Configuration conf = new Configuration();
    DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(URI.create(directory), conf);
    FileStatus[] fileList = fs.listStatus(new Path(directory));

    if (fileList.length >= 2) {
        // Collect files whose length modulo the block size is under half a block.
        // The original test, (len & ~blockSize) < blockSize/2, only approximated
        // this for files shorter than two blocks.
        ArrayList<Path> srcs = new ArrayList<Path>(fileList.length);
        for (FileStatus fileStatus : fileList) {
            if (fileStatus.isFile() &&
                    fileStatus.getLen() % fileStatus.getBlockSize() < fileStatus.getBlockSize() / 2) {
                srcs.add(fileStatus.getPath());
            }
        }

        if (srcs.size() >= 2) {
            // Concatenate every remaining small file onto the first one.
            Path appended = srcs.get(0);
            Path[] sources = new Path[srcs.size() - 1];
            for (int i = 0; i < srcs.size() - 1; i++) {
                sources[i] = srcs.get(i + 1);
            }
            fs.concat(appended, sources);
            Logger.println("concat to : " + appended.getName());
            Logger.println(Arrays.toString(sources));
        }

        fs.close();
    }
}
Project: hadoop    File: HdfsAdmin.java
/**
 * Create a new HdfsAdmin client.
 * 
 * @param uri the unique URI of the HDFS file system to administer
 * @param conf configuration
 * @throws IOException in the event the file system could not be created
 */
public HdfsAdmin(URI uri, Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(uri, conf);
  if (!(fs instanceof DistributedFileSystem)) {
    throw new IllegalArgumentException("'" + uri + "' is not an HDFS URI.");
  } else {
    dfs = (DistributedFileSystem)fs;
  }
}
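Once constructed, the HdfsAdmin client exposes HDFS-specific administrative calls without casting to DistributedFileSystem everywhere. A brief illustrative use (the URI and path are placeholders):

Configuration conf = new Configuration();
HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://localhost:9000"), conf);
admin.allowSnapshot(new Path("/data")); // enable snapshots on a directory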
Project: hadoop    File: NameNodeConnector.java
public NameNodeConnector(String name, URI nameNodeUri, Path idPath,
                         List<Path> targetPaths, Configuration conf,
                         int maxNotChangedIterations)
    throws IOException {
  this.nameNodeUri = nameNodeUri;
  this.idPath = idPath;
  this.targetPaths = targetPaths == null || targetPaths.isEmpty() ? Arrays
      .asList(new Path("/")) : targetPaths;
  this.maxNotChangedIterations = maxNotChangedIterations;

  this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri,
      NamenodeProtocol.class).getProxy();
  this.client = NameNodeProxies.createProxy(conf, nameNodeUri,
      ClientProtocol.class, fallbackToSimpleAuth).getProxy();
  this.fs = (DistributedFileSystem)FileSystem.get(nameNodeUri, conf);

  final NamespaceInfo namespaceinfo = namenode.versionRequest();
  this.blockpoolID = namespaceinfo.getBlockPoolID();

  final FsServerDefaults defaults = fs.getServerDefaults(new Path("/"));
  this.keyManager = new KeyManager(blockpoolID, namenode,
      defaults.getEncryptDataTransfer(), conf);
  // if it is for test, we do not create the id file
  out = checkAndMarkRunning();
  if (out == null) {
    // Exit if there is another one running.
    throw new IOException("Another " + name + " is running.");
  }
}
Project: hadoop    File: CryptoAdmin.java
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  final String path = StringUtils.popOptionWithArgument("-path", args);
  if (path == null) {
    System.err.println("You must specify a path with -path.");
    return 1;
  }

  final String keyName =
      StringUtils.popOptionWithArgument("-keyName", args);
  if (keyName == null) {
    System.err.println("You must specify a key name with -keyName.");
    return 1;
  }

  if (!args.isEmpty()) {
    System.err.println("Can't understand argument: " + args.get(0));
    return 1;
  }

  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    dfs.createEncryptionZone(new Path(path), keyName);
    System.out.println("Added encryption zone " + path);
  } catch (IOException e) {
    System.err.println(prettifyException(e));
    return 2;
  }

  return 0;
}
Project: hadoop    File: DistCpSync.java
@VisibleForTesting
static DiffInfo[] getDiffs(DistCpOptions inputOptions,
    DistributedFileSystem fs, Path sourceDir, Path targetDir) {
  try {
    final String from = getSnapshotName(inputOptions.getFromSnapshot());
    final String to = getSnapshotName(inputOptions.getToSnapshot());
    SnapshotDiffReport sourceDiff = fs.getSnapshotDiffReport(sourceDir,
        from, to);
    return DiffInfo.getDiffs(sourceDiff, targetDir);
  } catch (IOException e) {
    DistCp.LOG.warn("Failed to compute snapshot diff on " + sourceDir, e);
  }
  return null;
}
Project: hadoop    File: AdminHelper.java
static DistributedFileSystem getDFS(Configuration conf)
    throws IOException {
  FileSystem fs = FileSystem.get(conf);
  if (!(fs instanceof DistributedFileSystem)) {
    throw new IllegalArgumentException("FileSystem " + fs.getUri() +
        " is not an HDFS file system");
  }
  return (DistributedFileSystem)fs;
}
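A typical call site simply passes a Configuration whose fs.defaultFS points at HDFS; note that AdminHelper is package-private in org.apache.hadoop.hdfs.tools, so this sketch assumes a caller in that package:

Configuration conf = new Configuration(); // picks up fs.defaultFS
DistributedFileSystem dfs = AdminHelper.getDFS(conf);
System.out.println("Connected to " + dfs.getUri());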
Project: hadoop    File: SnapshotDiff.java
@Override
public int run(String[] argv) throws Exception {
  String description = "hdfs snapshotDiff <snapshotDir> <from> <to>:\n" +
  "\tGet the difference between two snapshots, \n" + 
  "\tor between a snapshot and the current tree of a directory.\n" +
  "\tFor <from>/<to>, users can use \".\" to present the current status,\n" +
  "\tand use \".snapshot/snapshot_name\" to present a snapshot,\n" +
  "\twhere \".snapshot/\" can be omitted\n";

  if(argv.length != 3) {
    System.err.println("Usage: \n" + description);
    return 1;
  }

  FileSystem fs = FileSystem.get(getConf());
  if (! (fs instanceof DistributedFileSystem)) {
    System.err.println(
        "SnapshotDiff can only be used in DistributedFileSystem");
    return 1;
  }
  DistributedFileSystem dfs = (DistributedFileSystem) fs;

  Path snapshotRoot = new Path(argv[0]);
  String fromSnapshot = getSnapshotName(argv[1]);
  String toSnapshot = getSnapshotName(argv[2]);
  try {
    SnapshotDiffReport diffReport = dfs.getSnapshotDiffReport(snapshotRoot,
        fromSnapshot, toSnapshot);
    System.out.println(diffReport.toString());
  } catch (IOException e) {
    String[] content = e.getLocalizedMessage().split("\n");
    System.err.println("snapshotDiff: " + content[0]);
    return 1;
  }
  return 0;
}
Project: hadoop    File: TestSnapshotDeletion.java
@Test
public void testDeleteSnapshotWithPermissionsDisabled() throws Exception {
  cluster.shutdown();
  Configuration newConf = new Configuration(conf);
  newConf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
  cluster = new MiniDFSCluster.Builder(newConf).numDataNodes(0).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();

  final Path path = new Path("/dir");
  hdfs.mkdirs(path);
  hdfs.allowSnapshot(path);
  hdfs.mkdirs(new Path(path, "/test"));
  hdfs.createSnapshot(path, "s1");
  UserGroupInformation anotherUser = UserGroupInformation
      .createRemoteUser("anotheruser");
  anotherUser.doAs(new PrivilegedAction<Object>() {
    @Override
    public Object run() {
      DistributedFileSystem anotherUserFS = null;
      try {
        anotherUserFS = cluster.getFileSystem();
        anotherUserFS.deleteSnapshot(path, "s1");
      } catch (IOException e) {
        fail("Failed to delete snapshot : " + e.getLocalizedMessage());
      } finally {
        IOUtils.closeStream(anotherUserFS);
      }
      return null;
    }
  });
}
Project: hadoop    File: StoragePolicyAdmin.java
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  final String path = StringUtils.popOptionWithArgument("-path", args);
  if (path == null) {
    System.err.println("Please specify the path for setting the storage " +
        "policy.\nUsage: " + getLongUsage());
    return 1;
  }

  final String policyName = StringUtils.popOptionWithArgument("-policy",
      args);
  if (policyName == null) {
    System.err.println("Please specify the policy name.\nUsage: " +
        getLongUsage());
    return 1;
  }

  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    dfs.setStoragePolicy(new Path(path), policyName);
    System.out.println("Set storage policy " + policyName + " on " + path);
  } catch (Exception e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  return 0;
}
Project: hadoop    File: DFSAdmin.java
/** Constructor */
public DFSAdminCommand(FileSystem fs) {
  super(fs.getConf());
  if (!(fs instanceof DistributedFileSystem)) {
    throw new IllegalArgumentException("FileSystem " + fs.getUri() + 
        " is not an HDFS file system");
  }
  this.dfs = (DistributedFileSystem)fs;
}
Project: hadoop    File: TestDeleteRace.java
private void testDeleteAddBlockRace(boolean hasSnapshot) throws Exception {
  try {
    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    final String fileName = "/testDeleteAddBlockRace";
    Path filePath = new Path(fileName);

    FSDataOutputStream out = null;
    out = fs.create(filePath);
    if (hasSnapshot) {
      SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs, new Path(
          "/"), "s1");
    }

    Thread deleteThread = new DeleteThread(fs, filePath);
    deleteThread.start();

    try {
      // write data and sync to make sure a block is allocated.
      out.write(new byte[32], 0, 32);
      out.hsync();
      Assert.fail("Should have failed.");
    } catch (FileNotFoundException e) {
      GenericTestUtils.assertExceptionContains(filePath.getName(), e);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop    File: DFSAdmin.java
/**
 * Disallow snapshots on a directory.
 * Usage: hdfs dfsadmin -disallowSnapshot snapshotDir
 * @param argv List of command line parameters.
 * @exception IOException
 */
public void disallowSnapshot(String[] argv) throws IOException {  
  DistributedFileSystem dfs = getDFS();
  try {
    dfs.disallowSnapshot(new Path(argv[1]));
  } catch (SnapshotException e) {
    throw new RemoteException(e.getClass().getName(), e.getMessage());
  }
  System.out.println("Disallowing snaphot on " + argv[1] + " succeeded");
}
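The mirror command is allowSnapshot; a sketch of the corresponding handler, assumed to parallel the structure above:

public void allowSnapshot(String[] argv) throws IOException {
  DistributedFileSystem dfs = getDFS();
  try {
    dfs.allowSnapshot(new Path(argv[1]));
  } catch (SnapshotException e) {
    throw new RemoteException(e.getClass().getName(), e.getMessage());
  }
  System.out.println("Allowing snapshot on " + argv[1] + " succeeded");
}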
Project: hadoop    File: DFSAdmin.java
/**
 * Command to ask the namenode to save the namespace.
 * Usage: hdfs dfsadmin -saveNamespace
 * @exception IOException 
 * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
 */
public int saveNamespace() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().saveNamespace();
      System.out.println("Save namespace successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.saveNamespace();
    System.out.println("Save namespace successful");
  }
  exitCode = 0;

  return exitCode;
}
Project: hadoop    File: DFSAdmin.java
public int rollEdits() throws IOException {
  DistributedFileSystem dfs = getDFS();
  long txid = dfs.rollEdits();
  System.out.println("Successfully rolled edit logs.");
  System.out.println("New segment starts at txid " + txid);
  return 0;
}
Project: hadoop    File: DFSAdmin.java
/**
 * Command to ask the namenode to reread the hosts and excluded hosts 
 * file.
 * Usage: hdfs dfsadmin -refreshNodes
 * @exception IOException 
 */
public int refreshNodes() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
      proxy.getProxy().refreshNodes();
      System.out.println("Refresh nodes successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.refreshNodes();
    System.out.println("Refresh nodes successful");
  }
  exitCode = 0;

  return exitCode;
}