Java class org.apache.hadoop.hdfs.server.namenode.FSDirectory: example source code

Project: hadoop, aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, FlexMap    File: TestNestedSnapshots.java (identical in all five projects)
/**
 * When we have nested snapshottable directories and we try to reset a
 * snapshottable descendant back to a regular directory, we need to replace
 * the snapshottable descendant with an INodeDirectoryWithSnapshot.
 */
@Test
public void testDisallowNestedSnapshottableDir() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  final Path dir = new Path("/dir");
  final Path sub = new Path(dir, "sub");
  hdfs.mkdirs(sub);

  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  final Path file = new Path(sub, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);

  FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
  INode subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());

  hdfs.allowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.isDirectory() && subNode.asDirectory().isSnapshottable());

  hdfs.disallowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());
}
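Note that this snippet references test fixtures (cluster, hdfs) and constants (BLOCKSIZE, REPLICATION, SEED) declared elsewhere in TestNestedSnapshots. A minimal fixture sketch, assuming JUnit 4 and with illustrative values that are assumptions rather than the project's actual ones:

// Hypothetical fixture for running the snippet above; the real test class
// declares its own constants and setup.
private static final long SEED = 0;
private static final short REPLICATION = 3;
private static final long BLOCKSIZE = 1024;

private MiniDFSCluster cluster;
private DistributedFileSystem hdfs;

@Before
public void setUp() throws Exception {
  Configuration conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
}

@After
public void tearDown() throws Exception {
  if (cluster != null) {
    cluster.shutdown();
  }
}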
Project: hadoop-plus, hadoop-TCP, hardfs    File: TestNestedSnapshots.java (identical in all three projects; this older variant asserts on concrete INode subclasses)
/**
 * When we have nested snapshottable directories and we try to reset a
 * snapshottable descendant back to a regular directory, we need to replace
 * the snapshottable descendant with an INodeDirectoryWithSnapshot.
 */
@Test
public void testDisallowNestedSnapshottableDir() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  final Path dir = new Path("/dir");
  final Path sub = new Path(dir, "sub");
  hdfs.mkdirs(sub);

  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  final Path file = new Path(sub, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);

  FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
  INode subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode instanceof INodeDirectoryWithSnapshot);

  hdfs.allowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode instanceof INodeDirectorySnapshottable);

  hdfs.disallowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
}
Project: hadoop-on-lustre2    File: TestNestedSnapshots.java
/**
 * When we have nested snapshottable directories and we try to reset a
 * snapshottable descendant back to a regular directory, we need to replace
 * the snapshottable descendant with an INodeDirectoryWithSnapshot.
 */
@Test
public void testDisallowNestedSnapshottableDir() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  final Path dir = new Path("/dir");
  final Path sub = new Path(dir, "sub");
  hdfs.mkdirs(sub);

  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  final Path file = new Path(sub, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);

  FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
  INode subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());

  hdfs.allowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode instanceof INodeDirectorySnapshottable);

  hdfs.disallowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());
}
Project: RDFS    File: TestQuota.java
/**
 * Test HDFS operations that change the disk space consumed by a directory
 * tree, namely create, rename, delete, append, and setReplication.
 *
 * This is based on testNamespaceCommands() above.
 */
public void testSpaceCommands() throws Exception {
  final Configuration conf = new Configuration();
  // set a smaller block size so that we can test with smaller 
  // diskspace quotas
  conf.set("dfs.block.size", "512");
  conf.setBoolean("dfs.support.append", true);
  final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  final FileSystem fs = cluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
              fs instanceof DistributedFileSystem);

  final DistributedFileSystem dfs = (DistributedFileSystem)fs;
  FSDirectory fsd = cluster.getNameNode().namesystem.dir;
  INodeDirectoryWithQuota rootDir =
      (INodeDirectoryWithQuota) fsd.getExistingPathINodes("/")[0];
  try {
    generateFiles(dfs, rootDir, 1024, 512);
    generateFiles(dfs, rootDir, 1019, 512);
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop, big-c, hadoop-2.6.0-cdh5.4.3, FlexMap, hadoop-TCP, hardfs, hadoop-on-lustre2    File: DFSUtil.java (identical in all seven projects)
/**
 * Whether the pathname is valid.  Currently prohibits relative paths, 
 * names which contain a ":" or "//", or other non-canonical paths.
 */
public static boolean isValidName(String src) {
  // Path must be absolute.
  if (!src.startsWith(Path.SEPARATOR)) {
    return false;
  }

  // Check for ".." "." ":" "/"
  String[] components = StringUtils.split(src, '/');
  for (int i = 0; i < components.length; i++) {
    String element = components[i];
    if (element.equals(".")  ||
        (element.indexOf(":") >= 0)  ||
        (element.indexOf("/") >= 0)) {
      return false;
    }
    // ".." is allowed in path starting with /.reserved/.inodes
    if (element.equals("..")) {
      if (components.length > 4
          && components[1].equals(FSDirectory.DOT_RESERVED_STRING)
          && components[2].equals(FSDirectory.DOT_INODES_STRING)) {
        continue;
      }
      return false;
    }
    // The string may start or end with a /, but not have
    // "//" in the middle.
    if (element.isEmpty() && i != components.length - 1 &&
        i != 0) {
      return false;
    }
  }
  return true;
}
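To make the rules concrete, here is how isValidName behaves on a few sample inputs; this is an illustrative walkthrough (the inode id is made up), not part of DFSUtil:

// Expected results, following the checks above.
DFSUtil.isValidName("/foo/bar");                    // true: absolute and canonical
DFSUtil.isValidName("foo/bar");                     // false: not absolute
DFSUtil.isValidName("/foo:bar");                    // false: component contains ":"
DFSUtil.isValidName("/foo//bar");                   // false: "//" in the middle
DFSUtil.isValidName("/foo/../bar");                 // false: ".." outside /.reserved/.inodes
DFSUtil.isValidName("/.reserved/.inodes/16386/.."); // true: ".." allowed under /.reserved/.inodes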
Project: hadoop, big-c    File: TestSnapshotBlocksMap.java (BlockInfoContiguous variant, identical in both projects)
static INodeFile assertBlockCollection(String path, int numBlocks,
   final FSDirectory dir, final BlockManager blkManager) throws Exception {
  final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
  assertEquals(numBlocks, file.getBlocks().length);
  for(BlockInfoContiguous b : file.getBlocks()) {
    assertBlockCollection(blkManager, file, b);
  }
  return file;
}
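The loop delegates to a per-block overload defined in the same test class (the BlockInfo variant further below differs only in the block type). A sketch consistent with the call site, assumed here rather than copied from the source:

// Assumed sketch: checks that the blocks map resolves this block back to the
// same stored BlockInfo and to the INodeFile that owns it.
static void assertBlockCollection(final BlockManager blkManager,
    final INodeFile file, final BlockInfoContiguous b) {
  assertSame(b, blkManager.getStoredBlock(b));
  assertSame(file, blkManager.getBlockCollection(b));
}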
Project: aliyun-oss-hadoop-fs    File: TestReservedRawPaths.java
@Test(timeout = 120000)
public void testListDotReserved() throws Exception {
  // Create a base file for comparison
  final Path baseFileRaw = new Path("/.reserved/raw/base");
  final int len = 8192;
  DFSTestUtil.createFile(fs, baseFileRaw, len, (short) 1, 0xFEED);

  /*
   * Ensure that you can list /.reserved, with results: raw and .inodes
   */
  FileStatus[] stats = fs.listStatus(new Path("/.reserved"));
  assertEquals(2, stats.length);
  assertEquals(FSDirectory.DOT_INODES_STRING, stats[0].getPath().getName());
  assertEquals("raw", stats[1].getPath().getName());

  try {
    fs.listStatus(new Path("/.reserved/.inodes"));
    fail("expected FNFE");
  } catch (FileNotFoundException e) {
    assertExceptionContains(
            "/.reserved/.inodes does not exist", e);
  }

  final FileStatus[] fileStatuses = fs.listStatus(new Path("/.reserved/raw"));
  assertEquals("expected 1 entry", fileStatuses.length, 1);
  assertMatches(fileStatuses[0].getPath().toString(), "/.reserved/raw/base");
}
Project: aliyun-oss-hadoop-fs, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, hadoop-TCP, hardfs, hadoop-on-lustre2    File: TestSnapshotBlocksMap.java (BlockInfo variant, identical in all seven projects)
static INodeFile assertBlockCollection(String path, int numBlocks,
   final FSDirectory dir, final BlockManager blkManager) throws Exception {
  final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
  assertEquals(numBlocks, file.getBlocks().length);
  for(BlockInfo b : file.getBlocks()) {
    assertBlockCollection(blkManager, file, b);
  }
  return file;
}
Project: hadoop-EAR    File: TestQuota.java
/**
 * Test HDFS operations that change the disk space consumed by a directory
 * tree, namely create, rename, delete, append, and setReplication.
 *
 * This is based on testNamespaceCommands() above.
 */
@Test
public void testSpaceCommands() throws Exception {
  // smaller block size, support append
  setUp(true, true);

  FSDirectory fsd = cluster.getNameNode().namesystem.dir;
  INodeDirectoryWithQuota rootDir =
      (INodeDirectoryWithQuota) fsd.getExistingPathINodes("/")[0];
  generateFiles(dfs, rootDir, 1024, 512);
  generateFiles(dfs, rootDir, 1019, 512);
}
Project: hadoop, aliyun-oss-hadoop-fs    File: SnapshotManager.java (identical in both projects)
public SnapshotManager(final FSDirectory fsdir) {
  this.fsdir = fsdir;
}
Project: hadoop, aliyun-oss-hadoop-fs    File: CacheReplicationMonitor.java (identical in both projects)
/**
 * Scan all CacheDirectives.  Use the information to figure out
 * what cache replication factor each block should have.
 */
private void rescanCacheDirectives() {
  FSDirectory fsDir = namesystem.getFSDirectory();
  final long now = new Date().getTime();
  for (CacheDirective directive : cacheManager.getCacheDirectives()) {
    scannedDirectives++;
    // Skip processing this entry if it has expired
    if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
      LOG.debug("Directive {}: the directive expired at {} (now = {})",
           directive.getId(), directive.getExpiryTime(), now);
      continue;
    }
    String path = directive.getPath();
    INode node;
    try {
      node = fsDir.getINode(path);
    } catch (UnresolvedLinkException e) {
      // We don't cache through symlinks
      LOG.debug("Directive {}: got UnresolvedLinkException while resolving "
              + "path {}", directive.getId(), path
      );
      continue;
    }
    if (node == null)  {
      LOG.debug("Directive {}: No inode found at {}", directive.getId(),
          path);
    } else if (node.isDirectory()) {
      INodeDirectory dir = node.asDirectory();
      ReadOnlyList<INode> children = dir
          .getChildrenList(Snapshot.CURRENT_STATE_ID);
      for (INode child : children) {
        if (child.isFile()) {
          rescanFile(directive, child.asFile());
        }
      }
    } else if (node.isFile()) {
      rescanFile(directive, node.asFile());
    } else {
      LOG.debug("Directive {}: ignoring non-directive, non-file inode {} ",
          directive.getId(), node);
    }
  }
}
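The directives scanned here are registered through the client API. A minimal sketch of creating one, given a running MiniDFSCluster; the pool and path names are illustrative:

// Illustrative setup for a directive that rescanCacheDirectives() would visit.
DistributedFileSystem dfs = cluster.getFileSystem();
dfs.addCachePool(new CachePoolInfo("pool1"));
long directiveId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
    .setPath(new Path("/cached/dir"))   // files under this directory get cached
    .setPool("pool1")
    .setReplication((short) 2)          // desired cache replication factor
    .build());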
Project: hadoop    File: TestFileAppend4.java
/**
 * Test that an append with no locations fails with an exception
 * showing insufficient locations.
 */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
  Configuration conf = new Configuration();

  // lower heartbeat interval for fast recognition of DN
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4)
      .build();
  DistributedFileSystem fileSystem = null;
  try {
    // create a file with replication 2
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();

    // Check for replications
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);

    // Shut down all DNs that have the last block location for the file
    LocatedBlocks lbs = fileSystem.dfs.getNamenode().
        getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
    List<DataNode> dnsOfCluster = cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().
        getLocations();
    for( DataNode dn : dnsOfCluster) {
      for(DatanodeInfo loc: dnsWithLocations) {
        if(dn.getDatanodeId().equals(loc)){
          dn.shutdown();
          DFSTestUtil.waitForDatanodeDeath(dn);
        }
      }
    }

    // Wait till 0 replication is recognized
    DFSTestUtil.waitReplication(fileSystem, f, (short) 0);

    // Append to the file. At this point there are two live DNs left, but
    // neither of them has the block.
    try{
      fileSystem.append(f);
      fail("Append should fail because insufficient locations");
    } catch (IOException e){
      LOG.info("Expected exception: ", e);
    }
    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    final INodeFile inode = INodeFile.
        valueOf(dir.getINode("/testAppend"), "/testAppend");
    assertTrue("File should remain closed", !inode.isUnderConstruction());
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
Project: hadoop, aliyun-oss-hadoop-fs    File: TestSnapshotDeletion.java (identical in both projects)
private static INodeDirectory getDir(final FSDirectory fsdir, final Path dir)
    throws IOException {
  final String dirStr = dir.toString();
  return INodeDirectory.valueOf(fsdir.getINode(dirStr), dirStr);
}
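For example, in the surrounding tests this helper can fetch a directory inode for assertions; the path here is illustrative:

// Illustrative usage of the helper above.
INodeDirectory subNode = getDir(fsdir, new Path("/dir/sub"));
assertTrue(subNode.isWithSnapshot());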
Project: hadoop    File: TestRenameWithSnapshots.java
/**
 * Test a rename where the operation will exceed the quota in the dst
 * tree.
 */
@Test
public void testRenameUndo_5() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path subdir2 = new Path(dir2, "subdir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subdir2);

  final Path foo = new Path(dir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");

  // set the ns quota of dir2 to 4, so the current remaining quota is 2
  // (it already contains dir2 and subdir2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);

  final Path foo2 = new Path(subdir2, foo.getName());
  FSDirectory fsdir2 = Mockito.spy(fsdir);
  Mockito.doThrow(new NSQuotaExceededException("fake exception")).when(fsdir2)
      .addLastINode((INodesInPath) Mockito.anyObject(),
          (INode) Mockito.anyObject(), Mockito.anyBoolean());
  Whitebox.setInternalState(fsn, "dir", fsdir2);
  // rename /test/dir1/foo to /test/dir2/subdir2/foo. 
  // FSDirectory#verifyQuota4Rename will pass since the remaining quota is 2.
  // However, the rename operation will fail since we let addLastINode throw
  // NSQuotaExceededException
  boolean rename = hdfs.rename(foo, foo2);
  assertFalse(rename);

  // check the undo
  assertTrue(hdfs.exists(foo));
  assertTrue(hdfs.exists(bar));
  INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  INode barNode = fsdir2.getINode4Write(bar.toString());
  assertTrue(barNode.getClass() == INodeFile.class);
  assertSame(fooNode, barNode.getParent());
  List<DirectoryDiff> diffList = dir1Node
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  // check dir2
  INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
  assertEquals(2, counts.getNameSpace());
  assertEquals(0, counts.getStorageSpace());
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir2.getINode4Write(subdir2.toString()));
  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
Project: hadoop    File: TestRenameWithSnapshots.java
/**
 * Test the rename undo when removing the dst node fails.
 */
@Test
public void testRenameUndo_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path sub_dir2 = new Path(dir2, "subdir");
  final Path subsub_dir2 = new Path(sub_dir2, "subdir");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subsub_dir2);

  final Path foo = new Path(dir1, "foo");
  hdfs.mkdirs(foo);

  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");

  // set ns quota of dir2 to 4, so the current remaining is 1 (already has
  // dir2, sub_dir2, and subsub_dir2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
  FSDirectory fsdir2 = Mockito.spy(fsdir);
  Mockito.doThrow(new RuntimeException("fake exception")).when(fsdir2)
      .removeLastINode((INodesInPath) Mockito.anyObject());
  Whitebox.setInternalState(fsn, "dir", fsdir2);
  // rename /test/dir1/foo to /test/dir2/sub_dir2/subsub_dir2.
  // FSDirectory#verifyQuota4Rename will pass since foo is only counted
  // as 1 against the NS quota. However, the rename operation will fail when
  // removing subsub_dir2.
  try {
    hdfs.rename(foo, subsub_dir2, Rename.OVERWRITE);
    fail("Expect QuotaExceedException");
  } catch (Exception e) {
    String msg = "fake exception";
    GenericTestUtils.assertExceptionContains(msg, e);
  }

  // check the undo
  assertTrue(hdfs.exists(foo));
  INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  assertSame(dir1Node, fooNode.getParent());
  List<DirectoryDiff> diffList = dir1Node
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  // check dir2
  INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
  assertEquals(3, counts.getNameSpace());
  assertEquals(0, counts.getStorageSpace());
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir2.getINode4Write(sub_dir2.toString()));
  INode subsubdir2Node = fsdir2.getINode4Write(subsub_dir2.toString());
  assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
  assertSame(subdir2Node, subsubdir2Node.getParent());

  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
Project: hadoop    File: SnapshotTestHelper.java
public static void dumpTree2File(FSDirectory fsdir, File f) throws IOException {
  final PrintWriter out = new PrintWriter(new FileWriter(f, false), true);
  fsdir.getINode("/").dumpTreeRecursively(out, new StringBuilder(),
      Snapshot.CURRENT_STATE_ID);
  out.close();
}
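A common pattern is to dump the tree before and after an operation and compare the two files. For instance, in a test with the usual cluster/hdfs fixtures (paths and snapshot name illustrative):

// Illustrative: capture the namespace around a snapshot deletion.
FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
SnapshotTestHelper.dumpTree2File(fsdir, new File("/tmp/tree-before.txt"));
hdfs.deleteSnapshot(dir, "s1");
SnapshotTestHelper.dumpTree2File(fsdir, new File("/tmp/tree-after.txt"));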
Project: aliyun-oss-hadoop-fs    File: TestFileAppend4.java
(Identical to the hadoop version of testAppendInsufficientLocations above, except that the
client socket timeout is configured via HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY
instead of DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY.)
Project: aliyun-oss-hadoop-fs    File: TestDFSShell.java
@Test (timeout = 30000)
public void testListReserved() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs = cluster.getFileSystem();
  FsShell shell = new FsShell();
  shell.setConf(conf);
  FileStatus test = fs.getFileStatus(new Path("/.reserved"));
  assertEquals(FSDirectory.DOT_RESERVED_STRING, test.getPath().getName());

  // Listing /.reserved/ should show 2 items: raw and .inodes
  FileStatus[] stats = fs.listStatus(new Path("/.reserved"));
  assertEquals(2, stats.length);
  assertEquals(FSDirectory.DOT_INODES_STRING, stats[0].getPath().getName());
  assertEquals(conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
      stats[0].getGroup());
  assertEquals("raw", stats[1].getPath().getName());
  assertEquals(conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
      stats[1].getGroup());

  // Listing / should not show /.reserved
  stats = fs.listStatus(new Path("/"));
  assertEquals(0, stats.length);

  // runCmd prints errors to System.err, so verify from there.
  PrintStream syserr = System.err;
  final ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintStream ps = new PrintStream(baos);
  System.setErr(ps);
  try {
    runCmd(shell, "-ls", "/.reserved");
    assertEquals(0, baos.toString().length());

    runCmd(shell, "-ls", "/.reserved/raw/.reserved");
    assertTrue(baos.toString().contains("No such file or directory"));
  } finally {
    System.setErr(syserr);
    cluster.shutdown();
  }
}