Java class org.apache.hadoop.hdfs.server.namenode.INodeId — example usages collected from open-source projects

项目:hadoop    文件:PBHelper.java   
/**
 * Converts a protobuf {@link HdfsFileStatusProto} into an
 * {@link HdfsLocatedFileStatus}, substituting sentinel defaults for any
 * optional protobuf fields that an (older) peer left unset.
 *
 * @param fs the protobuf message, may be null
 * @return the converted status, or null if {@code fs} is null
 */
public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
  if (fs == null)
    return null;
  return new HdfsLocatedFileStatus(
      fs.getLength(), fs.getFileType().equals(FileType.IS_DIR), 
      fs.getBlockReplication(), fs.getBlocksize(),
      fs.getModificationTime(), fs.getAccessTime(),
      PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(), 
      // Symlink target bytes are only meaningful for symlink entries.
      fs.getFileType().equals(FileType.IS_SYMLINK) ? 
          fs.getSymlink().toByteArray() : null,
      fs.getPath().toByteArray(),
      // Servers predating file ids omit the field; use the legacy id.
      fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID,
      fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
      fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
      fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
      fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
          : BlockStoragePolicySuite.ID_UNSPECIFIED);
}
项目:hadoop    文件:TestJsonUtil.java   
/**
 * Round-trip check: an HdfsFileStatus serialized to JSON and parsed back
 * must yield a FileStatus equal to the one derived from the original.
 */
@Test
public void testHdfsFileStatus() throws IOException {
  final String parent = "/dir";
  final long now = Time.now();
  // A fully populated status: symlink "bar", name "foo", legacy inode id.
  final HdfsFileStatus original = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
  final FileStatus expected = toFileStatus(original, parent);
  System.out.println("status  = " + original);
  System.out.println("fstatus = " + expected);
  // Serialize, then parse back through the generic-map JSON reader.
  final String serialized = JsonUtil.toJsonString(original, true);
  System.out.println("json    = " + serialized.replace(",", ",\n  "));
  ObjectReader mapReader = new ObjectMapper().reader(Map.class);
  final HdfsFileStatus roundTripped =
      JsonUtil.toFileStatus((Map<?, ?>) mapReader.readValue(serialized), true);
  final FileStatus actual = toFileStatus(roundTripped, parent);
  System.out.println("s2      = " + roundTripped);
  System.out.println("fs2     = " + actual);
  Assert.assertEquals(expected, actual);
}
项目:hadoop    文件:TestOpenFilesWithSnapshot.java   
/**
 * Regression test: renaming and deleting a directory that contained
 * open (aborted) files and a file with a zero-length block, while a
 * snapshot exists, must leave a namespace that can be saved and
 * reloaded — the NameNode restart at the end must succeed.
 */
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);

  // check for zero sized blocks
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // create one empty block
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(path, "s2");

  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  // Force a checkpoint and restart to verify the fsimage is loadable.
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
}
项目:aliyun-oss-hadoop-fs    文件:BlocksMap.java   
/**
 * Remove the block from the block map;
 * remove it from all data-node lists it belongs to;
 * and remove all data-node locations associated with the block.
 */
void removeBlock(Block block) {
  BlockInfo blockInfo = blocks.remove(block);
  if (blockInfo == null)
    return;

  // Detach the block from its owning file before dropping replica links.
  blockInfo.setBlockCollectionId(INodeId.INVALID_INODE_ID);
  // Striped blocks track locations by capacity slot, not node count.
  final int size = blockInfo.isStriped() ?
      blockInfo.getCapacity() : blockInfo.numNodes();
  // Walk backwards — presumably so removals below don't shift the
  // indices still to be visited; TODO confirm against getDatanode().
  for(int idx = size - 1; idx >= 0; idx--) {
    DatanodeDescriptor dn = blockInfo.getDatanode(idx);
    if (dn != null) {
      removeBlock(dn, blockInfo); // remove from the list and wipe the location
    }
  }
}
项目:aliyun-oss-hadoop-fs    文件:PBImageTextWriter.java   
/**
 * Resolves the full path of the directory containing {@code inode},
 * memoizing each resolved directory path in {@code dirPathCache}.
 * The root inode's parent path is "/" by definition.
 *
 * @param inode id of the inode whose parent path is wanted
 * @return the parent directory's path
 * @throws IOException if the underlying fsimage maps cannot be read
 */
@Override
public String getParentPath(long inode) throws IOException {
  if (inode == INodeId.ROOT_INODE_ID) {
    return "/";
  }
  byte[] bytes = dirChildMap.get(toBytes(inode));
  Preconditions.checkState(bytes != null && bytes.length == 8,
      "Can not find parent directory for inode %s, "
          + "fsimage might be corrupted", inode);
  long parent = toLong(bytes);
  if (!dirPathCache.containsKey(parent)) {
    bytes = dirMap.get(toBytes(parent));
    // Only the root may legitimately be absent from dirMap.
    if (parent != INodeId.ROOT_INODE_ID) {
      // Fixed: the message used to render as "... inode %s, , the ..."
      // because of a stray comma duplicated at the concatenation boundary.
      Preconditions.checkState(bytes != null,
          "Can not find parent directory for inode %s, "
              + "the fsimage might be corrupted.", parent);
    }
    // NOTE(review): for the root, bytes may be null here — assumes
    // toString(null) yields an empty string; TODO confirm helper contract.
    String parentName = toString(bytes);
    // Recursively build the ancestor path, then cache it.
    String parentPath =
        new Path(getParentPath(parent),
            parentName.isEmpty()? "/" : parentName).toString();
    dirPathCache.put(parent, parentPath);
  }
  return dirPathCache.get(parent);
}
项目:aliyun-oss-hadoop-fs    文件:TestBlockManager.java   
/**
 * Builds a BlockManager against a mocked FSNamesystem with a synthetic
 * two-rack, six-datanode topology for the tests in this class.
 */
@Before
public void setupMockCluster() throws IOException {
  Configuration conf = new HdfsConfiguration();
  // A topology script must be configured or the cluster is treated as
  // single-rack, which would defeat rack-aware placement assertions.
  conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
           "need to set a dummy value here so it assumes a multi-rack cluster");
  fsn = Mockito.mock(FSNamesystem.class);
  // BlockManager asserts namesystem lock ownership; pretend we hold them.
  Mockito.doReturn(true).when(fsn).hasWriteLock();
  Mockito.doReturn(true).when(fsn).hasReadLock();
  Mockito.doReturn(true).when(fsn).isRunning();
  bm = new BlockManager(fsn, conf);
  // Three nodes per rack: indices 0-2 on rackA, 3-5 on rackB.
  final String[] racks = {
      "/rackA",
      "/rackA",
      "/rackA",
      "/rackB",
      "/rackB",
      "/rackB"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  nodes = Arrays.asList(DFSTestUtil.toDatanodeDescriptor(storages));
  rackA = nodes.subList(0, 3);
  rackB = nodes.subList(3, 6);
  // Synthetic inode ids start just past the root's id.
  mockINodeId = INodeId.ROOT_INODE_ID + 1;
}
项目:big-c    文件:PBHelper.java   
/**
 * Converts a protobuf {@link HdfsFileStatusProto} into an
 * {@link HdfsLocatedFileStatus}. Unset optional fields fall back to
 * compatibility defaults (legacy inode id, -1 children, etc.).
 *
 * @param fs the protobuf message, may be null
 * @return the converted status, or null if {@code fs} is null
 */
public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
  if (fs == null)
    return null;
  return new HdfsLocatedFileStatus(
      fs.getLength(), fs.getFileType().equals(FileType.IS_DIR), 
      fs.getBlockReplication(), fs.getBlocksize(),
      fs.getModificationTime(), fs.getAccessTime(),
      PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(), 
      // Symlink target only applies to symlink entries.
      fs.getFileType().equals(FileType.IS_SYMLINK) ? 
          fs.getSymlink().toByteArray() : null,
      fs.getPath().toByteArray(),
      fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID,
      fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
      fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
      fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
      fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
          : BlockStoragePolicySuite.ID_UNSPECIFIED);
}
项目:big-c    文件:PBImageTextWriter.java   
/**
 * Returns the path of the directory containing {@code inode}, caching
 * resolved ancestor paths in {@code dirPathCache}. The root's parent
 * path is "/".
 *
 * @param inode id of the inode to resolve
 * @return the parent directory's path
 * @throws IOException if the fsimage maps cannot be read
 */
@Override
public String getParentPath(long inode) throws IOException {
  if (inode == INodeId.ROOT_INODE_ID) {
    return "/";
  }
  byte[] bytes = dirChildMap.get(toBytes(inode));
  Preconditions.checkState(bytes != null && bytes.length == 8,
      "Can not find parent directory for inode %s, "
          + "fsimage might be corrupted", inode);
  long parent = toLong(bytes);
  if (!dirPathCache.containsKey(parent)) {
    bytes = dirMap.get(toBytes(parent));
    // Only the root may be missing from dirMap without corruption.
    if (parent != INodeId.ROOT_INODE_ID) {
      // Fixed: removed the duplicated comma that made the message read
      // "... inode %s, , the fsimage might be corrupted."
      Preconditions.checkState(bytes != null,
          "Can not find parent directory for inode %s, "
              + "the fsimage might be corrupted.", parent);
    }
    // NOTE(review): bytes may be null for the root — assumes
    // toString(null) returns ""; TODO confirm helper contract.
    String parentName = toString(bytes);
    // Recurse up to the root, then memoize the joined path.
    String parentPath =
        new Path(getParentPath(parent),
            parentName.isEmpty()? "/" : parentName).toString();
    dirPathCache.put(parent, parentPath);
  }
  return dirPathCache.get(parent);
}
项目:big-c    文件:TestJsonUtil.java   
/**
 * Serializes an HdfsFileStatus to JSON and parses it back, asserting
 * that the round trip preserves equality of the derived FileStatus.
 */
@Test
public void testHdfsFileStatus() throws IOException {
  final long now = Time.now();
  final String parent = "/dir";
  // Fully populated status: symlink "bar", name "foo", legacy inode id.
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status  = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json    = " + json.replace(",", ",\n  "));
  // Parse back through a generic-map reader, then re-derive FileStatus.
  ObjectReader reader = new ObjectMapper().reader(Map.class);
  final HdfsFileStatus s2 =
      JsonUtil.toFileStatus((Map<?, ?>) reader.readValue(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2      = " + s2);
  System.out.println("fs2     = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
项目:big-c    文件:TestOpenFilesWithSnapshot.java   
/**
 * Regression test: rename + delete of a directory holding aborted open
 * files and a zero-length block, with a snapshot present, must produce
 * a namespace that survives saveNamespace and a NameNode restart.
 */
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);

  // check for zero sized blocks
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // create one empty block
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(path, "s2");

  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  // Checkpoint and restart to verify the resulting fsimage loads.
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:PBHelper.java   
/**
 * Protobuf-to-Java conversion for file status messages; unset optional
 * fields are mapped to backward-compatible defaults.
 *
 * @param fs the protobuf message, may be null
 * @return the converted status, or null if {@code fs} is null
 */
public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
  if (fs == null)
    return null;
  return new HdfsLocatedFileStatus(
      fs.getLength(), fs.getFileType().equals(FileType.IS_DIR), 
      fs.getBlockReplication(), fs.getBlocksize(),
      fs.getModificationTime(), fs.getAccessTime(),
      PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(), 
      // Symlink target only applies to symlink entries.
      fs.getFileType().equals(FileType.IS_SYMLINK) ? 
          fs.getSymlink().toByteArray() : null,
      fs.getPath().toByteArray(),
      fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID,
      fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
      fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
      fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
      fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
          : BlockStoragePolicySuite.ID_UNSPECIFIED);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:PBImageTextWriter.java   
/**
 * Returns the path of the directory containing {@code inode}, caching
 * resolved ancestor paths. The root's parent path is "/".
 *
 * @param inode id of the inode to resolve
 * @return the parent directory's path
 * @throws IOException if the fsimage maps cannot be read
 */
@Override
public String getParentPath(long inode) throws IOException {
  if (inode == INodeId.ROOT_INODE_ID) {
    return "/";
  }
  byte[] bytes = dirChildMap.get(toBytes(inode));
  Preconditions.checkState(bytes != null && bytes.length == 8,
      "Can not find parent directory for inode %s, "
          + "fsimage might be corrupted", inode);
  long parent = toLong(bytes);
  if (!dirPathCache.containsKey(parent)) {
    bytes = dirMap.get(toBytes(parent));
    if (parent != INodeId.ROOT_INODE_ID) {
      // Fixed: removed the duplicated comma that made the message read
      // "... inode %s, , the fsimage might be corrupted."
      Preconditions.checkState(bytes != null,
          "Can not find parent directory for inode %s, "
              + "the fsimage might be corrupted.", parent);
    }
    String parentName = toString(bytes);
    // NOTE(review): java.io.File joins with the platform separator, so
    // this produces backslashes on Windows; newer versions of this code
    // use org.apache.hadoop.fs.Path instead — consider aligning.
    String parentPath =
        new File(getParentPath(parent), parentName).toString();
    dirPathCache.put(parent, parentPath);
  }
  return dirPathCache.get(parent);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestJsonUtil.java   
/**
 * Serializes an HdfsFileStatus to JSON (via mortbay JSON.parse on the
 * way back) and asserts the round trip preserves FileStatus equality.
 */
@Test
public void testHdfsFileStatus() {
  final long now = Time.now();
  final String parent = "/dir";
  // Fully populated status: symlink "bar", name "foo", legacy inode id.
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status  = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json    = " + json.replace(",", ",\n  "));
  final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map<?, ?>)JSON.parse(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2      = " + s2);
  System.out.println("fs2     = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestOpenFilesWithSnapshot.java   
/**
 * Regression test: rename + delete of a directory holding aborted open
 * files and a zero-length block, with a snapshot present, must produce
 * a namespace that survives saveNamespace and a NameNode restart.
 */
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);

  // check for zero sized blocks
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // create one empty block
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(path, "s2");

  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  // Checkpoint and restart to verify the resulting fsimage loads.
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
}
项目:hadoop-plus    文件:TestJsonUtil.java   
/**
 * JSON round-trip test for HdfsFileStatus (older constructor without
 * encryption info or storage policy); derived FileStatus must be equal.
 */
@Test
public void testHdfsFileStatus() {
  final long now = Time.now();
  final String parent = "/dir";
  // Fully populated status: symlink "bar", name "foo", legacy inode id.
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status  = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json    = " + json.replace(",", ",\n  "));
  final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map<?, ?>)JSON.parse(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2      = " + s2);
  System.out.println("fs2     = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
项目:FlexMap    文件:PBHelper.java   
/**
 * Converts an HdfsFileStatusProto to an HdfsLocatedFileStatus, using
 * compatibility defaults for unset optional protobuf fields.
 *
 * @param fs the protobuf message, may be null
 * @return the converted status, or null if {@code fs} is null
 */
public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
  if (fs == null)
    return null;
  return new HdfsLocatedFileStatus(
      fs.getLength(), fs.getFileType().equals(FileType.IS_DIR), 
      fs.getBlockReplication(), fs.getBlocksize(),
      fs.getModificationTime(), fs.getAccessTime(),
      PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(), 
      // Symlink target only applies to symlink entries.
      fs.getFileType().equals(FileType.IS_SYMLINK) ? 
          fs.getSymlink().toByteArray() : null,
      fs.getPath().toByteArray(),
      fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID,
      fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
      fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
      fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
      fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
          : BlockStoragePolicySuite.ID_UNSPECIFIED);
}
项目:FlexMap    文件:TestJsonUtil.java   
/**
 * JSON round-trip test for HdfsFileStatus via JSON.parse; the derived
 * FileStatus before and after serialization must be equal.
 */
@Test
public void testHdfsFileStatus() {
  final long now = Time.now();
  final String parent = "/dir";
  // Fully populated status: symlink "bar", name "foo", legacy inode id.
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status  = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json    = " + json.replace(",", ",\n  "));
  final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map<?, ?>)JSON.parse(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2      = " + s2);
  System.out.println("fs2     = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
项目:FlexMap    文件:TestOpenFilesWithSnapshot.java   
/**
 * Regression test: rename + delete of a directory holding aborted open
 * files and a zero-length block, with a snapshot present, must produce
 * a namespace that survives saveNamespace and a NameNode restart.
 */
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);

  // check for zero sized blocks
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // create one empty block
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(path, "s2");

  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  // Checkpoint and restart to verify the resulting fsimage loads.
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
}
项目:hadoop-TCP    文件:TestJsonUtil.java   
/**
 * JSON round-trip test for HdfsFileStatus (older constructor without
 * encryption info or storage policy); derived FileStatus must be equal.
 */
@Test
public void testHdfsFileStatus() {
  final long now = Time.now();
  final String parent = "/dir";
  // Fully populated status: symlink "bar", name "foo", legacy inode id.
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status  = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json    = " + json.replace(",", ",\n  "));
  final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map<?, ?>)JSON.parse(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2      = " + s2);
  System.out.println("fs2     = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
项目:hardfs    文件:TestJsonUtil.java   
/**
 * JSON round-trip test for HdfsFileStatus (older constructor without
 * encryption info or storage policy); derived FileStatus must be equal.
 */
@Test
public void testHdfsFileStatus() {
  final long now = Time.now();
  final String parent = "/dir";
  // Fully populated status: symlink "bar", name "foo", legacy inode id.
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status  = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json    = " + json.replace(",", ",\n  "));
  final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map<?, ?>)JSON.parse(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2      = " + s2);
  System.out.println("fs2     = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
项目:hadoop-on-lustre2    文件:TestJsonUtil.java   
/**
 * JSON round-trip test for HdfsFileStatus (older constructor without
 * encryption info or storage policy); derived FileStatus must be equal.
 */
@Test
public void testHdfsFileStatus() {
  final long now = Time.now();
  final String parent = "/dir";
  // Fully populated status: symlink "bar", name "foo", legacy inode id.
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status  = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json    = " + json.replace(",", ",\n  "));
  final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map<?, ?>)JSON.parse(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2      = " + s2);
  System.out.println("fs2     = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
项目:hadoop-on-lustre2    文件:TestOpenFilesWithSnapshot.java   
/**
 * Regression test: rename + delete of a directory holding aborted open
 * files and a zero-length block, with a snapshot present, must produce
 * a namespace that survives saveNamespace and a NameNode restart.
 */
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);

  // check for zero sized blocks
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // create one empty block
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(path, "s2");

  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  // Checkpoint and restart to verify the resulting fsimage loads.
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
}
项目:hadoop    文件:JsonUtil.java   
/**
 * Convert a Json map to a HdfsFileStatus object.
 *
 * @param json the parsed JSON map, may be null
 * @param includesType true if the map wraps the fields under a
 *        "FileStatus" key (as WebHDFS responses do)
 * @return the reconstructed status, or null if {@code json} is null
 */
public static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
  if (json == null) {
    return null;
  }

  // Unwrap the optional "FileStatus" envelope.
  final Map<?, ?> m = includesType ? 
      (Map<?, ?>)json.get(FileStatus.class.getSimpleName()) : json;
  final String localName = (String) m.get("pathSuffix");
  final PathType type = PathType.valueOf((String) m.get("type"));
  // Symlink target is only present for SYMLINK entries.
  final byte[] symlink = type != PathType.SYMLINK? null
      : DFSUtil.string2Bytes((String)m.get("symlink"));

  final long len = ((Number) m.get("length")).longValue();
  final String owner = (String) m.get("owner");
  final String group = (String) m.get("group");
  final FsPermission permission = toFsPermission((String) m.get("permission"),
    (Boolean)m.get("aclBit"), (Boolean)m.get("encBit"));
  final long aTime = ((Number) m.get("accessTime")).longValue();
  final long mTime = ((Number) m.get("modificationTime")).longValue();
  final long blockSize = ((Number) m.get("blockSize")).longValue();
  final short replication = ((Number) m.get("replication")).shortValue();
  // Optional fields: absent in responses from older servers.
  final long fileId = m.containsKey("fileId") ?
      ((Number) m.get("fileId")).longValue() : INodeId.GRANDFATHER_INODE_ID;
  final int childrenNum = getInt(m, "childrenNum", -1);
  final byte storagePolicy = m.containsKey("storagePolicy") ?
      (byte) ((Number) m.get("storagePolicy")).longValue() :
      BlockStoragePolicySuite.ID_UNSPECIFIED;
  return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
      blockSize, mTime, aTime, permission, owner, group, symlink,
      DFSUtil.string2Bytes(localName), fileId, childrenNum, null, storagePolicy);
}
项目:hadoop    文件:ClientNamenodeProtocolServerSideTranslatorPB.java   
/**
 * Server-side translator for the {@code complete} RPC: unwraps the
 * protobuf request and delegates to the NameNode. Optional request
 * fields fall back to null (last block) and the legacy grandfather
 * inode id (file id) for compatibility with older clients.
 */
@Override
public CompleteResponseProto complete(RpcController controller,
    CompleteRequestProto req) throws ServiceException {
  try {
    boolean result = 
        server.complete(req.getSrc(), req.getClientName(),
        req.hasLast() ? PBHelper.convert(req.getLast()) : null,
        req.hasFileId() ? req.getFileId() : INodeId.GRANDFATHER_INODE_ID);
    return CompleteResponseProto.newBuilder().setResult(result).build();
  } catch (IOException e) {
    // Wrap as ServiceException per the protobuf RPC contract.
    throw new ServiceException(e);
  }
}
项目:hadoop    文件:PBImageTextWriter.java   
/**
 * Returns the path of {@code inode}'s parent directory; the root maps
 * to the empty string (callers join it with the child name).
 */
@Override
public String getParentPath(long inode) throws IOException {
  if (inode == INodeId.ROOT_INODE_ID) {
    return "";
  }
  Dir parent = dirChildMap.get(inode);
  if (parent == null) {
    // The inode is an INodeReference, which is generated from snapshot.
    // For delimited oiv tool, no need to print out metadata in snapshots.
    // NOTE(review): the dereference below relies on ignoreSnapshotName
    // always throwing; if it ever returned normally this would NPE.
    PBImageTextWriter.ignoreSnapshotName(inode);
  }
  return parent.getPath();
}
项目:hadoop    文件:PBImageTextWriter.java   
/**
 * Returns the path of the directory containing {@code inode}, caching
 * resolved ancestor paths in {@code dirPathCache}. The root's parent
 * path is "/". Snapshot-generated INodeReferences are rejected via
 * ignoreSnapshotName.
 */
@Override
public String getParentPath(long inode) throws IOException {
  if (inode == INodeId.ROOT_INODE_ID) {
    return "/";
  }
  byte[] bytes = dirChildMap.get(toBytes(inode));
  if (bytes == null) {
    // The inode is an INodeReference, which is generated from snapshot.
    // For delimited oiv tool, no need to print out metadata in snapshots.
    // NOTE(review): the length check below relies on ignoreSnapshotName
    // always throwing; if it ever returned normally this would NPE.
    PBImageTextWriter.ignoreSnapshotName(inode);
  }
  if (bytes.length != 8) {
    // Parent ids are stored as 8-byte longs; anything else is corrupt.
    throw new IOException(
            "bytes array length error. Actual length is " + bytes.length);
  }
  long parent = toLong(bytes);
  if (!dirPathCache.containsKey(parent)) {
    bytes = dirMap.get(toBytes(parent));
    if (parent != INodeId.ROOT_INODE_ID && bytes == null) {
      // The parent is an INodeReference, which is generated from snapshot.
      // For delimited oiv tool, no need to print out metadata in snapshots.
      PBImageTextWriter.ignoreSnapshotName(parent);
    }
    // NOTE(review): for the root, bytes may be null — assumes
    // toString(null) yields ""; TODO confirm helper contract.
    String parentName = toString(bytes);
    // Recursively build the ancestor path, then memoize it.
    String parentPath =
        new Path(getParentPath(parent),
            parentName.isEmpty()? "/" : parentName).toString();
    dirPathCache.put(parent, parentPath);
  }
  return dirPathCache.get(parent);
}
项目:hadoop    文件:FSImageLoader.java   
/**
 * Return the INodeId of the specified path.
 *
 * @param path an absolute path (must start with '/')
 * @return the inode id of the final path component
 * @throws FileNotFoundException if any component cannot be resolved
 */
private long lookup(String path) throws IOException {
  Preconditions.checkArgument(path.startsWith("/"));
  // Walk component by component, starting from the root inode.
  long id = INodeId.ROOT_INODE_ID;
  for (int offset = 0, next; offset < path.length(); offset = next) {
    // next = index of the '/' ending the current component, or
    // end-of-string for the last component.
    next = path.indexOf('/', offset + 1);
    if (next == -1) {
      next = path.length();
    }
    // Defensive stop; with indexOf(offset + 1) this should not trigger.
    if (offset + 1 > next) {
      break;
    }

    final String component = path.substring(offset + 1, next);

    // Skip empty components produced by repeated slashes.
    if (component.isEmpty()) {
      continue;
    }

    final long[] children = dirmap.get(id);
    if (children == null) {
      // Current inode is not a directory (or has no entry in dirmap).
      throw new FileNotFoundException(path);
    }

    // Linear scan of the children for a name match.
    boolean found = false;
    for (long cid : children) {
      FsImageProto.INodeSection.INode child = fromINodeId(cid);
      if (component.equals(child.getName().toStringUtf8())) {
        found = true;
        id = child.getId();
        break;
      }
    }
    if (!found) {
      throw new FileNotFoundException(path);
    }
  }
  return id;
}
项目:hadoop    文件:TestGlobPaths.java   
/**
 * Verifies that the root inode's /.reserved/.inodes path can be globbed
 * directly, while globbing /.reserved/* yields nothing because reserved
 * inodes are hidden from listStatus.
 */
void run() throws Exception {
  String reservedRoot = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
  // Direct glob of the reserved path resolves to itself.
  Assert.assertEquals(reservedRoot,
    TestPath.mergeStatuses(wrap.
        globStatus(new Path(reservedRoot), new AcceptAllPathFilter())));
  // These inodes don't show up via listStatus.
  Assert.assertEquals("",
    TestPath.mergeStatuses(wrap.
        globStatus(new Path("/.reserved/*"), new AcceptAllPathFilter())));
}
项目:hadoop    文件:TestFileCreation.java   
/** test addBlock(..) when replication<min and excludeNodes==null. */
@Test
public void testFileCreationError3() throws IOException {
  System.out.println("testFileCreationError3 start");
  Configuration conf = new HdfsConfiguration();
  // create cluster — zero datanodes so block allocation must fail.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    DFSClient client = dfs.dfs;

    // create a new file.
    final Path f = new Path("/foo.txt");
    createFile(dfs, f, 3);
    try {
      // addBlock should throw: no datanodes can satisfy replication.
      cluster.getNameNodeRpc().addBlock(f.toString(), client.clientName,
          null, null, INodeId.GRANDFATHER_INODE_ID, null);
      fail();
    } catch(IOException ioe) {
      FileSystem.LOG.info("GOOD!", ioe);
    }

    System.out.println("testFileCreationError3 successful");
  } finally {
    // Always release the stream and shut the mini cluster down.
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}
项目:aliyun-oss-hadoop-fs    文件:PBImageTextWriter.java   
/**
 * Looks up the path of {@code inode}'s parent directory. The root inode
 * has no parent and maps to the empty string.
 */
public String getParentPath(long inode) throws IOException {
  if (inode != INodeId.ROOT_INODE_ID) {
    // Every non-root inode must have a recorded parent directory.
    final Dir parentDir = dirChildMap.get(inode);
    Preconditions.checkState(parentDir != null,
        "Can not find parent directory for INode: %s", inode);
    return parentDir.getPath();
  }
  return "";
}
项目:aliyun-oss-hadoop-fs    文件:FSImageLoader.java   
/**
 * Return the INodeId of the specified path.
 *
 * @param path an absolute path (must start with '/')
 * @return the inode id of the final path component
 * @throws FileNotFoundException if any component cannot be resolved
 */
private long lookup(String path) throws IOException {
  Preconditions.checkArgument(path.startsWith("/"));
  // Walk component by component, starting from the root inode.
  long id = INodeId.ROOT_INODE_ID;
  for (int offset = 0, next; offset < path.length(); offset = next) {
    // next = index of the '/' ending the current component, or
    // end-of-string for the last component.
    next = path.indexOf('/', offset + 1);
    if (next == -1) {
      next = path.length();
    }
    // Defensive stop; with indexOf(offset + 1) this should not trigger.
    if (offset + 1 > next) {
      break;
    }

    final String component = path.substring(offset + 1, next);

    // Skip empty components produced by repeated slashes.
    if (component.isEmpty()) {
      continue;
    }

    final long[] children = dirmap.get(id);
    if (children == null) {
      // Current inode is not a directory (or has no entry in dirmap).
      throw new FileNotFoundException(path);
    }

    // Linear scan of the children for a name match.
    boolean found = false;
    for (long cid : children) {
      FsImageProto.INodeSection.INode child = fromINodeId(cid);
      if (component.equals(child.getName().toStringUtf8())) {
        found = true;
        id = child.getId();
        break;
      }
    }
    if (!found) {
      throw new FileNotFoundException(path);
    }
  }
  return id;
}
项目:big-c    文件:JsonUtil.java   
/**
 * Convert a Json map to a HdfsFileStatus object.
 *
 * @param json the parsed JSON map, may be null
 * @param includesType true if the map wraps the fields under a
 *        "FileStatus" key (as WebHDFS responses do)
 * @return the reconstructed status, or null if {@code json} is null
 */
public static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
  if (json == null) {
    return null;
  }

  // Unwrap the optional "FileStatus" envelope.
  final Map<?, ?> m = includesType ? 
      (Map<?, ?>)json.get(FileStatus.class.getSimpleName()) : json;
  final String localName = (String) m.get("pathSuffix");
  final PathType type = PathType.valueOf((String) m.get("type"));
  // Symlink target is only present for SYMLINK entries.
  final byte[] symlink = type != PathType.SYMLINK? null
      : DFSUtil.string2Bytes((String)m.get("symlink"));

  final long len = ((Number) m.get("length")).longValue();
  final String owner = (String) m.get("owner");
  final String group = (String) m.get("group");
  final FsPermission permission = toFsPermission((String) m.get("permission"),
    (Boolean)m.get("aclBit"), (Boolean)m.get("encBit"));
  final long aTime = ((Number) m.get("accessTime")).longValue();
  final long mTime = ((Number) m.get("modificationTime")).longValue();
  final long blockSize = ((Number) m.get("blockSize")).longValue();
  final short replication = ((Number) m.get("replication")).shortValue();
  // Optional fields: absent in responses from older servers.
  final long fileId = m.containsKey("fileId") ?
      ((Number) m.get("fileId")).longValue() : INodeId.GRANDFATHER_INODE_ID;
  final int childrenNum = getInt(m, "childrenNum", -1);
  final byte storagePolicy = m.containsKey("storagePolicy") ?
      (byte) ((Number) m.get("storagePolicy")).longValue() :
      BlockStoragePolicySuite.ID_UNSPECIFIED;
  return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
      blockSize, mTime, aTime, permission, owner, group, symlink,
      DFSUtil.string2Bytes(localName), fileId, childrenNum, null, storagePolicy);
}
项目:big-c    文件:ClientNamenodeProtocolServerSideTranslatorPB.java   
/**
 * Server-side translator for the {@code complete} RPC: unwraps the
 * protobuf request and delegates to the NameNode. Optional request
 * fields fall back to null (last block) and the legacy grandfather
 * inode id (file id) for compatibility with older clients.
 */
@Override
public CompleteResponseProto complete(RpcController controller,
    CompleteRequestProto req) throws ServiceException {
  try {
    boolean result = 
        server.complete(req.getSrc(), req.getClientName(),
        req.hasLast() ? PBHelper.convert(req.getLast()) : null,
        req.hasFileId() ? req.getFileId() : INodeId.GRANDFATHER_INODE_ID);
    return CompleteResponseProto.newBuilder().setResult(result).build();
  } catch (IOException e) {
    // Wrap as ServiceException per the protobuf RPC contract.
    throw new ServiceException(e);
  }
}
项目:big-c    文件:PBImageTextWriter.java   
/**
 * Returns the path of {@code inode}'s parent directory; the root maps
 * to the empty string (callers join it with the child name).
 */
public String getParentPath(long inode) throws IOException {
  if (inode == INodeId.ROOT_INODE_ID) {
    return "";
  }
  // Every non-root inode must have a recorded parent directory.
  Dir parent = dirChildMap.get(inode);
  Preconditions.checkState(parent != null,
      "Can not find parent directory for INode: %s", inode);
  return parent.getPath();
}
项目:big-c    文件:FSImageLoader.java   
/**
 * Return the INodeId of the specified path.
 *
 * @param path an absolute path (must start with '/')
 * @return the inode id of the final path component
 * @throws FileNotFoundException if any component cannot be resolved
 */
private long lookup(String path) throws IOException {
  Preconditions.checkArgument(path.startsWith("/"));
  // Walk component by component, starting from the root inode.
  long id = INodeId.ROOT_INODE_ID;
  for (int offset = 0, next; offset < path.length(); offset = next) {
    // next = index of the '/' ending the current component, or
    // end-of-string for the last component.
    next = path.indexOf('/', offset + 1);
    if (next == -1) {
      next = path.length();
    }
    // Defensive stop; with indexOf(offset + 1) this should not trigger.
    if (offset + 1 > next) {
      break;
    }

    final String component = path.substring(offset + 1, next);

    // Skip empty components produced by repeated slashes.
    if (component.isEmpty()) {
      continue;
    }

    final long[] children = dirmap.get(id);
    if (children == null) {
      // Current inode is not a directory (or has no entry in dirmap).
      throw new FileNotFoundException(path);
    }

    // Linear scan of the children for a name match.
    boolean found = false;
    for (long cid : children) {
      FsImageProto.INodeSection.INode child = fromINodeId(cid);
      if (component.equals(child.getName().toStringUtf8())) {
        found = true;
        id = child.getId();
        break;
      }
    }
    if (!found) {
      throw new FileNotFoundException(path);
    }
  }
  return id;
}
项目:big-c    文件:TestGlobPaths.java   
void run() throws Exception {
  // The root directory addressed through the /.reserved/.inodes escape
  // hatch must glob back to exactly itself.
  final String rootByInode = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
  Assert.assertEquals(rootByInode,
      TestPath.mergeStatuses(wrap.globStatus(
          new Path(rootByInode), new AcceptAllPathFilter())));
  // Reserved inodes never appear via listStatus, so a wildcard under
  // /.reserved matches nothing at all.
  Assert.assertEquals("",
      TestPath.mergeStatuses(wrap.globStatus(
          new Path("/.reserved/*"), new AcceptAllPathFilter())));
}
Project: big-c — File: TestFileCreation.java
/** test addBlock(..) when replication<min and excludeNodes==null. */
@Test
public void testFileCreationError3() throws IOException {
  System.out.println("testFileCreationError3 start");
  Configuration conf = new HdfsConfiguration();
  // create cluster -- deliberately zero datanodes, so no block of the new
  // file can ever reach minimal replication.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    DFSClient client = dfs.dfs;

    // create a new file.
    final Path f = new Path("/foo.txt");
    createFile(dfs, f, 3);
    // addBlock must fail: the previously allocated block cannot be
    // completed because there are no datanodes to hold any replica.
    try {
      cluster.getNameNodeRpc().addBlock(f.toString(), client.clientName,
          null, null, INodeId.GRANDFATHER_INODE_ID, null);
      fail();
    } catch(IOException ioe) {
      // Expected path: log and continue.
      FileSystem.LOG.info("GOOD!", ioe);
    }

    System.out.println("testFileCreationError3 successful");
  } finally {
    // closeStream tolerates a null dfs if setup failed early.
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}
Project: hadoop-2.6.0-cdh5.4.3 — File: JsonUtil.java
/** Convert a Json map to a HdfsFileStatus object. */
public static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
  if (json == null) {
    return null;
  }

  // When the payload includes a type wrapper, the status map is nested
  // under its class-name key; otherwise the map itself is the status.
  final Map<?, ?> m = includesType
      ? (Map<?, ?>) json.get(FileStatus.class.getSimpleName()) : json;

  final PathType type = PathType.valueOf((String) m.get("type"));
  // Only symlinks carry a "symlink" target entry.
  final byte[] symlink = type == PathType.SYMLINK
      ? DFSUtil.string2Bytes((String) m.get("symlink")) : null;

  final String localName = (String) m.get("pathSuffix");
  final long len = (Long) m.get("length");
  final String owner = (String) m.get("owner");
  final String group = (String) m.get("group");
  final FsPermission permission = toFsPermission((String) m.get("permission"),
      (Boolean) m.get("aclBit"), (Boolean) m.get("encBit"));
  final long aTime = (Long) m.get("accessTime");
  final long mTime = (Long) m.get("modificationTime");
  final long blockSize = (Long) m.get("blockSize");
  final short replication = ((Long) m.get("replication")).shortValue();

  // Optional fields: older servers omit them, so fall back to sentinels.
  final long fileId = m.containsKey("fileId")
      ? (Long) m.get("fileId") : INodeId.GRANDFATHER_INODE_ID;
  final Long childrenNumLong = (Long) m.get("childrenNum");
  final int childrenNum =
      (childrenNumLong == null) ? -1 : childrenNumLong.intValue();
  final byte storagePolicy = m.containsKey("storagePolicy")
      ? ((Long) m.get("storagePolicy")).byteValue()
      : BlockStoragePolicySuite.ID_UNSPECIFIED;

  return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
      blockSize, mTime, aTime, permission, owner, group, symlink,
      DFSUtil.string2Bytes(localName), fileId, childrenNum, null, storagePolicy);
}
Project: hadoop-2.6.0-cdh5.4.3 — File: ClientNamenodeProtocolServerSideTranslatorPB.java
@Override
public CompleteResponseProto complete(RpcController controller,
    CompleteRequestProto req) throws ServiceException {
  try {
    // Unset optional fields map to null / the legacy inode-id sentinel,
    // preserving compatibility with clients that predate them.
    CompleteResponseProto.Builder response =
        CompleteResponseProto.newBuilder();
    response.setResult(server.complete(
        req.getSrc(),
        req.getClientName(),
        req.hasLast() ? PBHelper.convert(req.getLast()) : null,
        req.hasFileId() ? req.getFileId() : INodeId.GRANDFATHER_INODE_ID));
    return response.build();
  } catch (IOException e) {
    // Wrap for the protobuf RPC layer.
    throw new ServiceException(e);
  }
}
Project: hadoop-2.6.0-cdh5.4.3 — File: PBImageTextWriter.java
public String getParentPath(long inode) throws IOException {
  // The root inode maps to the empty string (no parent exists).
  if (inode == INodeId.ROOT_INODE_ID) {
    return "";
  }
  // Every non-root inode must have been linked to its parent while the
  // image was scanned; failing that, the image is inconsistent.
  final Dir dir = dirChildMap.get(inode);
  Preconditions.checkState(dir != null,
      "Can not find parent directory for INode: %s", inode);
  return dir.getPath();
}